diff --git a/Documentation/ABI/testing/debugfs-driver-qat b/Documentation/ABI/testing/debugfs-driver-qat index 6731ffacc5f0c6a299344667d5a01151bae080b1..bd6793760f29842b841217d4226f6d5538403a62 100644 --- a/Documentation/ABI/testing/debugfs-driver-qat +++ b/Documentation/ABI/testing/debugfs-driver-qat @@ -1,4 +1,4 @@ -What: /sys/kernel/debug/qat__/qat/fw_counters +What: /sys/kernel/debug/qat__/fw_counters Date: November 2023 KernelVersion: 6.6 Contact: qat-linux@intel.com @@ -59,3 +59,51 @@ Description: (RO) Read returns the device health status. The driver does not monitor for Heartbeat. It is left for a user to poll the status periodically. + +What: /sys/kernel/debug/qat__/pm_status +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Read returns power management information specific to the + QAT device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/cnv_errors +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Read returns, for each Acceleration Engine (AE), the number + of errors and the type of the last error detected by the device + when performing verified compression. + Reported counters:: + + : Number of Compress and Verify (CnV) errors and type + of the last CnV error detected by Acceleration + Engine N. + +What: /sys/kernel/debug/qat__/heartbeat/inject_error +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (WO) Write to inject an error that simulates an heartbeat + failure. This is to be used for testing purposes. + + After writing this file, the driver stops arbitration on a + random engine and disables the fetching of heartbeat counters. + If a workload is running on the device, a job submitted to the + accelerator might not get a response and a read of the + `heartbeat/status` attribute might report -1, i.e. device + unresponsive. + The error is unrecoverable thus the device must be restarted to + restore its functionality. + + This attribute is available only when the kernel is built with + CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION=y. + + A write of 1 enables error injection. + + The following example shows how to enable error injection:: + + # cd /sys/kernel/debug/qat__ + # echo 1 > heartbeat/inject_error diff --git a/Documentation/ABI/testing/debugfs-driver-qat_telemetry b/Documentation/ABI/testing/debugfs-driver-qat_telemetry new file mode 100644 index 0000000000000000000000000000000000000000..eacee207208827341b367aba6f4df1eb8432aeec --- /dev/null +++ b/Documentation/ABI/testing/debugfs-driver-qat_telemetry @@ -0,0 +1,228 @@ +What: /sys/kernel/debug/qat__/telemetry/control +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Enables/disables the reporting of telemetry metrics. + + Allowed values to write: + ======================== + * 0: disable telemetry + * 1: enable telemetry + * 2, 3, 4: enable telemetry and calculate minimum, maximum + and average for each counter over 2, 3 or 4 samples + + Returned values: + ================ + * 1-4: telemetry is enabled and running + * 0: telemetry is disabled + + Example. + + Writing '3' to this file starts the collection of + telemetry metrics. Samples are collected every second and + stored in a circular buffer of size 3. These values are then + used to calculate the minimum, maximum and average for each + counter. 
After enabling, counters can be retrieved through + the ``device_data`` file:: + + echo 3 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + Writing '0' to this file stops the collection of telemetry + metrics:: + + echo 0 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/control + + This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/telemetry/device_data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RO) Reports device telemetry counters. + Reads report metrics about performance and utilization of + a QAT device: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms. + pci_trans_cnt number of PCIe partial transactions + max_rd_lat maximum logged read latency [ns] (could + be any read operation) + rd_lat_acc_avg average read latency [ns] + max_gp_lat max get to put latency [ns] (only takes + samples for AE0) + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_page_req_lat_avg Address Translator(AT), average page + request latency [ns] + at_trans_lat_avg AT, average page translation latency [ns] + at_max_tlb_used AT, maximum uTLB used + util_cpr utilization of Compression slice N [%] + exec_cpr execution count of Compression slice N + util_xlt utilization of Translator slice N [%] + exec_xlt execution count of Translator slice N + util_dcpr utilization of Decompression slice N [%] + exec_dcpr execution count of Decompression slice N + util_pke utilization of PKE N [%] + exec_pke execution count of PKE N + util_ucs utilization of UCS slice N [%] + exec_ucs execution count of UCS slice N + util_wat utilization of Wireless Authentication + slice N [%] + exec_wat execution count of Wireless Authentication + slice N + util_wcp utilization of Wireless Cipher slice N [%] + exec_wcp execution count of Wireless Cipher slice N + util_cph utilization of Cipher slice N [%] + exec_cph execution count of Cipher slice N + util_ath utilization of Authentication slice N [%] + exec_ath execution count of Authentication slice N + ======================= ======================================== + + The telemetry report file can be read with the following command:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/device_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + + + If a device lacks of a specific accelerator, the corresponding + attribute is not reported. + + This attribute is only available for qat_4xxx devices. + +What: /sys/kernel/debug/qat__/telemetry/rp__data +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Selects up to 4 Ring Pairs (RP) to monitor, one per file, + and report telemetry counters related to each. + + Allowed values to write: + ======================== + * 0 to ````: + Ring pair to be monitored. The value of ``num_rps`` can be + retrieved through ``/sys/bus/pci/devices//qat/num_rps``. + See Documentation/ABI/testing/sysfs-driver-qat. 
+ + Reads report metrics about performance and utilization of + the selected RP: + + ======================= ======================================== + Field Description + ======================= ======================================== + sample_cnt number of acquisitions of telemetry data + from the device. Reads are performed + every 1000 ms + rp_num RP number associated with slot + service_type service associated to the RP + pci_trans_cnt number of PCIe partial transactions + gp_lat_acc_avg average get to put latency [ns] + bw_in PCIe, write bandwidth [Mbps] + bw_out PCIe, read bandwidth [Mbps] + at_glob_devtlb_hit Message descriptor DevTLB hit rate + at_glob_devtlb_miss Message descriptor DevTLB miss rate + tl_at_payld_devtlb_hit Payload DevTLB hit rate + tl_at_payld_devtlb_miss Payload DevTLB miss rate + ======================= ======================================== + + Example. + + Writing the value '32' to the file ``rp_C_data`` starts the + collection of telemetry metrics for ring pair 32:: + + echo 32 > /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + Once a ring pair is selected, statistics can be read accessing + the file:: + + cat /sys/kernel/debug/qat_4xxx_0000:6b:00.0/telemetry/rp_C_data + + If ``control`` is set to 1, only the current values of the + counters are displayed:: + + + + If ``control`` is 2, 3 or 4, counters are displayed in the + following format:: + + + + + On QAT GEN4 devices there are 64 RPs on a PF, so the allowed + values are 0..63. This number is absolute to the device. + If Virtual Functions (VF) are used, the ring pair number can + be derived from the Bus, Device, Function of the VF: + + ============ ====== ====== ====== ====== + PCI BDF/VF RP0 RP1 RP2 RP3 + ============ ====== ====== ====== ====== + 0000:6b:0.1 RP 0 RP 1 RP 2 RP 3 + 0000:6b:0.2 RP 4 RP 5 RP 6 RP 7 + 0000:6b:0.3 RP 8 RP 9 RP 10 RP 11 + 0000:6b:0.4 RP 12 RP 13 RP 14 RP 15 + 0000:6b:0.5 RP 16 RP 17 RP 18 RP 19 + 0000:6b:0.6 RP 20 RP 21 RP 22 RP 23 + 0000:6b:0.7 RP 24 RP 25 RP 26 RP 27 + 0000:6b:1.0 RP 28 RP 29 RP 30 RP 31 + 0000:6b:1.1 RP 32 RP 33 RP 34 RP 35 + 0000:6b:1.2 RP 36 RP 37 RP 38 RP 39 + 0000:6b:1.3 RP 40 RP 41 RP 42 RP 43 + 0000:6b:1.4 RP 44 RP 45 RP 46 RP 47 + 0000:6b:1.5 RP 48 RP 49 RP 50 RP 51 + 0000:6b:1.6 RP 52 RP 53 RP 54 RP 55 + 0000:6b:1.7 RP 56 RP 57 RP 58 RP 59 + 0000:6b:2.0 RP 60 RP 61 RP 62 RP 63 + ============ ====== ====== ====== ====== + + The mapping is only valid for the BDFs of VFs on the host. + + + The service provided on a ring-pair varies depending on the + configuration. The configuration for a given device can be + queried and set using ``cfg_services``. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + The following table reports how ring pairs are mapped to VFs + on the PF 0000:6b:0.0 configured for `sym;asym` or `asym;sym`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 asym RP 1 sym RP 2 asym RP 3 sym + 0000:6b:0.2 RP 4 asym RP 5 sym RP 6 asym RP 7 sym + 0000:6b:0.3 RP 8 asym RP 9 sym RP10 asym RP11 sym + ... ... ... ... ... + =========== ============ =========== ============ =========== + + All VFs follow the same pattern. 
+ + + The following table reports how ring pairs are mapped to VFs on + the PF 0000:6b:0.0 configured for `dc`: + + =========== ============ =========== ============ =========== + PCI BDF/VF RP0/service RP1/service RP2/service RP3/service + =========== ============ =========== ============ =========== + 0000:6b:0.1 RP 0 dc RP 1 dc RP 2 dc RP 3 dc + 0000:6b:0.2 RP 4 dc RP 5 dc RP 6 dc RP 7 dc + 0000:6b:0.3 RP 8 dc RP 9 dc RP10 dc RP11 dc + ... ... ... ... ... + =========== ============ =========== ============ =========== + + The mapping of a RP to a service can be retrieved using + ``rp2srv`` from sysfs. + See Documentation/ABI/testing/sysfs-driver-qat for details. + + This attribute is only available for qat_4xxx devices. diff --git a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps index 8757dcf41c0825b80c43b04e377f514bedf980c0..a5f506f7d4819ecee67f4dcdb62f19d3b34e8473 100644 --- a/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps +++ b/Documentation/ABI/testing/sysfs-bus-event_source-devices-caps @@ -16,3 +16,9 @@ Description: Example output in powerpc: grep . /sys/bus/event_source/devices/cpu/caps/* /sys/bus/event_source/devices/cpu/caps/pmu_name:POWER9 + + The "branch_counter_nr" in the supported platform exposes the + maximum number of counters which can be shown in the u64 counters + of PERF_SAMPLE_BRANCH_COUNTERS, while the "branch_counter_width" + exposes the width of each counter. Both of them can be used by + the perf tool to parse the logged counters in each branch. diff --git a/Documentation/ABI/testing/sysfs-driver-qat b/Documentation/ABI/testing/sysfs-driver-qat index 96834d103a09e2ad0f156d9b2756f274de53a24b..96020fb051c347e0f3c87a0775853be0300425a5 100644 --- a/Documentation/ABI/testing/sysfs-driver-qat +++ b/Documentation/ABI/testing/sysfs-driver-qat @@ -95,3 +95,69 @@ Description: (RW) This configuration option provides a way to force the device i 0 This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/rp2srv +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This attribute provides a way for a user to query a + specific ring pair for the type of service that it is currently + configured for. + + When written to, the value is cached and used to perform the + read operation. Allowed values are in the range 0 to N-1, where + N is the max number of ring pairs supported by a device. This + can be queried using the attribute qat/num_rps. + + A read returns the service associated to the ring pair queried. + + The values are: + + * dc: the ring pair is configured for running compression services + * sym: the ring pair is configured for running symmetric crypto + services + * asym: the ring pair is configured for running asymmetric crypto + services + + Example usage:: + + # echo 1 > /sys/bus/pci/devices//qat/rp2srv + # cat /sys/bus/pci/devices//qat/rp2srv + sym + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat/num_rps +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RO) Returns the number of ring pairs that a single device has. + + Example usage:: + + # cat /sys/bus/pci/devices//qat/num_rps + 64 + + This attribute is only available for qat_4xxx devices. 
+ +What: /sys/bus/pci/devices//qat/auto_reset +Date: March 2024 +KernelVersion: 6.8 +Contact: qat-linux@intel.com +Description: (RW) Reports the current state of the autoreset feature + for a QAT device + + Write to the attribute to enable or disable device auto reset. + + Device auto reset is disabled by default. + + The values are: + + * 1/Yy/on: auto reset enabled. If the device encounters an + unrecoverable error, it will be reset automatically. + * 0/Nn/off: auto reset disabled. If the device encounters an + unrecoverable error, it will not be reset. + + This attribute is only available for qat_4xxx devices. diff --git a/Documentation/ABI/testing/sysfs-driver-qat_ras b/Documentation/ABI/testing/sysfs-driver-qat_ras new file mode 100644 index 0000000000000000000000000000000000000000..176dea1e9c0aa9684cfc9c15037aecba2277795f --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat_ras @@ -0,0 +1,41 @@ +What: /sys/bus/pci/devices//qat_ras/errors_correctable +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of correctable errors detected by the device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_ras/errors_nonfatal +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of non fatal errors detected by the device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_ras/errors_fatal +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (RO) Reports the number of fatal errors detected by the device. + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_ras/reset_error_counters +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: (WO) Write to resets all error counters of a device. + + The following example reports how to reset the counters:: + + # echo 1 > /sys/bus/pci/devices//qat_ras/reset_error_counters + # cat /sys/bus/pci/devices//qat_ras/errors_correctable + 0 + # cat /sys/bus/pci/devices//qat_ras/errors_nonfatal + 0 + # cat /sys/bus/pci/devices//qat_ras/errors_fatal + 0 + + This attribute is only available for qat_4xxx devices. diff --git a/Documentation/ABI/testing/sysfs-driver-qat_rl b/Documentation/ABI/testing/sysfs-driver-qat_rl new file mode 100644 index 0000000000000000000000000000000000000000..8c282ae3155ddc7c96351a1a0ab2ea711ba282a8 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-qat_rl @@ -0,0 +1,226 @@ +What: /sys/bus/pci/devices//qat_rl/sla_op +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (WO) This attribute is used to perform an operation on an SLA. + The supported operations are: add, update, rm, rm_all, and get. + + Input values must be filled through the associated attribute in + this group before a write to this file. + If the operation completes successfully, the associated + attributes will be updated. + The associated attributes are: cir, pir, srv, rp, and id. + + Supported operations: + + * add: Creates a new SLA with the provided inputs from user. + * Inputs: cir, pir, srv, and rp + * Output: id + + * get: Returns the configuration of the specified SLA in id attribute + * Inputs: id + * Outputs: cir, pir, srv, and rp + + * update: Updates the SLA with new values set in the following attributes + * Inputs: id, cir, and pir + + * rm: Removes the specified SLA in the id attribute. 
+ * Inputs: id + + * rm_all: Removes all the configured SLAs. + * Inputs: None + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/rp +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) When read, reports the current assigned ring pairs for the + queried SLA. + When wrote to, configures the ring pairs associated to a new SLA. + + The value is a 64-bit bit mask and is written/displayed in hex. + Each bit of this mask represents a single ring pair i.e., + bit 1 == ring pair id 0; bit 3 == ring pair id 2. + + Selected ring pairs must to be assigned to a single service, + i.e. the one provided with the srv attribute. The service + assigned to a certain ring pair can be checked by querying + the attribute qat/rp2srv. + + The maximum number of ring pairs is 4 per SLA. + + Applicability in sla_op: + + * WRITE: add operation + * READ: get operation + + Example usage:: + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # cat /sys/bus/pci/devices//qat_rl/rp + 0x5 + + ## Write + # echo 0x5 > /sys/bus/pci/devices//qat_rl/rp + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/id +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) If written to, the value is used to retrieve a particular + SLA and operate on it. + This is valid only for the following operations: update, rm, + and get. + A read of this attribute is only guaranteed to have correct data + after creation of an SLA. + + Applicability in sla_op: + + * WRITE: rm and update operations + * READ: add and get operations + + Example usage:: + + ## Read + ## Set attributes e.g. cir, pir, srv, etc + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/id + 4 + + ## Write + # echo 7 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/rp + 0x5 ## ring pair ID 0 and ring pair ID 2 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/cir +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Committed information rate (CIR). Rate guaranteed to be + achieved by a particular SLA. The value is expressed in + permille scale, i.e. 1000 refers to the maximum device + throughput for a selected service. + + After sending a "get" to sla_op, this will be populated with the + CIR for that queried SLA. + Write to this file before sending an "add/update" sla_op, to set + the SLA to the specified value. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo 500 > /sys/bus/pci/devices//qat_rl/cir + # echo "add" /sys/bus/pci/devices//qat_rl/sla_op + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/cir + 500 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/pir +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Peak information rate (PIR). The maximum rate that can be + achieved by that particular SLA. An SLA can reach a value + between CIR and PIR when the device is not fully utilized by + requests from other users (assigned to different SLAs). + + After sending a "get" to sla_op, this will be populated with the + PIR for that queried SLA. 
+ Write to this file before sending an "add/update" sla_op, to set + the SLA to the specified value. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo 750 > /sys/bus/pci/devices//qat_rl/pir + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/pir + 750 + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/srv +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) Service (SRV). Represents the service (sym, asym, dc) + associated to an SLA. + Can be written to or queried to set/show the SRV type for an SLA. + The SRV attribute is used to specify the SRV type before adding + an SLA. After an SLA is configured, reports the service + associated to that SLA. + + Applicability in sla_op: + + * WRITE: add and update operations + * READ: get operation + + Example usage:: + + ## Write + # echo "dc" > /sys/bus/pci/devices//qat_rl/srv + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/id + 4 + + ## Read + # echo 4 > /sys/bus/pci/devices//qat_rl/id + # echo "get" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/srv + dc + + This attribute is only available for qat_4xxx devices. + +What: /sys/bus/pci/devices//qat_rl/cap_rem +Date: January 2024 +KernelVersion: 6.7 +Contact: qat-linux@intel.com +Description: + (RW) This file will return the remaining capability for a + particular service/sla. This is the remaining value that a new + SLA can be set to or a current SLA can be increased with. + + Example usage:: + + # echo "asym" > /sys/bus/pci/devices//qat_rl/cap_rem + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 250 + # echo 250 > /sys/bus/pci/devices//qat_rl/cir + # echo "add" > /sys/bus/pci/devices//qat_rl/sla_op + # cat /sys/bus/pci/devices//qat_rl/cap_rem + 0 + + This attribute is only available for qat_4xxx devices. diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index b26b5274eaaf140ed8ccb617df2eca53a166e8bd..aa0edf0d07f02d79f31f00cd79d2d7561bc3f869 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1045,11 +1045,12 @@ All time durations are in microseconds. - user_usec - system_usec - and the following five when the controller is enabled: + and the following six when the controller is enabled: - nr_periods - nr_throttled - throttled_usec + - current_bw - nr_bursts - burst_usec @@ -1717,6 +1718,30 @@ IO Interface Files 8:16 rbytes=1459200 wbytes=314773504 rios=192 wios=353 dbytes=0 dios=0 8:0 rbytes=90430464 wbytes=299008000 rios=8950 wios=1252 dbytes=50331648 dios=3021 + io.extstat + A read-only nested-keyed file. + + Lines are keyed by $MAJ:$MIN device numbers and not ordered. + The following nested keys are defined. 
+ + ======== ============================= + rwait IO read wait time + wwait IO write wait time + rserv IO read service time + wserv IO write service time + rcomp Number of completed read IOs + wcomp Number of completed write IOs + rbytesq Bytes of queued read IOs + wbytesq Bytes of queued write IOs + riosq Number of queued read IOs + wiosq Number of queued write IOs + ======== ============================= + + An example read output follows:: + + 253:16 rwait=0 wwait=3300 rserv=0 wserv=414366321956 rcomp=0 wcomp=12 rbytesq=0 wbytesq=40960000 riosq=0 wiosq=12 + 253:0 rwait=0 wwait=0 rserv=0 wserv=0 rcomp=0 wcomp=0 rbytesq=0 wbytesq=0 riosq=0 wiosq=0 + io.cost.qos A read-write nested-keyed file which exists only on the root cgroup. diff --git a/Documentation/admin-guide/iostats.rst b/Documentation/admin-guide/iostats.rst index 609a3201fd4e1ec233aa45260e94c335f01edbf7..f9af03371cc18e31aec0108745dc0db10ea8f920 100644 --- a/Documentation/admin-guide/iostats.rst +++ b/Documentation/admin-guide/iostats.rst @@ -131,6 +131,12 @@ Field 16 -- # of flush requests completed Field 17 -- # of milliseconds spent flushing This is the total number of milliseconds spent by all flush requests. +Field 18 -- # of milliseconds spent reading on device driver's side + +Field 19 -- # of milliseconds spent writing on device driver's side + +Field 20 -- # of milliseconds spent discarding on device driver's side + To avoid introducing performance bottlenecks, no locks are held while modifying these counters. This implies that minor inaccuracies may be introduced when changes collide, so (for instance) adding up all the diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 41644336e358727b5b9b184761e1d11b1332fcca..003561cc75d220579ac3146d98671e74985c84dc 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -586,6 +586,9 @@ nokmem -- Disable kernel memory accounting. nobpf -- Disable BPF memory accounting. + cgwb_v1 Enable writeback control for cgroup for cgroup v1 + interface. + checkreqprot= [SELINUX] Set initial checkreqprot flag value. Format: { "0" | "1" } See security/selinux/Kconfig help text. @@ -2299,6 +2302,11 @@ isapnp= [ISAPNP] Format: ,,, + zhaoxin_patch_bitmask= + [X86] Bitmask for Zhaoxin Platform's patch. + bit 0: enable KH-40000 dma patch's node check function + + isolcpus= [KNL,SMP,ISOL] Isolate a given set of CPUs from disturbance. [Deprecated - use cpusets instead] Format: [flag-list,] @@ -3275,6 +3283,11 @@ mga= [HW,DRM] + microcode.force_minrev= [X86] + Format: + Enable or disable the microcode minimal revision + enforcement for the runtime microcode loader. + min_addr=nn[KMG] [KNL,BOOT,IA-64] All physical memory below this physical address is ignored. @@ -7001,6 +7014,11 @@ vmpoff= [KNL,S390] Perform z/VM CP command after power off. Format: + vring_force_dma_api + Force virtio vring to use dma api. This is only needed + on xdragon platform (prior to 20181230 release, e.g. + 0930 release). + vsyscall= [X86-64] Controls the behavior of vsyscalls (i.e. 
calls to fixed addresses of 0xffffffffff600x00 from legacy diff --git a/Documentation/admin-guide/perf/dwc_pcie_pmu.rst b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst new file mode 100644 index 0000000000000000000000000000000000000000..d47cd229d7106f26ac47b85b66e6fc28a95479ad --- /dev/null +++ b/Documentation/admin-guide/perf/dwc_pcie_pmu.rst @@ -0,0 +1,94 @@ +====================================================================== +Synopsys DesignWare Cores (DWC) PCIe Performance Monitoring Unit (PMU) +====================================================================== + +DesignWare Cores (DWC) PCIe PMU +=============================== + +The PMU is a PCIe configuration space register block provided by each PCIe Root +Port in a Vendor-Specific Extended Capability named RAS D.E.S (Debug, Error +injection, and Statistics). + +As the name indicates, the RAS DES capability supports system level +debugging, AER error injection, and collection of statistics. To facilitate +collection of statistics, Synopsys DesignWare Cores PCIe controller +provides the following two features: + +- one 64-bit counter for Time Based Analysis (RX/TX data throughput and + time spent in each low-power LTSSM state) and +- one 32-bit counter for Event Counting (error and non-error events for + a specified lane) + +Note: There is no interrupt for counter overflow. + +Time Based Analysis +------------------- + +Using this feature you can obtain information regarding RX/TX data +throughput and time spent in each low-power LTSSM state by the controller. +The PMU measures data in two categories: + +- Group#0: Percentage of time the controller stays in LTSSM states. +- Group#1: Amount of data processed (Units of 16 bytes). + +Lane Event counters +------------------- + +Using this feature you can obtain Error and Non-Error information in +specific lane by the controller. The PMU event is selected by all of: + +- Group i +- Event j within the Group i +- Lane k + +Some of the events only exist for specific configurations. + +DesignWare Cores (DWC) PCIe PMU Driver +======================================= + +This driver adds PMU devices for each PCIe Root Port named based on the BDF of +the Root Port. For example, + + 30:03.0 PCI bridge: Device 1ded:8000 (rev 01) + +the PMU device name for this Root Port is dwc_rootport_3018. + +The DWC PCIe PMU driver registers a perf PMU driver, which provides +description of available events and configuration options in sysfs, see +/sys/bus/event_source/devices/dwc_rootport_{bdf}. + +The "format" directory describes format of the config fields of the +perf_event_attr structure. The "events" directory provides configuration +templates for all documented events. For example, +"Rx_PCIe_TLP_Data_Payload" is an equivalent of "eventid=0x22,type=0x1". 
+ +The "perf list" command shall list the available events from sysfs, e.g.:: + + $# perf list | grep dwc_rootport + <...> + dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ [Kernel PMU event] + <...> + dwc_rootport_3018/rx_memory_read,lane=?/ [Kernel PMU event] + +Time Based Analysis Event Usage +------------------------------- + +Example usage of counting PCIe RX TLP data payload (Units of bytes):: + + $# perf stat -a -e dwc_rootport_3018/Rx_PCIe_TLP_Data_Payload/ + +The average RX/TX bandwidth can be calculated using the following formula: + + PCIe RX Bandwidth = Rx_PCIe_TLP_Data_Payload / Measure_Time_Window + PCIe TX Bandwidth = Tx_PCIe_TLP_Data_Payload / Measure_Time_Window + +Lane Event Usage +------------------------------- + +Each lane has the same event set and to avoid generating a list of hundreds +of events, the user need to specify the lane ID explicitly, e.g.:: + + $# perf stat -a -e dwc_rootport_3018/rx_memory_read,lane=4/ + +The driver does not support sampling, therefore "perf record" will not +work. Per-task (without "-a") perf sessions are not supported. diff --git a/Documentation/admin-guide/perf/index.rst b/Documentation/admin-guide/perf/index.rst index f60be04e4e336ecfc3d00eca73b609c3d20a909d..6bc7739fddb5e0b95a83d2f15edcf8ce1a11200d 100644 --- a/Documentation/admin-guide/perf/index.rst +++ b/Documentation/admin-guide/perf/index.rst @@ -19,6 +19,7 @@ Performance monitor support arm_dsu_pmu thunderx2-pmu alibaba_pmu + dwc_pcie_pmu nvidia-pmu meson-ddr-pmu cxl diff --git a/Documentation/admin-guide/sysctl/fs.rst b/Documentation/admin-guide/sysctl/fs.rst index a321b84eccaac0f66221836250073fb20f083fbd..11b2dd4ef5aeb26d7ca2d3751bb39c2c3d831ca1 100644 --- a/Documentation/admin-guide/sysctl/fs.rst +++ b/Documentation/admin-guide/sysctl/fs.rst @@ -205,6 +205,15 @@ already own the source file, or do not have read/write access to it. This protection is based on the restrictions in Openwall and grsecurity. +hardlink_cross_projid +--------------------- + +This is a temporary workaround plan to avoid the limitation when creating +hard link cross two projids. When set to "0", hardlink creation cross +two projids is restricted. When set to "1" hardlinks can be created +cross two projids. + + protected_regular ----------------- diff --git a/Documentation/admin-guide/sysctl/kernel.rst b/Documentation/admin-guide/sysctl/kernel.rst index cf33de56da27dc6bb1ac1e6a1929867584945926..af3fe24e938e68f590d428505c8e104cd43e629f 100644 --- a/Documentation/admin-guide/sysctl/kernel.rst +++ b/Documentation/admin-guide/sysctl/kernel.rst @@ -1603,6 +1603,24 @@ entry will default to 2 instead of 0. = ============================================================= +unprivileged_userns_clone +========================= + +This value controls if unprivileged users could unshare a new user +namespace. When the value is zero, unprivileged users are not allowed +to unshare a new user namespace. Privileged users (with CAP_SYS_ADMIN) +are not affected and are always capable of unsharing a new user +namespace. + + +userns_max_level +================ + +This value indicates the maximum nested level of user namespace. The +valid configuration values are 0-33. When configured to zero, user +namespace is effectively disabled. 
+ + warn_limit ========== diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 45ba1f4dc004876f16a57fbcb34debd065769bc2..beabacb0fcba53713a6392b7bc5e0eec62123028 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -75,6 +75,7 @@ Currently, these files are in /proc/sys/vm: - watermark_boost_factor - watermark_scale_factor - zone_reclaim_mode +- enable_context_readahead admin_reserve_kbytes @@ -1044,3 +1045,19 @@ of other processes running on other nodes will not be affected. Allowing regular swap effectively restricts allocations to the local node unless explicitly overridden by memory policies or cpuset configurations. + + +enable_context_readahead +======================== + +Specific workloads whose io activities are mostly random, context readahead +feature may introduce unnecessary io read operations, which will impact app's +performance. + +Default it is enabled. + +To disable context readahead: + echo 0 > /proc/sys/vm/enable_context_readahead + +To enable context readahead again: + echo 1 > /proc/sys/vm/enable_context_readahead diff --git a/Documentation/arch/arm64/cpu-feature-registers.rst b/Documentation/arch/arm64/cpu-feature-registers.rst index de6d8a4790e2b6cd06f69fe5ebe72eb69c23cb3d..14ea68bcf196ed3b24f034615865d0fb31f383f1 100644 --- a/Documentation/arch/arm64/cpu-feature-registers.rst +++ b/Documentation/arch/arm64/cpu-feature-registers.rst @@ -152,6 +152,8 @@ infrastructure: +------------------------------+---------+---------+ | DIT | [51-48] | y | +------------------------------+---------+---------+ + | MPAM | [43-40] | n | + +------------------------------+---------+---------+ | SVE | [35-32] | y | +------------------------------+---------+---------+ | GIC | [27-24] | n | diff --git a/Documentation/arch/x86/hygon-secure-virtualization.rst b/Documentation/arch/x86/hygon-secure-virtualization.rst new file mode 100644 index 0000000000000000000000000000000000000000..ab94107c91f511f2e61299b7c918793e7209f6ef --- /dev/null +++ b/Documentation/arch/x86/hygon-secure-virtualization.rst @@ -0,0 +1,101 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=========================== +HYGON Secure Virtualization +=========================== + +China Secure Virtualization (CSV) is a key virtualization feature on Hygon +processors. + +The 1st generation of CSV (CSV for short) is a secure virtualization technology +to provide memory encryption for the virtual machine (VM), each VM's memory is +encrypted by its unique encryption key which is managed by secure processor. + +The 2nd generation of CSV (CSV2 for short) provides security enhancement to CSV +by encrypting not only the VM's memory but also the vCPU's registers of the VM. + +The 3rd generation of CSV (CSV3 for short) is a more advanced secure +virtualization technology, it integrates secure processor, memory encryption and +memory isolation to provide the ability to protect guest's private data. The CSV3 +guest's context like CPU registers, control block and nested page table is accessed +only by the guest itself and the secure processor. Neither other guests nor the +host can tamper with the guest's context. + +The secure processor is a separate processor inside Hygon hardware. The firmware +running inside the secure processor performs activities in a secure way, such as +OVMF encryption, VM launch, secure memory management and nested page table +management etc. For more information, please see CSV spec and CSV3 spec from Hygon. 
+ +A CSV guest is running in the memory that is encrypted with a dedicated encrypt +key which is set by the secure processor. And CSV guest's memory encrypt key is +unique from the others. A low latency crypto engine resides on Hygon hardware +to minimize the negative effect on memory bandwidth. In CSV guest, a guest private +page will be automatically decrypted when read from memory and encrypted when +written to memory. + +CSV3 provides an enhancement technology named memory isolation to improve the +security. A dedicated memory isolation hardware is built in Hygon hardware. Only +the secure processor has privilege to configure the isolation hardware. At the +BIOS stage, host will reserve several memory regions as secure which are protected +by the isolation hardware. The secure processor allocates the reserved secure +memory for CSV3 guest and marks the memory as dedicated for the current CSV3 +guest. Any memory access (read or write) to CSV3 guest's private memory outside +the guest will be blocked by isolation hardware. + +A CSV3 guest may declare some memory regions as shared to share data with the +host. When a page is set as shared, read/write on the page will bypass the +isolation hardware and the guest's shared memory can be accessed by the host. A +method named CSV3 secure call command is designed and CSV3 guest sends the secure +call command to the secure processor to change private memory to shared memory. +In the method, 2 dedicated pages are reserved at early stage of the guest. Any +read/write on the dedicated pages will trigger nested page fault. When NPF +happens, the host helps to issue an external command to the secure processor but +cannot tamper with the data in the guest's private memory. Then the secure +processor checks the fault address and handles the command if the address is +exactly the dedicated pages. + +Support for CSV can be determined through the CPUID instruction. The CPUID +function 0x8000001f reports information to CSV:: + + 0x8000001f[eax]: + Bit[1] indicates support for CSV + Bit[3] indicates support for CSV2 + Bit[30] indicates support for CSV3 + +If CSV is support, MSR 0xc0010131 can be used to determine if CSV is active:: + + 0xc0010131: + Bit[0] 0 = CSV is not active + 1 = CSV is active + Bit[1] 0 = CSV2 is not active + 1 = CSV2 is active + Bit[30] 0 = CSV3 is not active + 1 = CSV3 is active + +All CSV/CSV2's configurations must be enabled in CSV3. Linux can activate CSV3 by +default (CONFIG_HYGON_CSV=y, CONFIG_CMA=y). CSV3 guest's memory is managed by +CMA (Contiguous Memory Allocation). User must specify CSV3 total secure memory on +the linux kernel command line with csv_mem_size or csv_mem_percentage:: + + csv_mem_size=nn[MG] + [KNL,CSV] + Reserve specified CSV3 memory size in CMA. CSV3's memory will be + allocated from these CMAs. + For instance, csv_mem_size=40G, 40G memory is reserved for CSV3. + + csv_mem_percentage=nn + [KNL,CSV] + Reserve specified memory size which is prorated according to the + whole system memory size. CSV3 guest's memory will be allocated + from these CMAs. + For instance, csv_mem_percentage=60, means 60% system memory is + reserved for CSV3. + The maximum percentage is 80. And the default percentage is 0. + +Limitations +The reserved CSV3 memory within CMA cannot be used by kernel or any application that +may pin memory using long term gup during the application's life time. 
+For instance, if the whole system memory is 64G and 32G is reserved for CSV3 with +kernel command line csv_mem_percentage=50, only 32G memory is available for CSV/CSV2. +As a result, user will fail to run a CSV/CSV2 guest with memory size which exceeds +32G. diff --git a/Documentation/arch/x86/microcode.rst b/Documentation/arch/x86/microcode.rst index b627c6f36bcf5a966b9f7ff7e34059f44ffe8f38..69c04052861df91fd452c3c172175199de37d837 100644 --- a/Documentation/arch/x86/microcode.rst +++ b/Documentation/arch/x86/microcode.rst @@ -35,6 +35,8 @@ on Intel: kernel/x86/microcode/GenuineIntel.bin on AMD : kernel/x86/microcode/AuthenticAMD.bin +on Hygon: + kernel/x86/microcode/HygonGenuine.bin During BSP (BootStrapping Processor) boot (pre-SMP), the kernel scans the microcode file in the initrd. If microcode matching the @@ -69,6 +71,10 @@ here for future reference only). cd $TMPDIR mkdir -p $DSTDIR + if [ -d /lib/firmware/hygon-ucode ]; then + cat /lib/firmware/hygon-ucode/microcode_hygon*.bin > $DSTDIR/HygonGenuine.bin + fi + if [ -d /lib/firmware/amd-ucode ]; then cat /lib/firmware/amd-ucode/microcode_amd*.bin > $DSTDIR/AuthenticAMD.bin fi @@ -217,7 +223,8 @@ currently supported. Here's an example:: - CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 amd-ucode/microcode_amd_fam15h.bin" + CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3a-09 \ + amd-ucode/microcode_amd_fam15h.bin hygon-ucode/microcode_hygon_fam18h.bin" CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware" This basically means, you have the following tree structure locally:: @@ -227,6 +234,10 @@ This basically means, you have the following tree structure locally:: ... | |-- microcode_amd_fam15h.bin ... + |-- hygon-ucode + ... + | |-- microcode_hygon_fam18h.bin + ... |-- intel-ucode ... | |-- 06-3a-09 diff --git a/Documentation/arch/x86/resctrl.rst b/Documentation/arch/x86/resctrl.rst index cb05d90111b4f5d8380f9a858d5b6d7c4ffb698c..a6279df64a9db8aa69dd08d8643e9cc9b7c42da7 100644 --- a/Documentation/arch/x86/resctrl.rst +++ b/Documentation/arch/x86/resctrl.rst @@ -35,7 +35,7 @@ about the feature from resctrl's info directory. To use the feature mount the file system:: - # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps]] /sys/fs/resctrl + # mount -t resctrl resctrl [-o cdp[,cdpl2][,mba_MBps][,debug]] /sys/fs/resctrl mount options are: @@ -46,6 +46,9 @@ mount options are: "mba_MBps": Enable the MBA Software Controller(mba_sc) to specify MBA bandwidth in MBps +"debug": + Make debug files accessible. Available debug files are annotated with + "Available only with debug option". L2 and L3 CDP are controlled separately. @@ -124,6 +127,13 @@ related to allocation: "P": Corresponding region is pseudo-locked. No sharing allowed. +"sparse_masks": + Indicates if non-contiguous 1s value in CBM is supported. + + "0": + Only contiguous 1s value in CBM is supported. + "1": + Non-contiguous 1s value in CBM is supported. Memory bandwidth(MB) subdirectory contains the following files with respect to allocation: @@ -299,7 +309,14 @@ All groups contain the following files: "tasks": Reading this file shows the list of all tasks that belong to this group. Writing a task id to the file will add a task to the - group. If the group is a CTRL_MON group the task is removed from + group. Multiple tasks can be added by separating the task ids + with commas. Tasks will be assigned sequentially. Multiple + failures are not supported. 
A single failure encountered while + attempting to assign a task will cause the operation to abort and + already added tasks before the failure will remain in the group. + Failures will be logged to /sys/fs/resctrl/info/last_cmd_status. + + If the group is a CTRL_MON group the task is removed from whichever previous CTRL_MON group owned the task and also from any MON group that owned the task. If the group is a MON group, then the task must already belong to the CTRL_MON parent of this @@ -342,6 +359,10 @@ When control is enabled all CTRL_MON groups will also contain: file. On successful pseudo-locked region creation the mode will automatically change to "pseudo-locked". +"ctrl_hw_id": + Available only with debug option. The identifier used by hardware + for the control group. On x86 this is the CLOSID. + When monitoring is enabled all MON groups will also contain: "mon_data": @@ -355,6 +376,10 @@ When monitoring is enabled all MON groups will also contain: the sum for all tasks in the CTRL_MON group and all tasks in MON groups. Please see example section for more details on usage. +"mon_hw_id": + Available only with debug option. The identifier used by hardware + for the monitor group. On x86 this is the RMID. + Resource allocation rules ------------------------- @@ -445,12 +470,13 @@ For cache resources we describe the portion of the cache that is available for allocation using a bitmask. The maximum value of the mask is defined by each cpu model (and may be different for different cache levels). It is found using CPUID, but is also provided in the "info" directory of -the resctrl file system in "info/{resource}/cbm_mask". Intel hardware +the resctrl file system in "info/{resource}/cbm_mask". Some Intel hardware requires that these masks have all the '1' bits in a contiguous block. So 0x3, 0x6 and 0xC are legal 4-bit masks with two bits set, but 0x5, 0x9 -and 0xA are not. On a system with a 20-bit mask each bit represents 5% -of the capacity of the cache. You could partition the cache into four -equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. +and 0xA are not. Check /sys/fs/resctrl/info/{resource}/sparse_masks +if non-contiguous 1s value is supported. On a system with a 20-bit mask +each bit represents 5% of the capacity of the cache. You could partition +the cache into four equal parts with masks: 0x1f, 0x3e0, 0x7c00, 0xf8000. Memory bandwidth Allocation and monitoring ========================================== diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst index 936f6aaa75c857bca55a7ced70b0d99490a9014d..0cfa0f667f2a3cd7c83699ef195df621e932d0e9 100644 --- a/Documentation/dev-tools/kfence.rst +++ b/Documentation/dev-tools/kfence.rst @@ -67,13 +67,142 @@ The total memory dedicated to the KFENCE memory pool can be computed as:: Using the default config, and assuming a page size of 4 KiB, results in dedicating 2 MiB to the KFENCE memory pool. +You can change the KFENCE memory pool size by setting ``kfence.num_objects`` +in boot command line, or writing to +``/sys/module/kfence/parameters/num_objects`` when kfence is not enabled, +and the pool size of each node will be computed and updated +in the same way as above. You can set this value as large as possible, so +please be careful DO NOT use up all memorys. +When enabling KFENCE, ``num_objects`` will be adjusted to make the pool size +aligned up to 1GiB. 
That means, ``num_objects`` itself will be aligned up to +131071 (Unless ``num_objects`` is smaller than it and is regarded as using +the upstream mode). + +You can enable/disable KFENCE dynamically after startup by writing a proper +number to ``/sys/module/kfence/parameters/sample_interval``. Setting this value +to 0 means disabling KFENCE, and unused KFENCE pool memory will be +automatically freed. Otherwise KFENCE will be enabled, it will try to alloc +enough memory to hold ``num_objects`` the user has set. If this value is a +negative number, sample_interval will be invalid, and KFENCE will alloc slabs +and pages from its pool at all time if possible. + +You can change KFENCE pool mode by setting ``kfence.pool_mode`` in boot command +line, or writing to ``/sys/module/kfence/parameters/pool_mode`` when kfence is +not enabled. If the value is ``global`` (as default), ``num_objects`` becomes a +global total sum. The total KFENCE pools will hold ``num_objects`` slabs/pages. +Otherwise if the value is ``node``, ``num_objects`` becomes a per node value, +KFENCE pools on each node will hold ``num_objects`` slabs/pages separately. + Note: On architectures that support huge pages, KFENCE will ensure that the pool is using pages of size ``PAGE_SIZE``. This will result in additional page tables being allocated. +TLB recover issue +~~~~~~~~~~~~~~~~~ + +For some arch like x86, kernel virtual address directly mapping to physical +address is mapped by PUD huge TLB, so that performance can be improved since +kernel no need to visit PMD and PTE. Each PUD covers an 1GiB area. + +However, KFENCE needs to set guarded pages and breaks this design. PUD will be +splited to PTE, meaning that an 1GiB area will be splited to a large number of +4KiB (page size) areas. This may impact the performance. + +To solve this issue, the size of each kfence pool area is forced to be 1GiB, +and one area can hold 131071 objects, calculating by:: + + 1GiB / 4KiB / 2 - 1 = 131071 + +So the user input kfence.num_objects will be aligned up to 131071 for +convenience of splitting them to several 1GiB areas. + +One KFENCE pool area will be allocated in 1GiB aligned address, ensuring +only splitting one PUD. When KFENCE is disabled and there is no active +slabs/pages in this area, it will be freed and the corresponding TLB will +be recovered to the origin PUD (only on x86_64 now). + +An exception is the user input less than 131071 in boot cmdline. See mode 1 +of the following examples. + +Set a pool limit on various memory +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Like crashkernel, the user can limit the size of kfence pool by setting +``kfence.booting_max`` in boot command line. A reasonable config can be:: + + kfence.booting_max=0-128M:0,128M-256M:1M,256M-:2M + +So that: + On machines with memory of [0, 128M), kfence will not be enabled. + + On machines with memory of [128M, 256M), kfence will allocate at most 1MB + for kfence pool. (which means num_objects = 127 on page_size = 4KB) + + On machines with memory larger than 256M, kfence will allocate at most 2MB + for kfence pool. (which means num_objects = 255 on page_size = 4KB) + +Notes: + This config only sets the upper limit, so if the user sets num_objects = 127 + and ``kfence.booting_max=0-:2M``, kfence will still allocate 1MB for pool. + + This config only works for upstream mode. (pool_size < 1GB and + sample_interval > 0) Because if the user want to use debug mode, he must + focus on the specific machine and not need this general setting. 
+ +Examples +~~~~~~~~ + +There are mainly three kinds of distributing method. + +1. Upstream mode:: + + num_objects < 131071 + pool_mode = global (cannot be node) + sample_interval cannot be negative + + In this mode, everything looks like it is in upstream. However, if user + enlarges ``num_objects`` after startup, it will be aligned up to 131071 + and become mode 2. + +2. Global mode:: + + num_objects >= 131071 + pool_mode = global + + For example, if num_objects = 131071 * 2, and there are 4 nodes in total. + Node 0 and node 1 will separately alloc 1GiB memoty for KFENCE pools, and + there is nothing to do with node 2 and node 3. Sampling slabs and pages on + these empty nodes (2 and 3) will be mapped to previous nodes (0 and 1). + + If num_objects = 131071 * 6, the memory usage will be [2, 2, 1, 1]GiB on + these 4 nodes. + +3. Per node mode:: + + num_objects >= 131071 + pool_mode = node + + This mode is easy to understand. If num_objects = 131071 * n, the memory + usage will be [n, n, n, n]GiB on 4 nodes. + +Monitoring specific slabs +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If users want to enable or disable KFENCE for specific slabs, setting via +per_slab switch at ``/sys/kernel/slab//skip_kfence``. The default +value is 0 for all slabs (meaning do not skip). + +Users can also switch monitoring order0 pages by +setting ``kfence.order0_page`` in boot command line, +or writing to ``/sys/module/kfence/parameters/order0_page``. + Error reports ~~~~~~~~~~~~~ +By default, KFENCE will only report faults in dmesg. If users want to panic +the kernel, set ``kfence.fault=panic`` in boot command line, or write "panic" +to ``/sys/module/kfence/parameters/fault``. + A typical out-of-bounds access looks like this:: ================================================================== @@ -258,7 +387,7 @@ object page are "guard pages", whose attributes are changed to a protected state, and cause page faults on any attempted access. Such page faults are then intercepted by KFENCE, which handles the fault gracefully by reporting an out-of-bounds access, and marking the page as accessible so that the faulting -code can (wrongly) continue executing (set ``panic_on_warn`` to panic instead). +code can (wrongly) continue executing. To detect out-of-bounds writes to memory within the object's page itself, KFENCE also uses pattern-based redzones. For each object page, a redzone is set diff --git a/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d542ecb1a7d6c3e3ec820179de29f1dd4259bb4 --- /dev/null +++ b/Documentation/devicetree/bindings/arm/arm,mpam-msc.yaml @@ -0,0 +1,227 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/arm/arm,mpam-msc.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Arm Memory System Resource Partitioning and Monitoring (MPAM) + +description: | + The Arm MPAM specification can be found here: + + https://developer.arm.com/documentation/ddi0598/latest + +maintainers: + - Rob Herring + +properties: + compatible: + items: + - const: arm,mpam-msc # Further details are discoverable + - const: arm,mpam-memory-controller-msc + + reg: + maxItems: 1 + description: A memory region containing registers as defined in the MPAM + specification. 
+ + interrupts: + minItems: 1 + items: + - description: error (optional) + - description: overflow (optional, only for monitoring) + + interrupt-names: + oneOf: + - items: + - enum: [ error, overflow ] + - items: + - const: error + - const: overflow + + arm,not-ready-us: + description: The maximum time in microseconds for monitoring data to be + accurate after a settings change. For more information, see the + Not-Ready (NRDY) bit description in the MPAM specification. + + numa-node-id: true # see NUMA binding + + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + +patternProperties: + '^ris@[0-9a-f]$': + type: object + additionalProperties: false + description: | + RIS nodes for each RIS in an MSC. These nodes are required for each RIS + implementing known MPAM controls + + properties: + compatible: + enum: + # Bulk storage for cache + - arm,mpam-cache + # Memory bandwidth + - arm,mpam-memory + + reg: + minimum: 0 + maximum: 0xf + + cpus: + $ref: '/schemas/types.yaml#/definitions/phandle-array' + description: + Phandle(s) to the CPU node(s) this RIS belongs to. By default, the parent + device's affinity is used. + + arm,mpam-device: + $ref: '/schemas/types.yaml#/definitions/phandle' + description: + By default, the MPAM enabled device associated with a RIS is the MSC's + parent node. It is possible for each RIS to be associated with different + devices in which case 'arm,mpam-device' should be used. + + required: + - compatible + - reg + +required: + - compatible + - reg + +dependencies: + interrupts: [ interrupt-names ] + +additionalProperties: false + +examples: + - | + /* + cpus { + cpu@0 { + next-level-cache = <&L2_0>; + }; + cpu@100 { + next-level-cache = <&L2_1>; + }; + }; + */ + L2_0: cache-controller-0 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L2_1: cache-controller-1 { + compatible = "cache"; + cache-level = <2>; + cache-unified; + next-level-cache = <&L3>; + + }; + + L3: cache-controller@30000000 { + compatible = "arm,dsu-l3-cache", "cache"; + cache-level = <3>; + cache-unified; + + ranges = <0x0 0x30000000 0x800000>; + #address-cells = <1>; + #size-cells = <1>; + + msc@10000 { + compatible = "arm,mpam-msc"; + + /* CPU affinity implied by parent cache node's */ + reg = <0x10000 0x2000>; + interrupts = <1>, <2>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + }; + }; + + mem: memory-controller@20000 { + compatible = "foo,a-memory-controller"; + reg = <0x20000 0x1000>; + + #address-cells = <1>; + #size-cells = <1>; + ranges; + + msc@21000 { + compatible = "arm,mpam-memory-controller-msc", "arm,mpam-msc"; + reg = <0x21000 0x1000>; + interrupts = <3>; + interrupt-names = "error"; + arm,not-ready-us = <1>; + numa-node-id = <1>; + }; + }; + + iommu@40000 { + reg = <0x40000 0x1000>; + + ranges; + #address-cells = <1>; + #size-cells = <1>; + + msc@41000 { + compatible = "arm,mpam-msc"; + reg = <0 0x1000>; + interrupts = <5>, <6>; + interrupt-names = "error", "overflow"; + arm,not-ready-us = <1>; + + #address-cells = <1>; + #size-cells = <0>; + + ris@2 { + compatible = "arm,mpam-cache"; + reg = <0>; + // TODO: How to map to device(s)? 
+            };
+        };
+    };
+
+    msc@80000 {
+        compatible = "foo,a-standalone-msc";
+        reg = <0x80000 0x1000>;
+
+        clocks = <&clks 123>;
+
+        ranges;
+        #address-cells = <1>;
+        #size-cells = <1>;
+
+        msc@10000 {
+            compatible = "arm,mpam-msc";
+
+            reg = <0x10000 0x2000>;
+            interrupts = <7>;
+            interrupt-names = "overflow";
+            arm,not-ready-us = <1>;
+
+            #address-cells = <1>;
+            #size-cells = <0>;
+
+            ris@0 {
+                compatible = "arm,mpam-cache";
+                reg = <0>;
+                arm,mpam-device = <&L2_0>;
+            };
+
+            ris@1 {
+                compatible = "arm,mpam-memory";
+                reg = <1>;
+                arm,mpam-device = <&mem>;
+            };
+        };
+    };
+
+...
diff --git a/Documentation/devicetree/bindings/gpu/phytium,dc.yaml b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5be348f6e23f7b37d345e1fe568a33d8e1239d7f
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpu/phytium,dc.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/gpu/phytium,dc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Phytium Display Controller
+
+maintainers:
+  - Chen Baozi
+
+allOf:
+  - $ref: /schemas/dc/display-controller.yaml#
+
+properties:
+  compatible:
+    const: phytium,dc
+
+  reg:
+    minItems: 1
+    items:
+      - description: Offset and length of the memory mapped registers
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    minItems: 1
+    items:
+      - description: Display controller reference clock source
+
+required:
+  - compatible
+  - reg
+  - interrupts
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    /memreserve/ 0xf4000000 0x4000000; // (optional)
+
+    dc0@32000000 {
+        compatible = "phytium,dc";
+        reg = <0x0 0x32000000 0x0 0x8000>,
+              <0x0 0xf4000000 0x0 0x4000000>; // (optional)
+        interrupts = ;
+        pipe_mask = <0x3>;
+        edp_mask = <0x0>;
+    };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 573578db9509102783fee841fdf7caf82f0eb6c2..133cfb2bb05cef2afecf0d060226b4eed8d0d8f0 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -1045,6 +1045,8 @@ patternProperties:
     description: PHICOMM Co., Ltd.
   "^phytec,.*":
     description: PHYTEC Messtechnik GmbH
+  "^phytium,.*":
+    description: Phytium Technology Co., Ltd.
   "^picochip,.*":
     description: Picochip Ltd
   "^pine64,.*":
diff --git a/Documentation/filesystems/erofs.rst b/Documentation/filesystems/erofs.rst
index f200d78744952825790e105f8903c44d1601e02e..445224817823271ae38641eaa3af907f476e2665 100644
--- a/Documentation/filesystems/erofs.rst
+++ b/Documentation/filesystems/erofs.rst
@@ -91,6 +91,10 @@ compatibility checking tool (fsck.erofs), and a debugging tool (dump.erofs):
 
 - git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs-utils.git
 
+For more information, please also refer to the documentation site:
+
+- https://erofs.docs.kernel.org
+
 Bugs and patches are welcome, please kindly help us and send to the following
 linux-erofs mailing list:
 
diff --git a/Documentation/filesystems/fuse-io.rst b/Documentation/filesystems/fuse-io.rst
index 255a368fe534b4582c9be673523330e962803123..6464de4266ad504f8bdc89c834230f3c9f5219dc 100644
--- a/Documentation/filesystems/fuse-io.rst
+++ b/Documentation/filesystems/fuse-io.rst
@@ -15,7 +15,8 @@ The direct-io mode can be selected with the FOPEN_DIRECT_IO flag in the
 FUSE_OPEN reply. In direct-io mode the page cache is completely bypassed for
 reads and writes.
 
-No read-ahead takes place. Shared mmap is disabled.
+No read-ahead takes place. Shared mmap is disabled by default. To allow shared
+mmap, the FUSE_DIRECT_IO_ALLOW_MMAP flag may be enabled in the FUSE_INIT reply.
 
 In cached mode reads may be satisfied from the page cache, and data may be
 read-ahead by the kernel to fill the cache. The cache is always kept consistent
diff --git a/Documentation/gpu/hydcu-fixup-header.rst b/Documentation/gpu/hydcu-fixup-header.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5dca3ff3a137d4d19d193736de954ecfd3053f04
--- /dev/null
+++ b/Documentation/gpu/hydcu-fixup-header.rst
@@ -0,0 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0-only
+
+======================================
+ drm/hygon/hydcu-fixup-header driver
+======================================
+
+The drm/hygon/hydcu-fixup-header driver supports all HYGON DCUs.
+
+General description
+======================
+
+The drm/hygon/hydcu-fixup-header driver adds the NO_BUS_RESET flag to HYDCU
+devices to disable VFIO PCI reset, which the DCU does not support yet.
diff --git a/Documentation/scheduler/sched-bwc.rst b/Documentation/scheduler/sched-bwc.rst
index 41ed2ceafc92ee1b09148913e50c762c16542663..329b00ba40f3949a6e66f6a897f1bb59c5ed49c4 100644
--- a/Documentation/scheduler/sched-bwc.rst
+++ b/Documentation/scheduler/sched-bwc.rst
@@ -122,9 +122,16 @@ This is tunable via procfs::
 Larger slice values will reduce transfer overheads, while smaller values
 allow for more fine-grained consumption.
 
+Sometimes users might want a group to burst without accumulation. This is
+tunable via::
+
+ /proc/sys/kernel/sched_cfs_bw_burst_onset_percent (default=0)
+
+Up to 100% of cpu.cfs_burst_us runtime may be granted when bandwidth is set.
+
 Statistics
 ----------
-A group's bandwidth statistics are exported via 5 fields in cpu.stat.
+A group's bandwidth statistics are exported via 6 fields in cpu.stat.
 
 cpu.stat:
 
@@ -132,6 +139,7 @@ cpu.stat:
 - nr_throttled: Number of times the group has been throttled/limited.
 - throttled_time: The total time duration (in nanoseconds) for which entities
   of the group have been throttled.
+- current_bw: Current runtime remaining in the global pool.
 - nr_bursts: Number of periods burst occurs.
 - burst_time: Cumulative wall-time (in nanoseconds) that any CPUs has used
   above quota in respective periods.
diff --git a/Documentation/virt/coco/csv-guest.rst b/Documentation/virt/coco/csv-guest.rst
new file mode 100644
index 0000000000000000000000000000000000000000..23cba2a5fd7c093bea25d6c1cc0f1ce41846d015
--- /dev/null
+++ b/Documentation/virt/coco/csv-guest.rst
@@ -0,0 +1,33 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
+CSV Guest API Documentation
+===================================================================
+
+1. General description
+======================
+
+The CSV guest driver exposes IOCTL interfaces via the /dev/csv-guest misc
+device to allow userspace to get certain CSV guest-specific details.
+
+2. API description
+==================
+
+In this section, the following information is provided for each supported
+IOCTL, along with a general description.
+
+:Input parameters: Parameters passed to the IOCTL and related details.
+:Output: Details about the output data and return value (including details
+         about uncommon error values).
+
+2.1 CSV_CMD_GET_REPORT
+-----------------------
+
+:Input parameters: struct csv_report_req
+:Output: Upon successful execution, CSV_REPORT data is copied to
+         csv_report_req.report_data and 0 is returned.
Return -EINVAL for invalid + operands, -EIO on VMMCALL failure or standard error number on other + common failures. + +The CSV_CMD_GET_REPORT IOCTL can be used by the attestation software to get +the CSV_REPORT from the CSV module using VMMCALL[KVM_HC_VM_ATTESTATION]. diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 21a7578142a18b4ad537acbd654ba510dd16fc9f..edc682a94ca4f8a727c5cdecab763d67883de0f9 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -416,6 +416,13 @@ Reads the general purpose registers from the vcpu. __u64 pc; }; + /* LoongArch */ + struct kvm_regs { + /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ + unsigned long gpr[32]; + unsigned long pc; + }; + 4.12 KVM_SET_REGS ----------------- @@ -506,7 +513,7 @@ translation mode. ------------------ :Capability: basic -:Architectures: x86, ppc, mips, riscv +:Architectures: x86, ppc, mips, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_interrupt (in) :Returns: 0 on success, negative on failure. @@ -592,6 +599,14 @@ b) KVM_INTERRUPT_UNSET This is an asynchronous vcpu ioctl and can be invoked from any thread. +LOONGARCH: +^^^^^^^^^^ + +Queues an external interrupt to be injected into the virtual CPU. A negative +interrupt number dequeues the interrupt. + +This is an asynchronous vcpu ioctl and can be invoked from any thread. + 4.17 KVM_DEBUG_GUEST -------------------- @@ -737,7 +752,7 @@ signal mask. ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (out) :Returns: 0 on success, -1 on error @@ -746,7 +761,7 @@ Reads the floating point state from the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -761,12 +776,21 @@ Reads the floating point state from the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.23 KVM_SET_FPU ---------------- :Capability: basic -:Architectures: x86 +:Architectures: x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_fpu (in) :Returns: 0 on success, -1 on error @@ -775,7 +799,7 @@ Writes the floating point state to the vcpu. :: - /* for KVM_GET_FPU and KVM_SET_FPU */ + /* x86: for KVM_GET_FPU and KVM_SET_FPU */ struct kvm_fpu { __u8 fpr[8][16]; __u16 fcw; @@ -790,6 +814,15 @@ Writes the floating point state to the vcpu. __u32 pad2; }; + /* LoongArch: for KVM_GET_FPU and KVM_SET_FPU */ + struct kvm_fpu { + __u32 fcsr; + __u64 fcc; + struct kvm_fpureg { + __u64 val64[4]; + }fpr[32]; + }; + 4.24 KVM_CREATE_IRQCHIP ----------------------- @@ -1387,7 +1420,7 @@ documentation when it pops into existence). ------------------- :Capability: KVM_CAP_ENABLE_CAP -:Architectures: mips, ppc, s390, x86 +:Architectures: mips, ppc, s390, x86, loongarch :Type: vcpu ioctl :Parameters: struct kvm_enable_cap (in) :Returns: 0 on success; -1 on error @@ -1442,7 +1475,7 @@ for vm-wide capabilities. 
--------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (out) :Returns: 0 on success; -1 on error @@ -1460,7 +1493,7 @@ Possible values are: ========================== =============================================== KVM_MP_STATE_RUNNABLE the vcpu is currently running - [x86,arm64,riscv] + [x86,arm64,riscv,loongarch] KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP) which has not yet received an INIT signal [x86] KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is @@ -1516,11 +1549,14 @@ For riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu is paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.39 KVM_SET_MP_STATE --------------------- :Capability: KVM_CAP_MP_STATE -:Architectures: x86, s390, arm64, riscv +:Architectures: x86, s390, arm64, riscv, loongarch :Type: vcpu ioctl :Parameters: struct kvm_mp_state (in) :Returns: 0 on success; -1 on error @@ -1538,6 +1574,9 @@ For arm64/riscv: The only states that are valid are KVM_MP_STATE_STOPPED and KVM_MP_STATE_RUNNABLE which reflect if the vcpu should be paused or not. +On LoongArch, only the KVM_MP_STATE_RUNNABLE state is used to reflect +whether the vcpu is runnable. + 4.40 KVM_SET_IDENTITY_MAP_ADDR ------------------------------ @@ -2841,6 +2880,19 @@ Following are the RISC-V D-extension registers: 0x8020 0000 0600 0020 fcsr Floating point control and status register ======================= ========= ============================================= +LoongArch registers are mapped using the lower 32 bits. The upper 16 bits of +that is the register group type. + +LoongArch csr registers are used to control guest cpu or get status of guest +cpu, and they have the following id bit patterns:: + + 0x9030 0000 0001 00 (64-bit) + +LoongArch KVM control registers are used to implement some new defined functions +such as set vcpu counter or reset vcpu, and they have the following id bit patterns:: + + 0x9030 0000 0002 + 4.69 KVM_GET_ONE_REG -------------------- diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst index ad13ec55ddfe5110ab8922a5aafe1732951209de..9ca5a45c2140a9f3dfc1dc58b28a871f4c3b844f 100644 --- a/Documentation/virt/kvm/index.rst +++ b/Documentation/virt/kvm/index.rst @@ -14,6 +14,7 @@ KVM s390/index ppc-pv x86/index + loongarch/index locking vcpu-requests diff --git a/Documentation/virt/kvm/loongarch/hypercalls.rst b/Documentation/virt/kvm/loongarch/hypercalls.rst new file mode 100644 index 0000000000000000000000000000000000000000..1679e48d67d28c4ee30cdc9120d74ae2f54cda57 --- /dev/null +++ b/Documentation/virt/kvm/loongarch/hypercalls.rst @@ -0,0 +1,79 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=================================== +The LoongArch paravirtual interface +=================================== + +KVM hypercalls use the HVCL instruction with code 0x100, and the hypercall +number is put in a0 and up to five arguments may be placed in a1-a5, the +return value is placed in v0 (alias with a0). 
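+
+A minimal sketch of a guest-side wrapper for this calling convention follows
+(assuming GCC-style inline assembly; the function name kvm_hypercall1 and the
+single-argument signature are illustrative, not part of the ABI)::
+
+	/* Issue a hypercall with one argument; the code comes back in v0/a0. */
+	static inline long kvm_hypercall1(unsigned long fid, unsigned long arg0)
+	{
+		/* a0 carries the function number in and the return code out. */
+		register unsigned long a0 __asm__("a0") = fid;
+		register unsigned long a1 __asm__("a1") = arg0;
+
+		__asm__ __volatile__("hvcl 0x100"
+				     : "+r" (a0)
+				     : "r" (a1)
+				     : "memory");
+		return a0;
+	}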
+
+The code for that interface can be found in arch/loongarch/kvm/*
+
+Querying for existence
+======================
+
+To find out whether we are running on KVM, cpucfg can be used with index
+CPUCFG_KVM_BASE (0x40000000). The cpucfg range 0x40000000 - 0x400000FF is
+a specially reserved range; no existing or future processor will implement
+any features in this range.
+
+When Linux is running on KVM, cpucfg with index CPUCFG_KVM_BASE (0x40000000)
+returns the magic string "KVM\0".
+
+Once you have determined that you are running under a PV-capable KVM, you
+can use the hypercalls described below.
+
+KVM hypercall ABI
+=================
+
+The KVM hypercall ABI is simple: a0 (v0) is the only scratch register, and
+at most five general-purpose registers are used as input parameters. FP and
+vector registers are not used for input and must not be modified during a
+hypercall. Hypercall functions can be inlined since only a0 is clobbered.
+
+The parameters are as follows:
+
+        ======== ================ ================
+        Register IN               OUT
+        ======== ================ ================
+        a0       function number  Return code
+        a1       1st parameter    -
+        a2       2nd parameter    -
+        a3       3rd parameter    -
+        a4       4th parameter    -
+        a5       5th parameter    -
+        ======== ================ ================
+
+Return codes can be as follows:
+
+        ==== =========================
+        Code Meaning
+        ==== =========================
+        0    Success
+        -1   Hypercall not implemented
+        -2   Hypercall parameter error
+        ==== =========================
+
+KVM Hypercalls Documentation
+============================
+
+The template for each hypercall is:
+1. Hypercall name
+2. Purpose
+
+1. KVM_HCALL_FUNC_PV_IPI
+------------------------
+
+:Purpose: Send IPIs to multiple vCPUs.
+
+- a0: KVM_HCALL_FUNC_PV_IPI
+- a1: lower part of the bitmap of destination physical CPUIDs
+- a2: higher part of the bitmap of destination physical CPUIDs
+- a3: the lowest physical CPUID in the bitmap
+
+The hypercall lets a guest send multicast IPIs, with at most 128
+destinations per hypercall. The destinations are represented by a bitmap
+contained in the first two arguments (a1 and a2). Bit 0 of a1 corresponds
+to the physical CPUID in the third argument (a3), bit 1 corresponds to the
+physical CPUID a3+1, and so on.
diff --git a/Documentation/virt/kvm/loongarch/index.rst b/Documentation/virt/kvm/loongarch/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..83387b4c53455033acaac2ddceb404d898ccaa39
--- /dev/null
+++ b/Documentation/virt/kvm/loongarch/index.rst
@@ -0,0 +1,10 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+KVM for LoongArch systems
+=========================
+
+.. 
toctree:: + :maxdepth: 2 + + hypercalls.rst diff --git a/MAINTAINERS b/MAINTAINERS index dd5de540ec0b52c4222c6fd1df24d10d3b7d9090..4117c6b815b5801f7c13082e214651810ee7e6c7 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7741,6 +7741,7 @@ R: Yue Hu R: Jeffle Xu L: linux-erofs@lists.ozlabs.org S: Maintained +W: https://erofs.docs.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs.git F: Documentation/ABI/testing/sysfs-fs-erofs F: Documentation/filesystems/erofs.rst @@ -11522,6 +11523,18 @@ F: include/kvm/arm_* F: tools/testing/selftests/kvm/*/aarch64/ F: tools/testing/selftests/kvm/aarch64/ +KERNEL VIRTUAL MACHINE FOR LOONGARCH (KVM/LoongArch) +M: Tianrui Zhao +M: Bibo Mao +M: Huacai Chen +L: kvm@vger.kernel.org +L: loongarch@lists.linux.dev +S: Maintained +T: git git://git.kernel.org/pub/scm/virt/kvm/kvm.git +F: arch/loongarch/include/asm/kvm* +F: arch/loongarch/include/uapi/asm/kvm* +F: arch/loongarch/kvm/ + KERNEL VIRTUAL MACHINE FOR MIPS (KVM/mips) M: Huacai Chen L: linux-mips@vger.kernel.org @@ -18062,6 +18075,8 @@ S: Supported F: Documentation/arch/x86/resctrl* F: arch/x86/include/asm/resctrl.h F: arch/x86/kernel/cpu/resctrl/ +F: fs/resctrl/ +F: include/linux/resctrl*.h F: tools/testing/selftests/resctrl/ READ-COPY UPDATE (RCU) @@ -23864,6 +23879,24 @@ L: linux-kernel@vger.kernel.org S: Maintained F: arch/x86/kernel/cpu/zhaoxin.c +ZHAOXIN PMU UNCORE SUPPORT +M: Leoliu-oc +S: Maintained +F: arch/x86/events/zhaoxin/core.c +F: arch/x86/events/zhaoxin/uncore.c +F: arch/x86/events/zhaoxin/uncore.h + +ZHAOXIN TEMPERATURE MONITORING DRIVERS +M: Leoliu-oc +L: linux-hwmon@vger.kernel.org +S: Maintained +F: drivers/hwmon/zhaoxin-cputemp.c + +ZHAOXIN ZXPAUSE INSTRUCTION SUPPORT +M: LeoLiu-oc +S: Maintained +F: arch/x86/kernel/cpu/zxpause.c + ZONEFS FILESYSTEM M: Damien Le Moal M: Naohiro Aota diff --git a/anolis/.gitignore b/anolis/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9b1960e711fc1719053c5908bb5ff3cc321789e5 --- /dev/null +++ b/anolis/.gitignore @@ -0,0 +1 @@ +output/ \ No newline at end of file diff --git a/anolis/Makefile b/anolis/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..069ceab2c498663811dbdfd4dbf9933a3f94aa5e --- /dev/null +++ b/anolis/Makefile @@ -0,0 +1,87 @@ +include Makefile.variables + +all: help examples + +dist-check: + @if [ "${DIST_BUILD_MODE}" == "official" ]; then \ + if [ "$(shell git describe --tags --exact-match HEAD 2>/dev/null)" != "${DIST_ANOLIS_VERSION}" ]; then \ + echo "Error: For official build, the tag ${DIST_ANOLIS_VERSION} should point to HEAD"; \ + exit 1; \ + fi \ + fi + @if [ "${DIST_BUILD_MODE}" == "diy" ] && [ -z "${DIST_DIY}" ]; then \ + echo "Error: For diy build, the variable DIST_DIY should not be empty"; \ + exit 1; \ + fi + +dist-genlog: + sh genlog.sh + +dist-genspec: dist-check + sh genspec.sh + +dist-genrpmtree: dist-check + sh genrpmtree.sh + +dist-rpms: dist-genrpmtree dist-check + sh buildpkg.sh + +clean: + rm -rf $(DIST_OUTPUT) + +dist-version: + @echo $(DIST_ANOLIS_VERSION) + +examples: + @echo '' + @echo 'Build Examples:' + @echo '- *RECOMMEND* devel build with basic rpm packages' + @echo ' DIST_BUILD_MODE=devel DIST_BUILD_EXTRA=base make dist-rpms' + @echo '- *RECOMMEND* devel build with full rpm packages' + @echo ' DIST_BUILD_MODE=devel make dist-rpms' + @echo '- *RECOMMEND* nightly build with full rpm packages' + @echo ' DIST_BUILD_MODE=nightly make dist-rpms' + @echo '- diy build with basic rpm packages' + @echo ' DIST_BUILD_MODE=diy 
DIST_DIY="your_diy_name" DIST_BUILD_EXTRA=base make dist-rpms'
+	@echo ''
+	@echo 'Kernel Version Examples:'
+	@echo '- show the kernel version of devel mode'
+	@echo '  DIST_BUILD_MODE=devel make dist-version'
+	@echo '- show the kernel version of diy mode'
+	@echo '  DIST_BUILD_MODE=diy DIST_DIY="your_diy_name" make dist-version'
+	@echo ''
+	@echo 'Other Examples:'
+	@echo '- only generate rpm tree in devel mode, but do not build rpm packages'
+	@echo '  DIST_BUILD_MODE=devel make dist-genrpmtree'
+	@echo '- cleanup'
+	@echo '  make clean'
+
+help:
+	@echo 'For anolis release'
+	@echo ''
+	@echo 'RUN `make examples` for some examples'
+	@echo '--------------------------------'
+	@echo 'generic commands:'
+	@echo ' dist-genspec - generate kernel spec file through kernel.spec.template and changelog files'
+	@echo ' dist-genlog - generate changelogs'
+	@echo ' dist-genrpmtree - generate rpm tree'
+	@echo ' dist-rpms - build kernel rpm packages; they will be generated in $(DIST_SHORT_OUTPUT)'
+	@echo ' dist-version - show dist version'
+	@echo ' clean - cleanup output dir'
+	@echo ' examples - show some examples'
+	@echo ''
+	@echo '-------------------------------'
+	@echo 'the environment variables that can be overridden:'
+	@echo ' DIST - the distribution suffix, eg: .an7, .an8, .an23'
+	@echo ' DIST_OUTPUT - the output directory, default: $(DIST_SHORT_OUTPUT)'
+	@echo ' DIST_BUILD_MODE - the build mode. optional: official/nightly/devel/diy'
+	@echo ' !!! NOTE: BE CAUTIOUS ABOUT USING official BUILD !!!'
+	@echo ' - official build. kernel version: $(DIST_KERNELVERSION)-$(DIST_OFFICIAL_PKGRELEASEVERION), with srpm'
+	@echo ' - nightly build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), with srpm'
+	@echo ' - devel build. kernel version: $(DIST_KERNELVERSION)-$(DIST_UNOFFICIAL_PKGRELEASEVERION), without srpm'
+	@echo ' - diy build. kernel version: $(DIST_KERNELVERSION)-$(DIST_DIY_PKGRELEASEVERION), with srpm'
+	@echo ' DIST_BUILD_NUMBER - the build number for unofficial build, eg: 1/2'
+	@echo ' DIST_DIY - the custom version string for diy build'
+	@echo ' DIST_BUILD_VARIANT & DIST_BUILD_EXTRA - see comments in buildpkg.sh'
+
+export
\ No newline at end of file
diff --git a/anolis/Makefile.variables b/anolis/Makefile.variables
new file mode 100644
index 0000000000000000000000000000000000000000..d806e7988230170bb2b1fd6a4af31b1a755d4397
--- /dev/null
+++ b/anolis/Makefile.variables
@@ -0,0 +1,80 @@
+# the global environment variables, which will be passed to shell scripts
+# all variables start with DIST_, to avoid influencing the kernel build
+
+# the dist suffix, eg: .an7, .an8, .an23
+DIST ?= .an23
+
+# build mode:
+# - official build, the kernel version looks like: 5.10.134-15.1_rc1, and a source rpm is also generated
+# - nightly build, the kernel version looks like: 5.10.134-1.git.6235a991a61d, and a source rpm is also generated
+# - devel build, same as nightly build, but without a source rpm
+DIST_BUILD_MODE ?= devel
+
+# the package release version.
+# eg: for ANCK 5.10-015.1, the major version is 15, the minor version is 1
+DIST_RELEASE_MAJOR_VERSION = 1
+DIST_RELEASE_MINOR_VERSION =
+
+# testing stage.
+# eg: alpha, beta, rc +DIST_TESTING_STAGE = rc +DIST_TESTING_STAGE_MAJOR_VERSION = 1 +DIST_TESTING_STAGE_MINOR_VERSION = + +# special versions, eg: the pgo version +DIST_SPECIAL_VERSION_NAME = +DIST_SPECIAL_VERSION_MAJOR = +DIST_SPECIAL_VERSION_MINOR = + +# build number +DIST_BUILD_NUMBER ?= 1 + +# the kernel root +DIST_SRCROOT = $(shell realpath ..)/ +DIST_SOURCES = $(DIST_SRCROOT)anolis/ +DIST_RPM = $(DIST_SOURCES)rpm/ +DIST_CHANGELOG = $(DIST_SOURCES)changelog/ + +# the output directory +DIST_OUTPUT ?= $(DIST_SOURCES)output/ +DIST_RPMBUILDDIR_OUTPUT = ${DIST_OUTPUT}/rpmbuild +DIST_SHORT_OUTPUT=$(subst $(DIST_SRCROOT),,$(DIST_OUTPUT)) + +DIST_SPEC_TEMPLATE = kernel.spec.template +DIST_SPEC_FILE = kernel.spec + +# generate anolis kernel version + +# kernel version for offical build +DIST_RELEASE_VERSION = $(DIST_RELEASE_MAJOR_VERSION)$(if $(DIST_RELEASE_MINOR_VERSION),.$(DIST_RELEASE_MINOR_VERSION)) +DIST_SPECIAL_VERSION = $(if $(DIST_SPECIAL_VERSION_NAME),.$(DIST_SPECIAL_VERSION_NAME)$(if $(DIST_SPECIAL_VERSION_MAJOR),.$(DIST_SPECIAL_VERSION_MAJOR))$(if $(DIST_SPECIAL_VERSION_MINOR),.$(DIST_SPECIAL_VERSION_MINOR))) +DIST_TESTING_VERSION = $(if $(DIST_TESTING_STAGE),_$(DIST_TESTING_STAGE)$(if $(DIST_TESTING_STAGE_MAJOR_VERSION),$(DIST_TESTING_STAGE_MAJOR_VERSION))$(if $(DIST_TESTING_STAGE_MINOR_VERSION),.$(DIST_TESTING_STAGE_MINOR_VERSION))) +DIST_LINUXVERSION:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^VERSION\ =\ /{s///;p;q}') +DIST_LINUXKPATCHLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^PATCHLEVEL\ =\ /{s///;p;q}') +DIST_LINUXKSUBLEVEL:=$(shell cat $(DIST_SRCROOT)/Makefile | sed -ne '/^SUBLEVEL\ =\ /{s///;p;q}') +DIST_KERNELVERSION = $(DIST_LINUXVERSION).$(DIST_LINUXKPATCHLEVEL).$(DIST_LINUXKSUBLEVEL) +DIST_OFFICIAL_PKGRELEASEVERION = $(DIST_RELEASE_VERSION)$(DIST_SPECIAL_VERSION)$(DIST_TESTING_VERSION) + +# kernel version for unoffical build +DIST_GIT_HEAD_SHORT_COMMIT_ID = $(shell git rev-parse --short HEAD) +DIST_GIT_HEAD_FULL_COMMIT_ID = $(shell git rev-parse HEAD) +DIST_UNOFFICIAL_PKGRELEASEVERION = ${DIST_BUILD_NUMBER}.git.$(DIST_GIT_HEAD_SHORT_COMMIT_ID) + +# kernel version for diy build +DIST_DIY_PKGRELEASEVERION = ${DIST_DIY}.diy + +# final kernel version +ifeq ("${DIST_BUILD_MODE}", "official") +DIST_PKGRELEASEVERION = $(DIST_OFFICIAL_PKGRELEASEVERION) +else ifeq ("${DIST_BUILD_MODE}", "diy") +DIST_PKGRELEASEVERION = $(DIST_DIY_PKGRELEASEVERION) +else +DIST_PKGRELEASEVERION = $(DIST_UNOFFICIAL_PKGRELEASEVERION) +endif +DIST_ANOLIS_VERSION = $(DIST_KERNELVERSION)-$(DIST_PKGRELEASEVERION) + +# the package id used for compress kernel tarball: +# for official build, we compress tarball from tag +# for unofficial build, we compress tarball from git HEAD +DIST_PKG_COMMIT_ID = $(if $(DIST_OFFICIAL_BUILD),$(DIST_ANOLIS_VERSION),$(DIST_GIT_HEAD_FULL_COMMIT_ID)) + diff --git a/anolis/buildpkg.sh b/anolis/buildpkg.sh new file mode 100644 index 0000000000000000000000000000000000000000..34ff6a929f1de8d54b58b543457f9fb0f772bf2a --- /dev/null +++ b/anolis/buildpkg.sh @@ -0,0 +1,89 @@ +set -xe + +function do_rpmbuild() { + if [ "$DIST_BUILD_MODE" == "official" ] || \ + [ "$DIST_BUILD_MODE" == "nightly" ] || \ + [ "$DIST_BUILD_MODE" == "diy" ]; then + CMD="-ba" + else + CMD="-bb" + fi + + # Now we have: + # + variants: default, only-debug, with-debug + # + extras: base, with-debuginfo, full + # + modes: official, nightly, dev + #TODO: add with-gcov + # + # Matrix + # + # | BuildMode | KernelName | GenerateSrpm | + # |-----------|-----------------|--------------| + # | 
official | without sha id | Yes | + # | nightly | with git sha id | Yes | + # | devel | with git sha id | No | + # + # | Extra\Var | Default | Only-debug | With-debug | + # |-----------|----------|------------|------------| + # | Base | +default | -default | +default | + # | | -debug | +debug | +debug | + # | | +headers | + # |-----------|------------------------------------| + # | debuginfo | +debuginfo | + # |-----------|------------------------------------| + # | full | +tools +doc +perf | + # + # Note: pre-release mode will always be "full" and "with-debug" by default + + build_opts="--with headers --without bpftool --without signmodules" + + if [ "_${DIST_BUILD_VARIANT}" == "_only-debug" ]; then + build_opts="$build_opts --without default --with debug" + elif [ "_${DIST_BUILD_VARIANT}" == "_with-debug" ]; then + build_opts="$build_opts --with default --with debug" + else # assume default + build_opts="$build_opts --with default --without debug" + fi + + if [ "_${DIST_BUILD_EXTRA}" == "_debuginfo" ]; then + build_opts="$build_opts --with debuginfo --without tools --without doc --without perf" + elif [ "_${DIST_BUILD_EXTRA}" == "_base" ]; then + build_opts="$build_opts --without debuginfo --without tools --without doc --without perf" + else # assume full + build_opts="$build_opts --with debuginfo --with tools --with doc --with perf" + fi + + # launch a new shell to clear current environment variables passed by Makefile + rpmbuild \ + --define "%_smp_mflags -j$(nproc)" \ + --define "%packager " \ + --define "%_topdir ${DIST_RPMBUILDDIR_OUTPUT}" \ + ${build_opts} \ + ${CMD} ${DIST_RPMBUILDDIR_OUTPUT}/SPECS/kernel.spec \ + --target=$(uname -m) || exit 1 +} + +function output() { + if [ -z "$DIST_OFFICIAL_BUILD" ]; then + targetdir=${DIST_BUILD_NUMBER} + else + targetdir=${DIST_ANOLIS_VERSION} + fi + + mkdir -p ${DIST_OUTPUT}/${targetdir} + + cp ${DIST_RPMBUILDDIR_OUTPUT}/RPMS/$(uname -m)/*.rpm ${DIST_OUTPUT}/${targetdir}/ + + # copy srpm packages if and only if they exist. + if [ -f ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ]; then + cp ${DIST_RPMBUILDDIR_OUTPUT}/SRPMS/*.rpm ${DIST_OUTPUT}/${targetdir} + fi + + ls ${DIST_OUTPUT}/${targetdir}/*.rpm + + rpm_num=$(ls ${DIST_OUTPUT}/${targetdir}/*.rpm | wc -l) + echo "${rpm_num} rpm(s) copied." 
+} + +do_rpmbuild +output \ No newline at end of file diff --git a/anolis/changelog/000-changelog.base b/anolis/changelog/000-changelog.base new file mode 100644 index 0000000000000000000000000000000000000000..7bf7526d60c11b342db84dbb5667240420dd4686 --- /dev/null +++ b/anolis/changelog/000-changelog.base @@ -0,0 +1,2 @@ +* Fri Dec 15 2023 Qiao Ma [6.6.7-1_rc1%%DIST%%] +- anolis: bump kernel to 6.6.7 (Qiao Ma) \ No newline at end of file diff --git a/anolis/genlog.sh b/anolis/genlog.sh new file mode 100644 index 0000000000000000000000000000000000000000..a11293855fa59c2ce8cd91b9960741bbac3cdd02 --- /dev/null +++ b/anolis/genlog.sh @@ -0,0 +1,65 @@ +# by default, it generates changlogs from latest-tag to HEAD +function get_changelog_start_end() { + if [ -z "$CHANGELOG_START" ]; then + CHANGELOG_START=$(git describe --tags --abbrev=0) + fi + if [ -z "$CHANGELOG_START" ]; then + echo "cannot decide CHANGELOG_START" + exit 1 + fi + + if [ -z "$CHANGELOG_END" ]; then + CHANGELOG_END=$(git log --format="%H" -1 HEAD) + fi +} + +function get_author_sign() { + if [ -z "$AUTHOR_SIGN" ]; then + AUTHOR_SIGN=$(git var GIT_COMMITTER_IDENT |sed 's/>.*/>/') + fi + if [ -z "$AUTHOR_SIGN" ]; then + echo "unkonwn AUTHOR_SIGN" + exit 1 + fi +} + +function get_changelog_file_name() { + local file_base_name="changelog.${DIST_ANOLIS_VERSION}" + local files_num=$(ls ${DIST_CHANGELOG} | grep -E '[0-9]+-changelog.*' | wc -l) + local file_name=$(printf "%03d-${file_base_name}\n" ${files_num}) + CHANGELOG_FILE=${DIST_CHANGELOG}/${file_name} +} + +function generate_changelog() { + get_changelog_start_end + get_author_sign + get_changelog_file_name + + touch ${CHANGELOG_FILE} + echo "* $(date +"%a %b %d %Y") ${AUTHOR_SIGN} [${DIST_ANOLIS_VERSION}%%DIST%%]" > ${CHANGELOG_FILE} + + # TODO: + # 1. if config changes, add kernel config refresh log + # 2. if linux upstream kernel version updated, add related log + + local commits=$(git rev-list ${CHANGELOG_START}..${CHANGELOG_END}) + for commit in $commits + do + ## eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) + local log=$(git log --format='- %s (%an)' -1 ${commit}) + + ## eg: {CVE-2022-32250} + ## xargs is used to strip space + local cve_list=$(git log --format='%b' -1 ${commit} | grep -Eio '^[[:blank:]]*Fixes:[[:blank:]]*CVE-.*[[:blank:]]*$' | sed 's/fixes://ig' | xargs | sed 's/[[:blank:]]/,/') + local cve_fmt="" + if [ -n "${cve_list}" ]; then + cve_fmt=$(cat <<< "${cve_list}" | paste -sd "," -) + cve_fmt=" {${cve_fmt}}" + fi + ## merge them together, eg: - anolis: net/netfilter: rename nft_expr_info (Kangjie Xu) {CVE-2022-32250} + echo "${log}${cve_fmt}" >> ${CHANGELOG_FILE} + done + echo "" >> ${CHANGELOG_FILE} +} + +generate_changelog \ No newline at end of file diff --git a/anolis/genrpmtree.sh b/anolis/genrpmtree.sh new file mode 100644 index 0000000000000000000000000000000000000000..7f1f4036f797dd0d5d2a7ad75c0968517d44debc --- /dev/null +++ b/anolis/genrpmtree.sh @@ -0,0 +1,37 @@ +#! /bin/bash + +set -xe + +function do_prep() { + mkdir -p ${DIST_RPMBUILDDIR_OUTPUT} + mkdir -p ${DIST_RPMBUILDDIR_OUTPUT}/{BUILD,RPMS,SOURCES,SPECS,SRPMS} + + cp ${DIST_RPM}/cpupower* ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/ + cp ${DIST_RPM}/generate_bls_conf.sh ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/ + + # for official build, the corresponding tag should exist + if [ -n "$DIST_OFFICIAL_BUILD" ]; then + if ! 
git tag | grep -q -x "${DIST_PKG_COMMIT_ID}"; then + echo "cannot find official build tag: ${DIST_PKG_COMMIT_ID}" + exit 1 + fi + fi + + pkgname="linux-${DIST_ANOLIS_VERSION}${DIST}" + pushd ${DIST_SRCROOT} > /dev/null + git archive --format=tar --prefix="${pkgname}/" ${DIST_PKG_COMMIT_ID} | xz -T$(nproc) > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz + md5sum ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/${pkgname}.tar.xz > ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/download + popd > /dev/null + DIST_OUTPUT=${DIST_RPMBUILDDIR_OUTPUT}/SPECS/ sh genspec.sh + + cp ${DIST_SRCROOT}/arch/x86/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64.config + cp ${DIST_SRCROOT}/arch/x86/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-x86_64-debug.config + cp ${DIST_SRCROOT}/arch/arm64/configs/anolis_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64.config + cp ${DIST_SRCROOT}/arch/arm64/configs/anolis-debug_defconfig ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-aarch64-debug.config + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis_defconfig \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64.config + cp ${DIST_SRCROOT}/arch/loongarch/configs/anolis-debug_defconfig \ + ${DIST_RPMBUILDDIR_OUTPUT}/SOURCES/kernel-${DIST_KERNELVERSION}-loongarch64-debug.config +} + +do_prep \ No newline at end of file diff --git a/anolis/genspec.sh b/anolis/genspec.sh new file mode 100644 index 0000000000000000000000000000000000000000..cfd9f58c2a4edadf70a260c33c138262fc02d931 --- /dev/null +++ b/anolis/genspec.sh @@ -0,0 +1,16 @@ +#! /bin/bash +# generate kernel spec through spec template and changelog files. +# it it call from Makefile, do not run it directly. + +mkdir -p ${DIST_OUTPUT} +cp -f ${DIST_RPM}/${DIST_SPEC_TEMPLATE} ${DIST_OUTPUT}/${DIST_SPEC_FILE} + +for changelog_file in $(ls ${DIST_CHANGELOG} | sort) +do + sed -i "/%changelog/r ${DIST_CHANGELOG}/${changelog_file}" ${DIST_OUTPUT}/${DIST_SPEC_FILE} +done + +sed -i -e " + s/%%DIST%%/$DIST/ + s/%%DIST_KERNELVERSION%%/$DIST_KERNELVERSION/ + s/%%DIST_PKGRELEASEVERION%%/$DIST_PKGRELEASEVERION/" ${DIST_OUTPUT}/${DIST_SPEC_FILE} \ No newline at end of file diff --git a/anolis/rpm/cpupower.config b/anolis/rpm/cpupower.config new file mode 100644 index 0000000000000000000000000000000000000000..8629a4a3ede722c5b8c91b259441583dbe65ba6f --- /dev/null +++ b/anolis/rpm/cpupower.config @@ -0,0 +1,3 @@ +# See 'cpupower help' and cpupower(1) for more info +CPUPOWER_START_OPTS="frequency-set -g performance" +CPUPOWER_STOP_OPTS="frequency-set -g ondemand" diff --git a/anolis/rpm/cpupower.service b/anolis/rpm/cpupower.service new file mode 100644 index 0000000000000000000000000000000000000000..5f10ab7ee39a271b492c2a2bd9a24bdb26db7cb7 --- /dev/null +++ b/anolis/rpm/cpupower.service @@ -0,0 +1,13 @@ +[Unit] +Description=Configure CPU power related settings +After=syslog.target + +[Service] +Type=oneshot +RemainAfterExit=yes +EnvironmentFile=/etc/sysconfig/cpupower +ExecStart=/usr/bin/cpupower $CPUPOWER_START_OPTS +ExecStop=/usr/bin/cpupower $CPUPOWER_STOP_OPTS + +[Install] +WantedBy=multi-user.target diff --git a/anolis/rpm/generate_bls_conf.sh b/anolis/rpm/generate_bls_conf.sh new file mode 100755 index 0000000000000000000000000000000000000000..878696c12f338f947361b19266af48fc7b7b57a3 --- /dev/null +++ b/anolis/rpm/generate_bls_conf.sh @@ -0,0 +1,35 @@ +#!/bin/bash +set -e + +. 
/etc/os-release + +if [ "${ID}" == "anolis" ]; then + VERSION=${VERSION%%.*} +fi + +kernelver=$1 && shift +rootfs=$1 && shift +variant=$1 && shift + +output="${rootfs}/lib/modules/${kernelver}/bls.conf" +date=$(date -u +%Y%m%d%H%M%S) + +if [ "${variant:-5}" = "debug" ]; then + debugname=" with debugging" + debugid="-debug" +else + debugname="" + debugid="" +fi + +cat >${output} < in your rpmbuild command or force values +# to 0 in here to disable them. +# +# standard kernel +%define with_up %{?_without_up: 0} %{?!_without_up: 1} +# kernel-debug +%define with_debug %{?_without_debug: 0} %{?!_without_debug: 1} +# kernel-doc +%define with_doc %{?_without_doc: 0} %{?!_without_doc: 1} +# kernel-headers +%define with_headers %{?_without_headers: 0} %{?!_without_headers: 1} +# perf +%define with_perf %{?_without_perf: 0} %{?!_without_perf: 1} +# tools +%define with_tools %{?_without_tools: 0} %{?!_without_tools: 1} +# bpf tool +%define with_bpftool %{?_without_bpftool: 0} %{?!_without_bpftool: 1} +# kernel-debuginfo +%define with_debuginfo %{?_without_debuginfo: 0} %{?!_without_debuginfo: 1} +# +# Additional options for user-friendly one-off kernel building: +# +# Only build the base kernel (--with baseonly): +%define with_baseonly %{?_with_baseonly: 1} %{?!_with_baseonly: 0} +# Only build the debug kernel (--with dbgonly): +%define with_dbgonly %{?_with_dbgonly: 1} %{?!_with_dbgonly: 0} +# +# should we do C=1 builds with sparse +%define with_sparse %{?_with_sparse: 1} %{?!_with_sparse: 0} + +#For loongarch, disable kernel-debug and with_bpftool for unsupport. +%ifarch loongarch64 +%define with_debug 0 +%define with_bpftool 0 +%endif + +%define with_gcov %{?_with_gcov: 1} %{?!_with_gcov: 0} + +# turn off debug kernel for gcov builds +%if %{with_gcov} +%define with_debug 0 +%endif + +%define make_target bzImage +%define image_install_path boot + +%define KVERREL %{version}-%{release}.%{_target_cpu} +%define KVERREL_RE %(echo %KVERREL | sed 's/+/[+]/g') +%define hdrarch %_target_cpu +%define asmarch %_target_cpu + +%if !%{with_debuginfo} +%define _enable_debug_packages 0 +%endif +%define debuginfodir /usr/lib/debug +# Needed because we override almost everything involving build-ids +# and debuginfo generation. Currently we rely on the old alldebug setting. 
+%global _build_id_links alldebug + +# if requested, only build base kernel +%if %{with_baseonly} +%define with_debug 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%endif + +# if requested, only build debug kernel +%if %{with_dbgonly} +%define with_up 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%endif + +# Overrides for generic default options + +# only package docs noarch +%ifnarch noarch +%define with_doc 0 +%define doc_build_fail true +%endif + +# don't build noarch kernels or headers (duh) +%ifarch noarch +%define with_up 0 +%define with_headers 0 +%define with_tools 0 +%define with_perf 0 +%define with_bpftool 0 +%define with_debug 0 +%define with_doc 0 +%define all_arch_configs %{name}-%{version}-*.config +%endif + +# Per-arch tweaks + +%ifarch x86_64 +%define asmarch x86 +%define all_arch_configs %{name}-%{version}-x86_64*.config +%define kernel_image arch/x86/boot/bzImage +%endif + +%ifarch aarch64 +%define all_arch_configs %{name}-%{version}-aarch64*.config +%define asmarch arm64 +%define hdrarch arm64 +%define make_target Image.gz +%define kernel_image arch/arm64/boot/Image.gz +%endif + +%ifarch loongarch64 +%define all_arch_configs %{name}-%{version}-loongarch64*.config +%define asmarch loongarch +%define make_target vmlinux +%define hdrarch loongarch +%define kernel_image vmlinux +%endif + +# To temporarily exclude an architecture from being built, add it to +# %%nobuildarches. Do _NOT_ use the ExclusiveArch: line, because if we +# don't build kernel-headers then the new build system will no longer let +# us use the previous build of that package -- it'll just be completely AWOL. +# Which is a BadThing(tm). + +# We only build kernel-headers on the following... +%define nobuildarches i386 i686 + +%ifarch %nobuildarches +%define with_up 0 +%define with_debug 0 +%define with_debuginfo 0 +%define with_perf 0 +%define with_tools 0 +%define with_bpftool 0 +%define _enable_debug_packages 0 +%endif + +# Architectures we build tools/cpupower on +%define cpupowerarchs x86_64 aarch64 + + +# +# Packages that need to be installed before the kernel is, because the %%post +# scripts use them. +# +%define kernel_prereq coreutils, systemd >= 203-2, /usr/bin/kernel-install +%define initrd_prereq dracut >= 027 + + +Name: kernel%{?variant} +Group: System Environment/Kernel +License: GPLv2 and Redistributable, no modification permitted +URL: http://www.kernel.org/ +Version: %{kernelversion} +Release: %{pkg_release} +Summary: The Linux kernel, based on version %{version}, heavily modified with backports +# DO NOT CHANGE THE 'ExclusiveArch' LINE TO TEMPORARILY EXCLUDE AN ARCHITECTURE BUILD. 
+# SET %%nobuildarches (ABOVE) INSTEAD +ExclusiveArch: noarch i686 x86_64 aarch64 loongarch64 +ExclusiveOS: Linux + + +# +# List the packages used during the kernel build +# +BuildRequires: kmod, patch, bash, coreutils, tar, git, which +BuildRequires: bzip2, xz, findutils, gzip, m4, perl-interpreter, perl-Carp, perl-devel, perl-generators, make, diffutils, gawk +BuildRequires: gcc, binutils, system-rpm-config, hmaccalc, python3-devel +BuildRequires: net-tools, hostname, bc, bison, flex, elfutils-devel, dwarves +BuildRequires: libnl3-devel +%if %{with_doc} +BuildRequires: xmlto, asciidoc, python3-sphinx +%endif +%if %{with_headers} +BuildRequires: rsync +%endif +%if %{with_sparse} +BuildRequires: sparse +%endif +%if %{with_perf} +BuildRequires: zlib-devel binutils-devel newt-devel perl(ExtUtils::Embed) bison flex xz-devel +BuildRequires: audit-libs-devel +BuildRequires: java-devel +BuildRequires: libbpf-devel +BuildRequires: libbabeltrace-devel +BuildRequires: libtraceevent-devel +BuildRequires: numactl-devel +%ifarch aarch64 +BuildRequires: opencsd-devel >= 1.0.0 +%endif +%endif +%if %{with_tools} +BuildRequires: gettext ncurses-devel +BuildRequires: libcap-devel libcap-ng-devel +BuildRequires: pciutils-devel +BuildRequires: openssl-devel +%endif +%if %{with_bpftool} +BuildRequires: python3-docutils +BuildRequires: zlib-devel binutils-devel +%endif +BuildConflicts: rhbuildsys(DiskFree) < 500Mb +%if %{with_debuginfo} +BuildRequires: rpm-build, elfutils +#BuildConflicts: rpm < 4.13.0.1-19 +# Most of these should be enabled after more investigation +%undefine _include_minidebuginfo +%undefine _find_debuginfo_dwz_opts +%undefine _unique_build_ids +%undefine _unique_debug_names +%undefine _unique_debug_srcs +%undefine _debugsource_packages +%undefine _debuginfo_subpackages +%global _find_debuginfo_opts -r --keep-section .BTF* +%global _missing_build_ids_terminate_build 1 +%global _no_recompute_build_ids 1 +%endif + +BuildRequires: openssl openssl-devel + +# These below are required to build man pages +%if %{with_perf} +BuildRequires: xmlto +%endif +%if %{with_perf} || %{with_tools} +BuildRequires: asciidoc +%endif + +Source0: linux-%{kernelversion}-%{pkg_release}.tar.xz + +%define modsign_cmd %{SOURCE18} + +Source20: kernel-%{version}-aarch64.config +Source21: kernel-%{version}-aarch64-debug.config +Source39: kernel-%{version}-x86_64.config +Source40: kernel-%{version}-x86_64-debug.config +Source43: generate_bls_conf.sh +Source45: kernel-%{version}-loongarch64.config +Source46: kernel-%{version}-loongarch64-debug.config + + +# Sources for kernel-tools +Source2000: cpupower.service +Source2001: cpupower.config + +## Patches needed for building this package + +# %%PATCH_LIST%% + +# END OF PATCH DEFINITIONS + +BuildRoot: %{_tmppath}/%{name}-%{KVERREL}-root + +%description +This is the package which provides the Linux kernel for Alibaba Cloud Linux. +It is based on upstream Linux at version %{version} and maintains kABI +compatibility of a set of approved symbols, however it is heavily modified with +backports and fixes pulled from newer upstream Linux %{name} releases. This means +this is not a %{version} kernel anymore: it includes several components which come +from newer upstream linux versions, while maintaining a well tested and stable +core. 
Some of the components/backports that may be pulled in are: changes like +updates to the core kernel (eg.: scheduler, cgroups, memory management, security +fixes and features), updates to block layer, supported filesystems, major driver +updates for supported hardware in Alibaba Cloud Linux, enhancements for +enterprise customers, etc. + +# +# This macro does requires, provides, conflicts, obsoletes for a kernel package. +# %%kernel_reqprovconf +# It uses any kernel__conflicts and kernel__obsoletes +# macros defined above. +# +%define kernel_reqprovconf \ +Provides: %{name} = %{kernelversion}-%{pkg_release}\ +Provides: %{name}-%{_target_cpu} = %{kernelversion}-%{pkg_release}%{?1:+%{1}}\ +Provides: kernel-drm-nouveau = 16\ +Provides: %{name}-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\ +Requires(pre): %{kernel_prereq}\ +Requires(pre): %{initrd_prereq}\ +Requires(pre): linux-firmware >= 20190516-94.git711d3297\ +Requires(preun): systemd >= 200\ +Conflicts: xfsprogs < 4.3.0-1\ +Conflicts: xorg-x11-drv-vmmouse < 13.0.99\ +%{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\ +%{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\ +%{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\ +# We can't let RPM do the dependencies automatic because it'll then pick up\ +# a correct but undesirable perl dependency from the module headers which\ +# isn't required for the kernel proper to function\ +AutoReq: no\ +AutoProv: yes\ +%{nil} + + +%package doc +Summary: Various documentation bits found in the kernel source +Group: Documentation +%description doc +This package contains documentation files from the kernel +source. Various bits of information about the Linux kernel and the +device drivers shipped with it are documented in these files. + +You'll want to install this package if you need a reference to the +options that can be passed to Linux kernel modules at load time. + + +%package headers +Summary: Header files for the Linux kernel for use by glibc +Group: Development/System +Obsoletes: glibc-kernheaders < 3.0-46 +Provides: glibc-kernheaders = 3.0-46 +%if "0%{?variant}" +Obsoletes: kernel-headers < %{kernelversion}-%{pkg_release} +Provides: kernel-headers = %{kernelversion}-%{pkg_release} +%endif +%description headers +Kernel-headers includes the C header files that specify the interface +between the Linux kernel and userspace libraries and programs. The +header files define structures and constants that are needed for +building most standard programs and are also needed for rebuilding the +glibc package. + +%package debuginfo-common-%{_target_cpu} +Summary: Kernel source files used by %{name}-debuginfo packages +Group: Development/Debug +Provides: installonlypkg(kernel) +%description debuginfo-common-%{_target_cpu} +This package is required by %{name}-debuginfo subpackages. +It provides the kernel source files common to all builds. + +%if %{with_perf} +%package -n perf +Summary: Performance monitoring for the Linux kernel +Group: Development/System +Requires: bzip2 +License: GPLv2 +%description -n perf +This package contains the perf tool, which enables performance monitoring +of the Linux kernel. + +%package -n perf-debuginfo +Summary: Debug information for package perf +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n perf-debuginfo +This package provides debug information for the perf package. 
+ +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/perf(\.debug)?|.*%%{_libexecdir}/perf-core/.*|.*%%{_libdir}/libperf-jvmti.so(\.debug)?|XXX' -o perf-debuginfo.list} + +%package -n python3-perf +Summary: Python bindings for apps which will manipulate perf events +Group: Development/Libraries +%description -n python3-perf +The python3-perf package contains a module that permits applications +written in the Python programming language to use the interface +to manipulate perf events. + +%package -n python3-perf-debuginfo +Summary: Debug information for package perf python bindings +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n python3-perf-debuginfo +This package provides debug information for the perf python bindings. + +# the python_sitearch macro should already be defined from above +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{python3_sitearch}/perf.*so(\.debug)?|XXX' -o python3-perf-debuginfo.list} + +# with_perf +%endif + +%if %{with_tools} +%package -n %{name}-tools +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +%ifarch %{cpupowerarchs} +Provides: cpupowerutils = 1:009-0.6.p1 +Obsoletes: cpupowerutils < 1:009-0.6.p1 +Provides: cpufreq-utils = 1:009-0.6.p1 +Provides: cpufrequtils = 1:009-0.6.p1 +Obsoletes: cpufreq-utils < 1:009-0.6.p1 +Obsoletes: cpufrequtils < 1:009-0.6.p1 +Obsoletes: cpuspeed < 1:1.5-16 +Requires: %{name}-tools-libs = %{version}-%{release} +%endif +%define __requires_exclude ^%{_bindir}/python +%description -n %{name}-tools +This package contains the tools/ directory from the kernel source +and the supporting documentation. + +%package -n %{name}-tools-libs +Summary: Libraries for the %{name}-tools +Group: Development/System +License: GPLv2 +%description -n %{name}-tools-libs +This package contains the libraries built from the tools/ directory +from the kernel source. + +%package -n %{name}-tools-libs-devel +Summary: Assortment of tools for the Linux kernel +Group: Development/System +License: GPLv2 +Requires: %{name}-tools = %{version}-%{release} +%ifarch %{cpupowerarchs} +Provides: cpupowerutils-devel = 1:009-0.6.p1 +Obsoletes: cpupowerutils-devel < 1:009-0.6.p1 +%endif +Requires: %{name}-tools-libs = %{version}-%{release} +Provides: %{name}-tools-devel +%description -n %{name}-tools-libs-devel +This package contains the development files for the tools/ directory from +the kernel source. + +%package -n %{name}-tools-debuginfo +Summary: Debug information for package %{name}-tools +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n %{name}-tools-debuginfo +This package provides debug information for package %{name}-tools. + +# Note that this pattern only works right to match the .build-id +# symlinks because of the trailing nonmatching alternation and +# the leading .*, because of find-debuginfo.sh's buggy handling +# of matching the pattern against the symlinks file. 
+%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_bindir}/centrino-decode(\.debug)?|.*%%{_bindir}/powernow-k8-decode(\.debug)?|.*%%{_bindir}/cpupower(\.debug)?|.*%%{_libdir}/libcpupower.*|.*%%{_bindir}/turbostat(\.debug)?|.*%%{_bindir}/x86_energy_perf_policy(\.debug)?|.*%%{_bindir}/tmon(\.debug)?|.*%%{_bindir}/lsgpio(\.debug)?|.*%%{_bindir}/gpio-hammer(\.debug)?|.*%%{_bindir}/gpio-event-mon(\.debug)?|.*%%{_bindir}/iio_event_monitor(\.debug)?|.*%%{_bindir}/iio_generic_buffer(\.debug)?|.*%%{_bindir}/lsiio(\.debug)?|XXX' -o %{name}-tools-debuginfo.list} + +# with_tools +%endif + +%if %{with_bpftool} + +%package -n bpftool +Summary: Inspection and simple manipulation of eBPF programs and maps +License: GPLv2 +%description -n bpftool +This package contains the bpftool, which allows inspection and simple +manipulation of eBPF programs and maps. + +%package -n bpftool-debuginfo +Summary: Debug information for package bpftool +Group: Development/Debug +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release} +AutoReqProv: no +%description -n bpftool-debuginfo +This package provides debug information for the bpftool package. + +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '.*%%{_sbindir}/bpftool(\.debug)?|XXX' -o bpftool-debuginfo.list} + +# with_bpftool +%endif + +%if %{with_gcov} +%package gcov +Summary: gcov graph and source files for coverage data collection. +Group: Development/System +%description gcov +kernel-gcov includes the gcov graph and source files for gcov coverage collection. +%endif + +# +# This macro creates a kernel--debuginfo package. +# %%kernel_debuginfo_package +# +%define kernel_debuginfo_package() \ +%package %{?1:%{1}-}debuginfo\ +Summary: Debug information for package %{name}%{?1:-%{1}}\ +Group: Development/Debug\ +Requires: %{name}-debuginfo-common-%{_target_cpu} = %{version}-%{release}\ +Provides: %{name}%{?1:-%{1}}-debuginfo-%{_target_cpu} = %{version}-%{release}\ +Provides: installonlypkg(kernel)\ +AutoReqProv: no\ +%description %{?1:%{1}-}debuginfo\ +This package provides debug information for package %{name}%{?1:-%{1}}.\ +This is required to use SystemTap with %{name}%{?1:-%{1}}-%{KVERREL}.\ +%{expand:%%global _find_debuginfo_opts %{?_find_debuginfo_opts} -p '/.*/%%{KVERREL_RE}%{?1:[+]%{1}}/.*|/.*%%{KVERREL_RE}%{?1:\+%{1}}(\.debug)?' -o debuginfo%{?1}.list}\ +%{nil} + +# +# This macro creates a kernel--devel package. +# %%kernel_devel_package +# +%define kernel_devel_package() \ +%package %{?1:%{1}-}devel\ +Summary: Development package for building kernel modules to match the %{?2:%{2} }kernel\ +Group: System Environment/Kernel\ +Provides: %{name}%{?1:-%{1}}-devel-%{_target_cpu} = %{version}-%{release}\ +Provides: %{name}-devel-%{_target_cpu} = %{version}-%{release}%{?1:+%{1}}\ +Provides: %{name}-devel-uname-r = %{KVERREL}%{?variant}%{?1:+%{1}}\ +Provides: installonlypkg(kernel)\ +AutoReqProv: no\ +Requires(pre): findutils\ +Requires: findutils\ +Requires: perl-interpreter\ +%description %{?1:%{1}-}devel\ +This package provides kernel headers and makefiles sufficient to build modules\ +against the %{?2:%{2} }kernel package.\ +%{nil} + +# +# This macro creates a %%{name}- and its -devel and -debuginfo too. 
+# %%define variant_summary The Linux kernel compiled for +# %%kernel_variant_package [-n ] +# +%define kernel_variant_package(n:) \ +%package %{?1:%{1}}\ +Summary: %{variant_summary}\ +Group: System Environment/Kernel\ +Provides: installonlypkg(kernel)\ +Requires: grubby \ +%{expand:%%kernel_reqprovconf}\ +%{expand:%%kernel_devel_package %{?1:%{1}} %{!?-n:%{?1:%{1}}}%{?-n:%{-n*}}}\ +%{expand:%%kernel_debuginfo_package %{?1:%{1}}}\ +%{nil} + +# First the auxiliary packages of the main kernel package. +%kernel_devel_package +%kernel_debuginfo_package + +# Now, each variant package. + +%define variant_summary The Linux kernel compiled with extra debugging enabled +%kernel_variant_package debug +%description debug +The kernel package contains the Linux kernel (vmlinuz), the core of any +Linux operating system. The kernel handles the basic functions +of the operating system: memory allocation, process allocation, device +input and output, etc. + +This variant of the kernel has numerous debugging options enabled. +It should only be installed when trying to gather additional information +on kernel bugs, as some of these options impact performance noticably. + +%prep +# do a few sanity-checks for --with *only builds +%if %{with_baseonly} +%if !%{with_up} +echo "Cannot build --with baseonly, up build is disabled" +exit 1 +%endif +%endif + +# more sanity checking; do it quietly +if [ "%{patches}" != "%%{patches}" ] ; then + for patch in %{patches} ; do + if [ ! -f $patch ] ; then + echo "ERROR: Patch ${patch##/*/} listed in specfile but is missing" + exit 1 + fi + done +fi 2>/dev/null + +patch_command='patch -p1 -F1 -s' +ApplyPatch() +{ + local patch=$1 + shift + if [ ! -f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + if ! grep -E "^Patch[0-9]+: $patch\$" %{_specdir}/${RPM_PACKAGE_NAME%%%%%{?variant}}.spec ; then + if [ "${patch:0:8}" != "patch-4." ] ; then + echo "ERROR: Patch $patch not listed as a source patch in specfile" + exit 1 + fi + fi 2>/dev/null + case "$patch" in + *.bz2) bunzip2 < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *.gz) gunzip < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *.xz) unxz < "$RPM_SOURCE_DIR/$patch" | sed -n '/^---$/,$p' | $patch_command ${1+"$@"} ;; + *) sed -n '/^---$/,$p' "$RPM_SOURCE_DIR/$patch" | $patch_command ${1+"$@"} ;; + esac +} + +# don't apply patch if it's empty +ApplyOptionalPatch() +{ + local patch=$1 + shift + if [ ! -f $RPM_SOURCE_DIR/$patch ]; then + exit 1 + fi + local C=$(wc -l $RPM_SOURCE_DIR/$patch | awk '{print $1}') + if [ "$C" -gt 9 ]; then + ApplyPatch $patch ${1+"$@"} + fi +} + +%setup -q -n %{name}-%{kernelversion}-%{pkg_release} -c +mv linux-%{kernelversion}-%{pkg_release} linux-%{KVERREL} + +cd linux-%{KVERREL} + +# Drop some necessary files from the source dir into the buildroot +cp $RPM_SOURCE_DIR/kernel-%{version}-*.config . + +# %%PATCH_APPLICATION%% + +# END OF PATCH APPLICATIONS + +# Any further pre-build tree manipulations happen here. + +chmod +x scripts/checkpatch.pl +mv COPYING COPYING-%{version} + +# This Prevents scripts/setlocalversion from mucking with our version numbers. +touch .scmversion + +# Do not use "ambiguous" python shebangs. RHEL 8 now has a new script +# (/usr/lib/rpm/redhat/brp-mangle-shebangs), which forces us to specify a +# "non-ambiguous" python shebang for scripts we ship in buildroot. This +# script throws an error like below: +# *** ERROR: ambiguous python shebang in /usr/bin/kvm_stat: #!/usr/bin/python. 
Change it to python3 (or python2) explicitly. +# We patch all sources below for which we got a report/error. +pathfix.py -i "%{__python3} %{py3_shbang_opts}" -p -n \ + tools/kvm/kvm_stat/kvm_stat \ + scripts/show_delta \ + scripts/diffconfig \ + scripts/bloat-o-meter \ + scripts/jobserver-exec \ + tools \ + Documentation \ + scripts/clang-tools + +%define make make HOSTCFLAGS="%{?build_hostcflags}" HOSTLDFLAGS="%{?build_hostldflags}" + +# only deal with configs if we are going to build for the arch +%ifnarch %nobuildarches + +rm -rf configs +mkdir configs + +# Remove configs not for the buildarch +for cfg in kernel-%{version}-*.config; do + if [ `echo %{all_arch_configs} | grep -c $cfg` -eq 0 ]; then + rm -f $cfg + fi +done + +# enable GCOV kernel config options if gcov is on +%if %{with_gcov} +for i in *.config +do + sed -i 's/# CONFIG_GCOV_KERNEL is not set/CONFIG_GCOV_KERNEL=y\nCONFIG_GCOV_PROFILE_ALL=y\n/' $i +done +%endif + +# now run oldconfig over all the config files +for i in *.config +do + mv $i .config + Arch=`sed -n 3p .config | cut -d' ' -f2 | cut -d'/' -f2` + make ARCH=$Arch listnewconfig | grep -E '^CONFIG_' >.newoptions || true + if [ -s .newoptions ]; then + cat .newoptions + #exit 1 + fi + rm -f .newoptions + make ARCH=$Arch olddefconfig + echo "# $Arch" > configs/$i + cat .config >> configs/$i +done +# end of kernel config +%endif + +# # End of Configs stuff + +# get rid of unwanted files resulting from patch fuzz +find . \( -name "*.orig" -o -name "*~" \) -exec rm -f {} \; >/dev/null + +# remove unnecessary SCM files +find . -name .gitignore -exec rm -f {} \; >/dev/null + +cd .. + +### +### build +### +%build + +%if %{with_sparse} +%define sparse_mflags C=1 +%endif + +cp_vmlinux() +{ + eu-strip --remove-comment -o "$2" "$1" +} + +BuildKernel() { + MakeTarget=$1 + KernelImage=$2 + Flavour=$3 + Flav=${Flavour:++${Flavour}} + InstallName=${5:-vmlinuz} + + DoModules=1 + + # Pick the right config file for the kernel we're building + Config=kernel-%{version}-%{_target_cpu}${Flavour:+-${Flavour}}.config + DevelDir=/usr/src/kernels/%{KVERREL}${Flav} + + # When the bootable image is just the ELF kernel, strip it. + # We already copy the unstripped file into the debuginfo package. + if [ "$KernelImage" = vmlinux ]; then + CopyKernel=cp_vmlinux + else + CopyKernel=cp + fi + + KernelVer=%{version}-%{release}.%{_target_cpu}${Flav} + echo BUILDING A KERNEL FOR ${Flavour} %{_target_cpu}... + + # make sure EXTRAVERSION says what we want it to say + perl -p -i -e "s/^EXTRAVERSION.*/EXTRAVERSION = -%{release}.%{_target_cpu}${Flav}/" Makefile + + # and now to start the build process + + %{make} -s %{?_smp_mflags} mrproper + cp configs/$Config .config + + %if %{signmodules} + cp %{SOURCE11} certs/. + cp %{SOURCE12} certs/. 
+ %endif + + Arch=`head -1 .config | cut -b 3-` + echo USING ARCH=$Arch + + KCFLAGS="%{?kcflags}" + + # add kpatch flags for base kernel + if [ "$Flavour" == "" ]; then + KCFLAGS="$KCFLAGS %{?kpatch_kcflags}" + fi + + %{make} -s ARCH=$Arch olddefconfig >/dev/null + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" $MakeTarget %{?sparse_mflags} %{?kernel_mflags} + if [ $DoModules -eq 1 ]; then + %{make} -s ARCH=$Arch V=1 %{?_smp_mflags} KCFLAGS="$KCFLAGS" WITH_GCOV="%{?with_gcov}" modules %{?sparse_mflags} || exit 1 + fi + + mkdir -p $RPM_BUILD_ROOT/%{image_install_path} + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer +%if %{with_debuginfo} + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/%{image_install_path} +%endif + +%ifarch aarch64 + %{make} -s ARCH=$Arch V=1 dtbs dtbs_install INSTALL_DTBS_PATH=$RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer + cp -r $RPM_BUILD_ROOT/%{image_install_path}/dtb-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/dtb + find arch/$Arch/boot/dts -name '*.dtb' -type f | xargs rm -f +%endif + + # Start installing the results + install -m 644 .config $RPM_BUILD_ROOT/boot/config-$KernelVer + install -m 644 .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/config + install -m 644 System.map $RPM_BUILD_ROOT/boot/System.map-$KernelVer + install -m 644 System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/System.map + + # We estimate the size of the initramfs because rpm needs to take this size + # into consideration when performing disk space calculations. (See bz #530778) + dd if=/dev/zero of=$RPM_BUILD_ROOT/boot/initramfs-$KernelVer.img bs=1M count=20 + + if [ -f arch/$Arch/boot/zImage.stub ]; then + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/%{image_install_path}/zImage.stub-$KernelVer || : + cp arch/$Arch/boot/zImage.stub $RPM_BUILD_ROOT/lib/modules/$KernelVer/zImage.stub-$KernelVer || : + fi + + $CopyKernel $KernelImage \ + $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + chmod 755 $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + cp $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer $RPM_BUILD_ROOT/lib/modules/$KernelVer/$InstallName + + # hmac sign the kernel for FIPS + echo "Creating hmac file: $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac" + ls -l $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer + sha512hmac $RPM_BUILD_ROOT/%{image_install_path}/$InstallName-$KernelVer | sed -e "s,$RPM_BUILD_ROOT,," > $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac; + cp $RPM_BUILD_ROOT/%{image_install_path}/.vmlinuz-$KernelVer.hmac $RPM_BUILD_ROOT/lib/modules/$KernelVer/.vmlinuz.hmac + + if [ $DoModules -eq 1 ]; then + # Override $(mod-fw) because we don't want it to install any firmware + # we'll get it from the linux-firmware package and we don't want conflicts + %{make} -s %{?_smp_mflags} ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT modules_install KERNELRELEASE=$KernelVer mod-fw= + fi + +%if %{with_gcov} + # install gcov-needed files to $BUILDROOT/$BUILD/...: + # gcov_info->filename is absolute path + # gcno references to sources can use absolute paths (e.g. in out-of-tree builds) + # sysfs symlink targets (set up at compile time) use absolute paths to BUILD dir + find . \( -name '*.gcno' -o -name '*.[chS]' \) -exec install -D '{}' "$RPM_BUILD_ROOT/$(pwd)/{}" \; +%endif + + if [ $DoVDSO -ne 0 ]; then + %{make} -s ARCH=$Arch INSTALL_MOD_PATH=$RPM_BUILD_ROOT vdso_install KERNELRELEASE=$KernelVer + if [ ! 
-s ldconfig-kernel.conf ]; then + echo > ldconfig-kernel.conf "\ + # Placeholder file, no vDSO hwcap entries used in this kernel." + fi + %{__install} -D -m 444 ldconfig-kernel.conf \ + $RPM_BUILD_ROOT/etc/ld.so.conf.d/%{name}-$KernelVer.conf + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/vdso/.build-id + fi + + # And save the headers/makefiles etc for building modules against + # + # This all looks scary, but the end result is supposed to be: + # * all arch relevant include/ files + # * all Makefile/Kconfig files + # * all script/ files + + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/source + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + (cd $RPM_BUILD_ROOT/lib/modules/$KernelVer ; ln -s build source) + # dirs for additional modules per module-init-tools, kbuild/modules.txt + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/updates + mkdir -p $RPM_BUILD_ROOT/lib/modules/$KernelVer/weak-updates + # first copy everything + cp --parents `find -type f -name "Makefile*" -o -name "Kconfig*"` $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp Module.symvers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp System.map $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + if [ -s Module.markers ]; then + cp Module.markers $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + fi + + # create the kABI metadata for use in packaging + # NOTENOTE: the name symvers is used by the rpm backend + # NOTENOTE: to discover and run the /usr/lib/rpm/fileattrs/kabi.attr + # NOTENOTE: script which dynamically adds exported kernel symbol + # NOTENOTE: checksums to the rpm metadata provides list. + # NOTENOTE: if you change the symvers name, update the backend too + echo "**** GENERATING kernel ABI metadata ****" + gzip -c9 < Module.symvers > $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz + cp $RPM_BUILD_ROOT/boot/symvers-$KernelVer.gz $RPM_BUILD_ROOT/lib/modules/$KernelVer/symvers.gz + + # then drop all but the needed Makefiles/Kconfig files + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Documentation + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include + cp .config $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + cp -a scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + rm -rf $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/tracing + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/spdxcheck.py + if [ -f tools/objtool/objtool ]; then + cp -a tools/objtool/objtool $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/tools/objtool/ || : + fi + if [ -d arch/$Arch/scripts ]; then + cp -a arch/$Arch/scripts $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch} || : + fi + if [ -f arch/$Arch/*lds ]; then + cp -a arch/$Arch/*lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/arch/%{_arch}/ || : + fi + if [ -f arch/%{asmarch}/kernel/module.lds ]; then + cp -a --parents arch/%{asmarch}/kernel/module.lds $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*.o + rm -f $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/scripts/*/*.o + if [ -d arch/%{asmarch}/include ]; then + cp -a --parents arch/%{asmarch}/include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + fi +%ifarch aarch64 + # arch/arm64/include/asm/xen references arch/arm + cp -a --parents arch/arm/include/asm/xen $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + # arch/arm64/include/asm/opcodes.h references arch/arm + cp -a --parents arch/arm/include/asm/opcodes.h 
$RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + cp -a include $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include +%ifarch x86_64 + # files for 'make prepare' to succeed with kernel-devel + cp -a --parents arch/x86/entry/syscalls/syscall_32.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/entry/syscalls/syscall_64.tbl $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_32.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_64.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs_common.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/tools/relocs.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents tools/include/tools/le_byteshift.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/purgatory.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/stack.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/setup-x86_64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/purgatory/entry64.S $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/string.c $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents arch/x86/boot/ctype.h $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + + cp -a --parents scripts/syscalltbl.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ + cp -a --parents scripts/syscallhdr.sh $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/ +%endif + # Make sure the Makefile, version.h, and auto.conf have a matching + # timestamp so that external modules can be built + touch -r $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/Makefile \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/generated/uapi/linux/version.h \ + $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/include/config/auto.conf + +%if %{with_debuginfo} + eu-readelf -n vmlinux | grep "Build ID" | awk '{print $NF}' > vmlinux.id + cp vmlinux.id $RPM_BUILD_ROOT/lib/modules/$KernelVer/build/vmlinux.id + + # + # save the vmlinux file for kernel debugging into the kernel-debuginfo rpm + # + mkdir -p $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer + cp vmlinux $RPM_BUILD_ROOT%{debuginfodir}/lib/modules/$KernelVer +%endif + + find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name "*.ko" -type f >modnames + + # mark modules executable so that strip-to-file can strip them + xargs --no-run-if-empty chmod u+x < modnames + + # Generate a list of modules for block and networking. + + grep -F /drivers/ modnames | xargs --no-run-if-empty nm -upA | + sed -n 's,^.*/\([^/]*\.ko\): *U \(.*\)$,\1 \2,p' > drivers.undef + + collect_modules_list() + { + sed -r -n -e "s/^([^ ]+) \\.?($2)\$/\\1/p" drivers.undef | + LC_ALL=C sort -u > $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + if [ ! 
-z "$3" ]; then + sed -r -e "/^($3)\$/d" -i $RPM_BUILD_ROOT/lib/modules/$KernelVer/modules.$1 + fi + } + + collect_modules_list networking \ + 'register_netdev|ieee80211_register_hw|usbnet_probe|phy_driver_register|rt(l_|2x00)(pci|usb)_probe|register_netdevice' + collect_modules_list block \ + 'ata_scsi_ioctl|scsi_add_host|scsi_add_host_with_dma|blk_alloc_queue|blk_init_queue|register_mtd_blktrans|scsi_esp_register|scsi_register_device_handler|blk_queue_physical_block_size' 'pktcdvd.ko|dm-mod.ko' + collect_modules_list drm \ + 'drm_open|drm_init' + collect_modules_list modesetting \ + 'drm_crtc_init' + + # detect missing or incorrect license tags + ( find $RPM_BUILD_ROOT/lib/modules/$KernelVer -name '*.ko' | xargs /sbin/modinfo -l | \ + grep -E -v 'GPL( v2)?$|Dual BSD/GPL$|Dual MPL/GPL$|GPL and additional rights$' ) && exit 1 + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Copy the System.map file for depmod to use, and create a backup of the + # full module tree so we can restore it after we're done filtering + cp System.map $RPM_BUILD_ROOT/. + pushd $RPM_BUILD_ROOT + + if [ $DoModules -eq 1 ]; then + + # Run depmod on the resulting module tree and make sure it isn't broken + depmod -b . -aeF ./System.map $KernelVer &> depmod.out + %ifnarch loongarch64 + if [ -s depmod.out ]; then + echo "Depmod failure" + cat depmod.out + exit 1 + else + rm depmod.out + fi + %else + rm -rf depmod.out + %endif + else + # Ensure important files/directories exist to let the packaging succeed + mkdir -p lib/modules/$KernelVer/kernel + # Add files usually created by make modules, needed to prevent errors + # thrown by depmod during package installation + touch lib/modules/$KernelVer/modules.order + touch lib/modules/$KernelVer/modules.builtin + fi + + # remove files that will be auto generated by depmod at rpm -i time + pushd $RPM_BUILD_ROOT/lib/modules/$KernelVer/ + rm -f modules.{alias*,builtin.bin,dep*,*map,symbols*,devname,softdep} + popd + + # Cleanup + rm System.map + popd + +%if %{signmodules} + if [ $DoModules -eq 1 ]; then + # Save the signing keys so we can sign the modules in __modsign_install_post + cp certs/signing_key.pem certs/signing_key.pem.sign${Flav} + cp certs/signing_key.x509 certs/signing_key.x509.sign${Flav} + fi +%endif + + # Move the devel headers out of the root file system + mkdir -p $RPM_BUILD_ROOT/usr/src/kernels + mv $RPM_BUILD_ROOT/lib/modules/$KernelVer/build $RPM_BUILD_ROOT/$DevelDir + + # This is going to create a broken link during the build, but we don't use + # it after this point. We need the link to actually point to something + # when kernel-devel is installed, and a relative link doesn't work across + # the F17 UsrMove feature. + ln -sf $DevelDir $RPM_BUILD_ROOT/lib/modules/$KernelVer/build + + # prune junk from kernel-devel + find $RPM_BUILD_ROOT/usr/src/kernels -name ".*.cmd" -exec rm -f {} \; + + # build a BLS config for this kernel + %{SOURCE43} "$KernelVer" "$RPM_BUILD_ROOT" "%{?variant}" +} + +### +# DO it... 
+### + +# prepare directories +rm -rf $RPM_BUILD_ROOT +mkdir -p $RPM_BUILD_ROOT/boot +mkdir -p $RPM_BUILD_ROOT%{_libexecdir} + +cd linux-%{KVERREL} + + +%if %{with_debug} +BuildKernel %make_target %kernel_image debug +%endif + +%if %{with_up} +BuildKernel %make_target %kernel_image +%endif + +%global perf_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" -C tools/perf V=1 NO_PERF_READ_VDSO32=1 NO_PERF_READ_VDSOX32=1 WERROR=0 NO_LIBUNWIND=1 HAVE_CPLUS_DEMANGLE=1 NO_GTK2=1 NO_STRLCPY=1 NO_BIONIC=1 LIBBPF_DYNAMIC=1 LIBTRACEEVENT_DYNAMIC=1 %{?perf_build_extra_opts} prefix=%{_prefix} PYTHON=%{__python3} +%if %{with_perf} +# perf +# make sure check-headers.sh is executable +chmod +x tools/perf/check-headers.sh +%{perf_make} DESTDIR=$RPM_BUILD_ROOT all +%endif + +%global tools_make \ + %{make} V=1 CFLAGS="${RPM_OPT_FLAGS}" LDFLAGS="%{__global_ldflags}" + +%if %{with_tools} +%ifarch %{cpupowerarchs} +# cpupower +# make sure version-gen.sh is executable. +chmod +x tools/power/cpupower/utils/version-gen.sh +%{tools_make} -C tools/power/cpupower CPUFREQ_BENCH=false DEBUG=false +%ifarch x86_64 + pushd tools/power/cpupower/debug/x86_64 + %{tools_make} centrino-decode powernow-k8-decode + popd +%endif +%ifarch x86_64 + pushd tools/power/x86/x86_energy_perf_policy/ + %{tools_make} + popd + pushd tools/power/x86/turbostat + %{tools_make} + popd + pushd tools/power/x86/intel-speed-select + %{make} + popd +%endif +%endif +pushd tools/thermal/tmon/ +%{make} V=1 +popd +pushd tools/iio/ +%{make} V=1 +popd +pushd tools/gpio/ +%{make} V=1 +popd +# build MM tools +pushd tools/mm/ +%{make} V=1 slabinfo page_owner_sort page-types +popd +%endif + +%global bpftool_make \ + make EXTRA_CFLAGS="${RPM_OPT_FLAGS}" EXTRA_LDFLAGS="%{__global_ldflags}" DESTDIR=$RPM_BUILD_ROOT V=1 +%if %{with_bpftool} +pushd tools/bpf/bpftool +%{bpftool_make} +popd +%endif + +%if %{with_doc} +# Make the HTML pages. +make htmldocs || %{doc_build_fail} + +# sometimes non-world-readable files sneak into the kernel source tree +chmod -R a=rX Documentation +find Documentation -type d | xargs chmod u+w +%endif + +# In the modsign case, we do 3 things. 1) We check the "flavour" and hard +# code the value in the following invocations. This is somewhat sub-optimal +# but we're doing this inside of an RPM macro and it isn't as easy as it +# could be because of that. 2) We restore the .tmp_versions/ directory from +# the one we saved off in BuildKernel above. This is to make sure we're +# signing the modules we actually built/installed in that flavour. 3) We +# grab the arch and invoke mod-sign.sh command to actually sign the modules. +# +# We have to do all of those things _after_ find-debuginfo runs, otherwise +# that will strip the signature off of the modules. + +%define __modsign_install_post \ + if [ "%{signmodules}" -eq "1" ]; then \ + if [ "%{with_debug}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign+debug certs/signing_key.x509.sign+debug $RPM_BUILD_ROOT/lib/modules/%{KVERREL}+debug/ \ + fi \ + if [ "%{with_up}" -ne "0" ]; then \ + %{modsign_cmd} certs/signing_key.pem.sign certs/signing_key.x509.sign $RPM_BUILD_ROOT/lib/modules/%{KVERREL}/ \ + fi \ + fi \ +%{nil} + +### +### Special hacks for debuginfo subpackages. +### + +# This macro is used by %%install, so we must redefine it before that. 
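+# (Redefining debug_package to %%{nil} turns off rpmbuild's automatic
+# debuginfo subpackage generation, so the hand-rolled debuginfo file
+# lists in this spec stay the only source of *-debuginfo packages.)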
+%define debug_package %{nil}
+
+%if %{with_debuginfo}
+
+%ifnarch noarch
+%global __debug_package 1
+%files -f debugfiles.list debuginfo-common-%{_target_cpu}
+%defattr(-,root,root)
+%endif
+
+%endif
+
+#
+# Disgusting hack alert! We need to ensure we sign modules *after* all
+# invocations of strip occur, which is in __debug_install_post if
+# find-debuginfo.sh runs, and __os_install_post if not.
+#
+%define __spec_install_post \
+ %{?__debug_package:%{__debug_install_post}}\
+ %{__arch_install_post}\
+ %{__os_install_post}\
+ %{__modsign_install_post}
+
+###
+### install
+###
+
+%install
+
+cd linux-%{KVERREL}
+
+%if %{with_doc}
+docdir=$RPM_BUILD_ROOT%{_datadir}/doc/kernel-doc-%{kernelversion}
+
+# copy the source over
+mkdir -p $docdir
+tar -h -f - --exclude=man --exclude='.*' -c Documentation | tar xf - -C $docdir
+
+# with_doc
+%endif
+
+# We have to do the headers install before the tools install because the
+# kernel headers_install will remove any header files in /usr/include that
+# it doesn't install itself.
+
+%if %{with_headers}
+# Install kernel headers
+%{make} ARCH=%{hdrarch} INSTALL_HDR_PATH=$RPM_BUILD_ROOT/usr headers_install
+
+find $RPM_BUILD_ROOT/usr/include \
+ \( -name .install -o -name .check -o \
+ -name ..install.cmd -o -name ..check.cmd \) -delete
+
+%endif
+
+%if %{with_perf}
+# perf tool binary and supporting scripts/binaries
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT lib=%{_lib} install-bin
+# remove the 'trace' symlink.
+rm -f %{buildroot}%{_bindir}/trace
+
+# For both of the below, yes, this should be using a macro, but right now
+# it's hard-coded and we don't actually want it anyway.
+# Whoever wants examples can fix it up!
+
+# remove examples
+rm -rf %{buildroot}/usr/lib/perf/examples
+rm -rf %{buildroot}/usr/lib/perf/include
+
+# python-perf extension
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-python_ext
+
+# perf man pages (note: implicit rpm magic compresses them later)
+mkdir -p %{buildroot}/%{_mandir}/man1
+%{perf_make} DESTDIR=$RPM_BUILD_ROOT install-man
+
+# remove any libtraceevent files; e.g. its plugins still get built and
+# installed even if we build against the system's libtraceevent during the
+# perf build (by setting LIBTRACEEVENT_DYNAMIC=1 in the perf_make macro
+# above). Those files already ship with the libtraceevent package.
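+# (For reference: the plugins land under %%{_libdir}/traceevent/plugins/,
+# e.g. plugin_kvm.so, which is why the whole tree is removed below.)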
+rm -rf %{buildroot}%{_libdir}/traceevent
+%endif
+
+%if %{with_tools}
+%ifarch %{cpupowerarchs}
+%{make} -C tools/power/cpupower DESTDIR=$RPM_BUILD_ROOT libdir=%{_libdir} mandir=%{_mandir} CPUFREQ_BENCH=false install
+rm -f %{buildroot}%{_libdir}/*.{a,la}
+%find_lang cpupower
+mv cpupower.lang ../
+%ifarch x86_64
+ pushd tools/power/cpupower/debug/x86_64
+ install -m755 centrino-decode %{buildroot}%{_bindir}/centrino-decode
+ install -m755 powernow-k8-decode %{buildroot}%{_bindir}/powernow-k8-decode
+ popd
+%endif
+chmod 0755 %{buildroot}%{_libdir}/libcpupower.so*
+mkdir -p %{buildroot}%{_unitdir} %{buildroot}%{_sysconfdir}/sysconfig
+install -m644 %{SOURCE2000} %{buildroot}%{_unitdir}/cpupower.service
+install -m644 %{SOURCE2001} %{buildroot}%{_sysconfdir}/sysconfig/cpupower
+%endif
+%ifarch x86_64
+ mkdir -p %{buildroot}%{_mandir}/man8
+ pushd tools/power/x86/x86_energy_perf_policy
+ %{tools_make} DESTDIR=%{buildroot} install
+ popd
+ pushd tools/power/x86/turbostat
+ %{tools_make} DESTDIR=%{buildroot} install
+ popd
+ pushd tools/power/x86/intel-speed-select
+ %{make} DESTDIR=%{buildroot} install
+ popd
+%endif
+pushd tools/thermal/tmon
+%{make} V=1 INSTALL_ROOT=%{buildroot} install
+popd
+pushd tools/iio
+%{make} V=1 DESTDIR=%{buildroot} install
+popd
+pushd tools/gpio
+%{make} V=1 DESTDIR=%{buildroot} install
+popd
+pushd tools/kvm/kvm_stat
+make INSTALL_ROOT=%{buildroot} install-tools
+make INSTALL_ROOT=%{buildroot} install-man
+popd
+# install MM tools
+pushd tools/mm/
+install -m755 slabinfo %{buildroot}%{_bindir}/slabinfo
+install -m755 page_owner_sort %{buildroot}%{_bindir}/page_owner_sort
+install -m755 page-types %{buildroot}%{_bindir}/page-types
+popd
+%endif
+
+%if %{with_bpftool}
+pushd tools/bpf/bpftool
+%{bpftool_make} prefix=%{_prefix} bash_compdir=%{_sysconfdir}/bash_completion.d/ mandir=%{_mandir} install doc-install
+popd
+%endif
+
+# We have to do the headers checksum calculation after the tools install because
+# the tools might end up installing their own set of headers on top of the kernel's
+%if %{with_headers}
+# compute a content hash to export as Provides: kernel-headers-checksum
+HEADERS_CHKSUM=$(export LC_ALL=C; find $RPM_BUILD_ROOT/usr/include -type f -name "*.h" \
+ ! -path $RPM_BUILD_ROOT/usr/include/linux/version.h | \
+ sort | xargs cat | sha1sum - | cut -f 1 -d ' ');
+# export the checksum via usr/include/linux/version.h, so the dynamic
+# find-provides can grab the hash to update it accordingly
+echo "#define KERNEL_HEADERS_CHECKSUM \"$HEADERS_CHKSUM\"" >> $RPM_BUILD_ROOT/usr/include/linux/version.h
+%endif
+
+###
+### clean
+###
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+###
+### scripts
+###
+
+%if %{with_tools}
+%post -n %{name}-tools-libs
+/sbin/ldconfig
+
+%postun -n %{name}-tools-libs
+/sbin/ldconfig
+%endif
+
+#
+# This macro defines a %%post script for a kernel*-devel package.
+# %%kernel_devel_post [<subpackage>]
+#
+%define kernel_devel_post() \
+%{expand:%%post %{?1:%{1}-}devel}\
+if [ -f /etc/sysconfig/kernel ]\
+then\
+ . /etc/sysconfig/kernel || exit $?\
+fi\
+if [ "$HARDLINK" != "no" -a -x /usr/sbin/hardlink ]\
+then\
+ (cd /usr/src/kernels/%{KVERREL}%{?1:+%{1}} &&\
+ /usr/bin/find . -type f | while read f; do\
+ hardlink -c /usr/src/kernels/*%{?dist}.*/$f $f\
+ done)\
+fi\
+%{nil}
+
+# This macro defines a %%posttrans script for a kernel package.
+# %%kernel_variant_posttrans [<subpackage>]
+# More text can follow to go at the end of this variant's %%post.
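+# (For example, `%%kernel_variant_posttrans debug` expands to a
+# `%%posttrans debug` scriptlet that runs weak-modules --add-kernel and
+# `kernel-install add` for %%{KVERREL}+debug.)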
+#
+%define kernel_variant_posttrans() \
+%{expand:%%posttrans %{?1:%{1}}}\
+if [ -x %{_sbindir}/weak-modules ]\
+then\
+ %{_sbindir}/weak-modules --add-kernel %{KVERREL}%{?1:+%{1}} || exit $?\
+fi\
+/bin/kernel-install add %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\
+%{nil}
+
+#
+# This macro defines a %%post script for a kernel package and its devel package.
+# %%kernel_variant_post [-v <subpackage>] [-r <replace>]
+# More text can follow to go at the end of this variant's %%post.
+#
+%define kernel_variant_post(v:r:) \
+%{expand:%%kernel_devel_post %{?-v*}}\
+%{expand:%%kernel_variant_posttrans %{?-v*}}\
+%{expand:%%post %{?-v*}}\
+%{-r:\
+if [ `uname -i` == "x86_64" -o `uname -i` == "i386" ] &&\
+ [ -f /etc/sysconfig/kernel ]; then\
+ /bin/sed -r -i -e 's/^DEFAULTKERNEL=%{-r*}$/DEFAULTKERNEL=kernel%{?-v:-%{-v*}}/' /etc/sysconfig/kernel || exit $?\
+fi}\
+%{nil}
+
+#
+# This macro defines a %%preun script for a kernel package.
+# %%kernel_variant_preun <subpackage>
+#
+%define kernel_variant_preun() \
+%{expand:%%preun %{?1}}\
+/bin/kernel-install remove %{KVERREL}%{?1:+%{1}} /lib/modules/%{KVERREL}%{?1:+%{1}}/vmlinuz || exit $?\
+if [ -x %{_sbindir}/weak-modules ]\
+then\
+ %{_sbindir}/weak-modules --remove-kernel %{KVERREL}%{?1:+%{1}} || exit $?\
+fi\
+%{nil}
+
+%kernel_variant_preun
+%kernel_variant_post -r kernel-smp
+
+%kernel_variant_preun debug
+%kernel_variant_post -v debug
+
+if [ -x /sbin/ldconfig ]
+then
+ /sbin/ldconfig -X || exit $?
+fi
+
+###
+### file lists
+###
+
+%if %{with_headers}
+%files headers
+%defattr(-,root,root)
+/usr/include/*
+%endif
+
+# only some architecture builds need kernel-doc
+%if %{with_doc}
+%files doc
+%defattr(-,root,root)
+%{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation/*
+%dir %{_datadir}/doc/kernel-doc-%{kernelversion}/Documentation
+%dir %{_datadir}/doc/kernel-doc-%{kernelversion}
+%endif
+
+%if %{with_perf}
+%files -n perf
+%{_bindir}/perf
+%{_libdir}/libperf-jvmti.so
+%dir %{_libexecdir}/perf-core
+%{_libexecdir}/perf-core/*
+%{_datadir}/perf-core/*
+%{_mandir}/man[1-8]/perf*
+%{_sysconfdir}/bash_completion.d/perf
+%doc linux-%{KVERREL}/tools/perf/Documentation/examples.txt
+%{_docdir}/perf-tip/tips.txt
+
+%files -n python3-perf
+%{python3_sitearch}/*
+
+%if %{with_debuginfo}
+%files -f perf-debuginfo.list -n perf-debuginfo
+
+%files -f python3-perf-debuginfo.list -n python3-perf-debuginfo
+%endif
+# with_perf
+%endif
+
+%if %{with_tools}
+%ifarch %{cpupowerarchs}
+%defattr(-,root,root)
+%files -n %{name}-tools -f cpupower.lang
+%{_bindir}/cpupower
+%{_datadir}/bash-completion/completions/cpupower
+%ifarch x86_64
+%{_bindir}/centrino-decode
+%{_bindir}/powernow-k8-decode
+%endif
+%{_unitdir}/cpupower.service
+%{_mandir}/man[1-8]/cpupower*
+%config(noreplace) %{_sysconfdir}/sysconfig/cpupower
+%ifarch x86_64
+%{_bindir}/x86_energy_perf_policy
+%{_mandir}/man8/x86_energy_perf_policy*
+%{_bindir}/turbostat
+%{_mandir}/man8/turbostat*
+%{_bindir}/intel-speed-select
+%endif
+# !cpupowerarchs
+%else
+%files -n %{name}-tools
+%defattr(-,root,root)
+# cpupowerarchs
+%endif
+%{_bindir}/tmon
+%{_bindir}/iio_event_monitor
+%{_bindir}/iio_generic_buffer
+%{_bindir}/lsiio
+%{_bindir}/lsgpio
+%{_bindir}/gpio-hammer
+%{_bindir}/gpio-event-mon
+%{_bindir}/gpio-watch
+%{_mandir}/man1/kvm_stat*
+%{_bindir}/kvm_stat
+%{_bindir}/page_owner_sort
+%{_bindir}/slabinfo
+%{_bindir}/page-types
+
+%if %{with_debuginfo}
+%files -f %{name}-tools-debuginfo.list -n %{name}-tools-debuginfo
+%defattr(-,root,root)
+%endif
+
+%ifarch %{cpupowerarchs}
+%files -n %{name}-tools-libs
+%{_libdir}/libcpupower.so.1
+%{_libdir}/libcpupower.so.0.0.1
+
+%files -n %{name}-tools-libs-devel
+%{_libdir}/libcpupower.so
+%{_includedir}/cpufreq.h
+%endif
+# with_tools
+%endif
+
+%if %{with_bpftool}
+%files -n bpftool
+%{_sbindir}/bpftool
+%{_sysconfdir}/bash_completion.d/bpftool
+%{_mandir}/man8/bpftool-cgroup.8.*
+%{_mandir}/man8/bpftool-map.8.*
+%{_mandir}/man8/bpftool-prog.8.*
+%{_mandir}/man8/bpftool-perf.8.*
+%{_mandir}/man8/bpftool.8.*
+%{_mandir}/man8/bpftool-btf.8.*
+%{_mandir}/man8/bpftool-feature.8.*
+%{_mandir}/man8/bpftool-gen.8.*
+%{_mandir}/man8/bpftool-iter.8.*
+%{_mandir}/man8/bpftool-link.8.*
+%{_mandir}/man8/bpftool-net.8.*
+%{_mandir}/man8/bpftool-struct_ops.8.*
+
+%if %{with_debuginfo}
+%files -f bpftool-debuginfo.list -n bpftool-debuginfo
+%defattr(-,root,root)
+%endif
+%endif
+
+# empty meta-package
+%ifnarch %nobuildarches noarch
+%files
+%defattr(-,root,root)
+%endif
+
+%if %{with_gcov}
+%ifarch x86_64 aarch64
+%files gcov
+%defattr(-,root,root)
+%{_builddir}
+%endif
+%endif
+
+# This is %%{image_install_path} on an arch where that includes ELF files,
+# or empty otherwise.
+%define elf_image_install_path %{?kernel_image_elf:%{image_install_path}}
+
+#
+# This macro defines the %%files sections for a kernel package
+# and its devel and debuginfo packages.
+# %%kernel_variant_files [-k vmlinux] <condition> <subpackage>
+#
+%define kernel_variant_files(k:) \
+%if %{1}\
+%{expand:%%files %{?2}}\
+%defattr(-,root,root)\
+%{!?_licensedir:%global license %%doc}\
+%license linux-%{KVERREL}/COPYING-%{version}\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/%{?-k:%{-k*}}%{!?-k:vmlinuz}\
+%ghost /%{image_install_path}/%{?-k:%{-k*}}%{!?-k:vmlinuz}-%{KVERREL}%{?2:+%{2}}\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/.vmlinuz.hmac \
+%ghost /%{image_install_path}/.vmlinuz-%{KVERREL}%{?2:+%{2}}.hmac \
+%ifarch aarch64\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/dtb \
+%ghost /%{image_install_path}/dtb-%{KVERREL}%{?2:+%{2}} \
+%endif\
+%attr(0600, root, root) /lib/modules/%{KVERREL}%{?2:+%{2}}/System.map\
+%attr(0600, root, root) /boot/System.map-%{KVERREL}%{?2:+%{2}}\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/symvers.gz\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/config\
+%attr(0600, root, root) /boot/symvers-%{KVERREL}%{?2:+%{2}}.gz\
+%attr(0600, root, root) /boot/initramfs-%{KVERREL}%{?2:+%{2}}.img\
+%attr(0644, root, root) /boot/config-%{KVERREL}%{?2:+%{2}}\
+%dir /lib/modules\
+%dir /lib/modules/%{KVERREL}%{?2:+%{2}}\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/kernel\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/build\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/source\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/updates\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/weak-updates\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/bls.conf\
+/lib/modules/%{KVERREL}%{?2:+%{2}}/modules.*\
+%{expand:%%files %{?2:%{2}-}devel}\
+%defattr(-,root,root)\
+%defverify(not mtime)\
+/usr/src/kernels/%{KVERREL}%{?2:+%{2}}\
+%if %{with_debuginfo}\
+%ifnarch noarch\
+%{expand:%%files -f debuginfo%{?2}.list %{?2:%{2}-}debuginfo}\
+%defattr(-,root,root)\
+%endif\
+%endif\
+%endif\
+%{nil}
+
+%kernel_variant_files %{with_up}
+%kernel_variant_files %{with_debug} debug
+
+# please don't put in a version string unless you're going to tag
+# and build.
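+# (The two %%kernel_variant_files invocations above emit the %%files
+# sections for the base and debug variants whenever %%{with_up} and
+# %%{with_debug} are set.)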
+# +# +%changelog + diff --git a/arch/Kconfig b/arch/Kconfig index 12d51495caec181de91b5c362e8334bd8989598a..fff9db8bc106e577245f270bc88f56714855a511 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -1324,6 +1324,14 @@ config STRICT_MODULE_RWX config ARCH_HAS_PHYS_TO_DMA bool +config ARCH_HAS_CPU_RESCTRL + bool + help + The 'resctrl' filesystem allows CPU controls of shared resources + such as caches and memory bandwidth to be configured. An architecture + selects this if it provides the arch-specific hooks for the filesystem + and needs the per-task CLOSID/RMID properties. + config HAVE_ARCH_COMPILER_H bool help diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6062a52a084ffff5b3d94b290b7fdd8846b44293..490c120e1e158955974e7b1ab321a939525bfc91 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -21,6 +21,7 @@ config ARM64 select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_CURRENT_STACK_POINTER + select ARCH_HAS_COPY_MC if ACPI_APEI_GHES select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -1956,7 +1957,27 @@ config ARM64_TLB_RANGE The feature introduces new assembly instructions, and they were support when binutils >= 2.30. -endmenu # "ARMv8.4 architectural features" +config ARM64_MPAM + bool "Enable support for MPAM" + select ACPI_MPAM if ACPI + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS + help + Memory Partitioning and Monitoring is an optional extension + that allows the CPUs to mark load and store transactions with + labels for partition-id and performance-monitoring-group. + System components, such as the caches, can use the partition-id + to apply a performance policy. MPAM monitors can use the + partition-id and performance-monitoring-group to measure the + cache occupancy or data throughput. + + Use of this extension requires CPU support, support in the + memory system components (MSC), and a description from firmware + of where the MSC are in the address space. + + MPAM is exposed to user-space via the resctrl pseudo filesystem. + +endmenu menu "ARMv8.5 architectural features" diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms index 6069120199bbcaf26800641174b423ee84cc30fd..62b813d80700f22151d5c6d528a9a70c9b07c881 100644 --- a/arch/arm64/Kconfig.platforms +++ b/arch/arm64/Kconfig.platforms @@ -244,6 +244,12 @@ config ARCH_NPCM General support for NPCM8xx BMC (Arbel). Nuvoton NPCM8xx BMC based on the Cortex A35. +config ARCH_PHYTIUM + bool "Phytium SoC Family" + select ARM_GIC_PHYTIUM_2500 + help + This enables support for Phytium ARMv8 SoC family. + config ARCH_QCOM bool "Qualcomm Platforms" select GPIOLIB diff --git a/arch/arm64/configs/anolis-debug_defconfig b/arch/arm64/configs/anolis-debug_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..65ebc3694ef2e14689f167555c123701ed1aad29 --- /dev/null +++ b/arch/arm64/configs/anolis-debug_defconfig @@ -0,0 +1,7208 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y 
+CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 
+CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y +CONFIG_KASAN_SHADOW_OFFSET=0xdfff800000000000 + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# 
CONFIG_ARM64_64K_PAGES is not set +# CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +CONFIG_ARM64_AMU_EXTN=y +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +CONFIG_ARM64_MPAM=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +CONFIG_PM_TEST_SUSPEND=y +CONFIG_PM_SLEEP_DEBUG=y 
+CONFIG_PM_CLK=y +CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=m +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y 
+CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_RELR=y 
+CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y 
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y 
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set 
+CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m 
+CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# 
CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y 
+CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 
+CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y 
+CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +CONFIG_FW_UPLOAD=y +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# CONFIG_EFI_ZBOOT is not set 
+CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y 
+CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +CONFIG_UACCE=m +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end 
of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPI3MR=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# 
CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y 
+CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +# CONFIG_FUSION_CTL is not set +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +# CONFIG_FIREWIRE is not set +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=m +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +CONFIG_NET_VENDOR_AMAZON=y +CONFIG_ENA_ETHERNET=m +CONFIG_NET_VENDOR_AMD=y +# CONFIG_AMD8111_ETH is not set +# CONFIG_PCNET32 is not set +CONFIG_AMD_XGBE=m +# CONFIG_AMD_XGBE_DCB is not set +# CONFIG_PDS_CORE is not set +CONFIG_NET_XGENE=m +CONFIG_NET_XGENE_V2=m +CONFIG_NET_VENDOR_AQUANTIA=y +# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +CONFIG_NET_VENDOR_ATHEROS=y +# CONFIG_ATL2 is not set +CONFIG_ATL1=m +CONFIG_ATL1E=m +CONFIG_ATL1C=m +CONFIG_ALX=m +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=m +# CONFIG_CNIC is not set +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +# CONFIG_NET_VENDOR_CADENCE is not set +CONFIG_NET_VENDOR_CAVIUM=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_THUNDER_NIC_BGX=m +CONFIG_THUNDER_NIC_RGX=m +CONFIG_CAVIUM_PTP=y +CONFIG_LIQUIDIO_CORE=m +CONFIG_LIQUIDIO=m +CONFIG_LIQUIDIO_VF=m +CONFIG_NET_VENDOR_CHELSIO=y +# CONFIG_CHELSIO_T1 is not set +# CONFIG_CHELSIO_T3 is not set +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_GVE=m +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HISI_FEMAC is not set +# CONFIG_HIP04_ETH is 
not set +CONFIG_HNS_MDIO=m +CONFIG_HNS=m +CONFIG_HNS_DSAF=m +CONFIG_HNS_ENET=m +CONFIG_HNS3=m +CONFIG_HNS3_HCLGE=m +CONFIG_HNS3_DCB=y +CONFIG_HNS3_HCLGEVF=m +CONFIG_HNS3_ENET=m +CONFIG_NET_VENDOR_HUAWEI=y +CONFIG_HINIC=m +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +# CONFIG_I40E_DCB is not set +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +CONFIG_IGC=m +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_MLXBF_GIGE is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +CONFIG_NET_VENDOR_MYRI=y +# CONFIG_MYRI10GE is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETERION is not set +CONFIG_NET_VENDOR_NETRONOME=y +CONFIG_NFP=m +CONFIG_NFP_APP_FLOWER=y +CONFIG_NFP_APP_ABM_NIC=y +CONFIG_NFP_NET_IPSEC=y +CONFIG_NFP_DEBUG=y +# CONFIG_NET_VENDOR_NVIDIA is not set +CONFIG_NET_VENDOR_OKI=y +CONFIG_ETHOC=m +# CONFIG_NET_VENDOR_PACKET_ENGINES is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +CONFIG_NET_VENDOR_QLOGIC=y +CONFIG_QLA3XXX=m +# CONFIG_QLCNIC is not set +CONFIG_NETXEN_NIC=m +CONFIG_QED=m +CONFIG_QED_LL2=y +CONFIG_QED_SRIOV=y +CONFIG_QEDE=m +CONFIG_QED_RDMA=y +CONFIG_QED_ISCSI=y +CONFIG_QED_FCOE=y +CONFIG_QED_OOO=y +# CONFIG_NET_VENDOR_BROCADE is not set +CONFIG_NET_VENDOR_QUALCOMM=y +# CONFIG_QCA7000_SPI is not set +CONFIG_QCOM_EMAC=m +# CONFIG_RMNET is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +CONFIG_NET_VENDOR_ROCKER=y +CONFIG_ROCKER=m +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +CONFIG_NET_VENDOR_SOLARFLARE=y +# CONFIG_SFC is not set +# CONFIG_SFC_FALCON is not set +# CONFIG_SFC_SIENA is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is 
not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_AT803X_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_XGENE=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +CONFIG_MDIO_GPIO=m +CONFIG_MDIO_HISI_FEMAC=m +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_OCTEON=m +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m 
+CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SR9700=m +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +CONFIG_USB_BELKIN=y +CONFIG_USB_ARMLINUX=y +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +CONFIG_USB_NET_ZAURUS=m +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m + +# +# X.25/LAPB support is disabled +# +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +CONFIG_IEEE802154_DRIVERS=m +# CONFIG_IEEE802154_FAKELB is not set +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +# CONFIG_VMXNET3 is not set +# CONFIG_FUJITSU_ES is not set +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +# CONFIG_ISDN is not set + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=m +# CONFIG_INPUT_MATRIXKMAP is not set + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +# CONFIG_INPUT_JOYDEV is not set +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +CONFIG_KEYBOARD_GPIO=m +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_PINEPHONE is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +# CONFIG_KEYBOARD_XTKBD is not set +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +# CONFIG_MOUSE_PS2 is not set +# CONFIG_MOUSE_SERIAL is not set +# 
CONFIG_MOUSE_APPLETOUCH is not set +# CONFIG_MOUSE_BCM5974 is not set +# CONFIG_MOUSE_CYAPA is not set +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +# CONFIG_MOUSE_VSXXXAA is not set +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +# CONFIG_INPUT_TABLET is not set +# CONFIG_INPUT_TOUCHSCREEN is not set +# CONFIG_INPUT_MISC is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_SERIO_SERPORT=y +CONFIG_SERIO_AMBAKMI=y +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +# CONFIG_LEGACY_PTYS is not set +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=32 +CONFIG_SERIAL_8250_RUNTIME_UARTS=4 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_RT288X=y +CONFIG_SERIAL_8250_PERICOM=y +CONFIG_SERIAL_OF_PLATFORM=y + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_AMBA_PL010 is not set +CONFIG_SERIAL_AMBA_PL011=y +CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set +# CONFIG_SERIAL_KGDB_NMI is not set +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_CONSOLE_POLL=y +# CONFIG_SERIAL_JSM is not set +# CONFIG_SERIAL_MSM is not set +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +# CONFIG_SERIAL_ARC is not set +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +# CONFIG_NOZOMI is not set +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_HVC_DCC is not set +# CONFIG_SERIAL_DEV_BUS is not set 
+CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +# CONFIG_IPMI_IPMB is not set +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +# CONFIG_SSIF_IPMI_BMC is not set +# CONFIG_IPMB_DEVICE_INTERFACE is not set +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_HW_RANDOM_HISI=y +CONFIG_HW_RANDOM_HISTB=y +CONFIG_HW_RANDOM_XGENE=m +CONFIG_HW_RANDOM_CAVIUM=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y +CONFIG_HW_RANDOM_CN10K=y +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +# CONFIG_DEVPORT is not set +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=y +CONFIG_TCG_TIS=y +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +# CONFIG_TCG_TIS_I2C_ATMEL is not set +# CONFIG_TCG_TIS_I2C_INFINEON is not set +# CONFIG_TCG_TIS_I2C_NUVOTON is not set +CONFIG_TCG_ATMEL=m +# CONFIG_TCG_INFINEON is not set +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +# CONFIG_TCG_TIS_ST33ZP24_I2C is not set +# CONFIG_TCG_TIS_ST33ZP24_SPI is not set +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=m +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m + +# +# Multiplexer I2C Chip support +# +CONFIG_I2C_ARB_GPIO_CHALLENGE=m +CONFIG_I2C_MUX_GPIO=m +# CONFIG_I2C_MUX_GPMUX is not set +# CONFIG_I2C_MUX_LTC4306 is not set +CONFIG_I2C_MUX_PCA9541=m +CONFIG_I2C_MUX_PCA954x=m +CONFIG_I2C_MUX_PINCTRL=m +# CONFIG_I2C_MUX_REG is not set +# CONFIG_I2C_DEMUX_PINCTRL is not set +CONFIG_I2C_MUX_MLXCPLD=m +# end of Multiplexer I2C Chip support + +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m + +# +# I2C Algorithms +# +CONFIG_I2C_ALGOBIT=m +CONFIG_I2C_ALGOPCF=m +CONFIG_I2C_ALGOPCA=m +# end of I2C Algorithms + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +# CONFIG_I2C_AMD756 is not set +# CONFIG_I2C_AMD8111 is not set +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_HIX5HD2 is not set +# CONFIG_I2C_I801 is not set +# CONFIG_I2C_ISCH is not set +# CONFIG_I2C_PIIX4 is not set +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +# CONFIG_I2C_SIS96X is not set +# CONFIG_I2C_VIA is not set +# CONFIG_I2C_VIAPRO is not set +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# +# CONFIG_I2C_SCMI is not set +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CADENCE is not set +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=m +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=m +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=m +CONFIG_I2C_GPIO_FAULT_INJECTOR=y +CONFIG_I2C_HISI=m +# CONFIG_I2C_NOMADIK is not set +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_QCOM_CCI is not set +CONFIG_I2C_QUP=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_VERSATILE=m +CONFIG_I2C_THUNDERX=m +# CONFIG_I2C_XILINX is not set +CONFIG_I2C_XLP9XX=m + +# +# External 
I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_MLXCPLD is not set +CONFIG_I2C_XGENE_SLIMPRO=m +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +CONFIG_I2C_SLAVE=y +CONFIG_I2C_SLAVE_EEPROM=m +# CONFIG_I2C_SLAVE_TESTUNIT is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +CONFIG_SPI_DEBUG=y +CONFIG_SPI_MASTER=y +# CONFIG_SPI_MEM is not set + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +CONFIG_SPI_CADENCE=m +# CONFIG_SPI_CADENCE_QUADSPI is not set +CONFIG_SPI_DESIGNWARE=m +# CONFIG_SPI_DW_DMA is not set +# CONFIG_SPI_DW_PCI is not set +CONFIG_SPI_DW_MMIO=m +CONFIG_SPI_HISI_KUNPENG=m +CONFIG_SPI_HISI_SFC_V3XX=m +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +CONFIG_SPI_PL022=m +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_QCOM_QSPI is not set +CONFIG_SPI_QUP=y +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_THUNDERX is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +CONFIG_SPI_XLP=m +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +CONFIG_PTP_1588_CLOCK_KVM=y +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +CONFIG_PINCTRL_MSM=y +# CONFIG_PINCTRL_IPQ5018 is not set +# CONFIG_PINCTRL_IPQ5332 is not set +# CONFIG_PINCTRL_IPQ8074 is not set +# CONFIG_PINCTRL_IPQ6018 is not set +# CONFIG_PINCTRL_IPQ9574 is not set +# CONFIG_PINCTRL_MDM9607 is not set +# CONFIG_PINCTRL_MSM8916 is not set +# CONFIG_PINCTRL_MSM8953 is not set +# CONFIG_PINCTRL_MSM8976 is not set +# CONFIG_PINCTRL_MSM8994 is not set +# CONFIG_PINCTRL_MSM8996 is not set +# CONFIG_PINCTRL_MSM8998 is not set +# CONFIG_PINCTRL_QCM2290 is not set +# CONFIG_PINCTRL_QCS404 is not set +CONFIG_PINCTRL_QDF2XXX=y +# 
CONFIG_PINCTRL_QDU1000 is not set +# CONFIG_PINCTRL_SA8775P is not set +# CONFIG_PINCTRL_SC7180 is not set +# CONFIG_PINCTRL_SC7280 is not set +# CONFIG_PINCTRL_SC8180X is not set +# CONFIG_PINCTRL_SC8280XP is not set +# CONFIG_PINCTRL_SDM660 is not set +# CONFIG_PINCTRL_SDM670 is not set +# CONFIG_PINCTRL_SDM845 is not set +# CONFIG_PINCTRL_SDX75 is not set +# CONFIG_PINCTRL_SM6115 is not set +# CONFIG_PINCTRL_SM6125 is not set +# CONFIG_PINCTRL_SM6350 is not set +# CONFIG_PINCTRL_SM6375 is not set +# CONFIG_PINCTRL_SM7150 is not set +# CONFIG_PINCTRL_SM8150 is not set +# CONFIG_PINCTRL_SM8250 is not set +# CONFIG_PINCTRL_SM8350 is not set +# CONFIG_PINCTRL_SM8450 is not set +# CONFIG_PINCTRL_SM8550 is not set +# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set +# CONFIG_PINCTRL_LPASS_LPI is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=m + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +CONFIG_GPIO_DWAPB=m +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +CONFIG_GPIO_GENERIC_PLATFORM=m +# CONFIG_GPIO_GRGPIO is not set +CONFIG_GPIO_HISI=m +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +# CONFIG_GPIO_MB86S7X is not set +CONFIG_GPIO_PL061=y +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_THUNDERX is not set +CONFIG_GPIO_XGENE=y +CONFIG_GPIO_XGENE_SB=m +# CONFIG_GPIO_XILINX is not set +CONFIG_GPIO_XLP=m +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_BT8XX is not set +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_BRCMSTB is not set +CONFIG_POWER_RESET_GPIO=y +CONFIG_POWER_RESET_GPIO_RESTART=y +CONFIG_POWER_RESET_HISI=y +# CONFIG_POWER_RESET_MSM is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_REGULATOR is not set +CONFIG_POWER_RESET_RESTART=y +# CONFIG_POWER_RESET_VEXPRESS is not set +# CONFIG_POWER_RESET_XGENE is not set +CONFIG_POWER_RESET_SYSCON=y +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# 
CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_MANAGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_MANAGER is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24190 is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +CONFIG_CHARGER_SMB347=m +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_RT9467 is not set +# CONFIG_CHARGER_RT9471 is not set +# CONFIG_CHARGER_UCS1002 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +CONFIG_SENSORS_AD7314=m +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM1177 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ARM_SCPI=m +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_FTSTEUTATES is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +CONFIG_SENSORS_G762=m +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +# CONFIG_SENSORS_IBMAEM is not set +# CONFIG_SENSORS_IBMPEX is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +CONFIG_SENSORS_POWR1220=m +# CONFIG_SENSORS_LINEAGE is not set +CONFIG_SENSORS_LTC2945=m +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +# CONFIG_SENSORS_LTC4151 is not 
set +# CONFIG_SENSORS_LTC4215 is not set +CONFIG_SENSORS_LTC4222=m +# CONFIG_SENSORS_LTC4245 is not set +CONFIG_SENSORS_LTC4260=m +# CONFIG_SENSORS_LTC4261 is not set +CONFIG_SENSORS_MAX1111=m +# CONFIG_SENSORS_MAX127 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +CONFIG_SENSORS_MAX31790=m +# CONFIG_SENSORS_MC34VR500 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +CONFIG_SENSORS_ADCXX=m +# CONFIG_SENSORS_LM63 is not set +CONFIG_SENSORS_LM70=m +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +CONFIG_SENSORS_NCT6683=m +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT6775_I2C is not set +CONFIG_SENSORS_NCT7802=m +CONFIG_SENSORS_NCT7904=m +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +# CONFIG_SENSORS_PCF8591 is not set +CONFIG_PMBUS=m +# CONFIG_SENSORS_PMBUS is not set +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +# CONFIG_SENSORS_ADM1275 is not set +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +# CONFIG_SENSORS_LM25066 is not set +# CONFIG_SENSORS_LT7182S is not set +# CONFIG_SENSORS_LTC2978 is not set +CONFIG_SENSORS_LTC3815=m +# CONFIG_SENSORS_MAX15301 is not set +# CONFIG_SENSORS_MAX16064 is not set +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +CONFIG_SENSORS_MAX20751=m +# CONFIG_SENSORS_MAX31785 is not set +# CONFIG_SENSORS_MAX34440 is not set +# CONFIG_SENSORS_MAX8688 is not set +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +CONFIG_SENSORS_TPS40422=m +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +# CONFIG_SENSORS_UCD9000 is 
not set +# CONFIG_SENSORS_UCD9200 is not set +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +# CONFIG_SENSORS_ZL6100 is not set +CONFIG_SENSORS_PWM_FAN=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +CONFIG_SENSORS_SHTC1=m +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_STTS751 is not set +CONFIG_SENSORS_ADC128D818=m +# CONFIG_SENSORS_ADS7828 is not set +CONFIG_SENSORS_ADS7871=m +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +CONFIG_SENSORS_TC74=m +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +CONFIG_SENSORS_TMP103=m +# CONFIG_SENSORS_TMP108 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VEXPRESS=m +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83773G is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set +CONFIG_SENSORS_XGENE=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=y +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_CPU_THERMAL=y +CONFIG_CPU_FREQ_THERMAL=y +# CONFIG_THERMAL_EMULATION is not set +# CONFIG_THERMAL_MMIO is not set +CONFIG_HISI_THERMAL=m + +# +# Qualcomm thermal drivers +# +# CONFIG_QCOM_LMH is not set +# end of Qualcomm thermal drivers + +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +# CONFIG_WDAT_WDT is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_XILINX_WINDOW_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +CONFIG_ARM_SP805_WATCHDOG=m +CONFIG_ARM_SBSA_WATCHDOG=m +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_QCOM_WDT is not set +# 
CONFIG_ARM_SMC_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_HP_WATCHDOG is not set +CONFIG_MARVELL_GTI_WDT=y +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=m +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_MFD_HI655X_PMIC is not set +# CONFIG_LPC_ICH is not set +# CONFIG_LPC_SCH is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +# CONFIG_MFD_VIPERBOARD is not set +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_QCOM_RPM is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SI476X_CORE is not set +# CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +# CONFIG_MFD_VX855 is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_STMFX is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is 
not set +# CONFIG_MFD_VEXPRESS_SYSREG is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +CONFIG_REGULATOR=y +# CONFIG_REGULATOR_DEBUG is not set +# CONFIG_REGULATOR_FIXED_VOLTAGE is not set +# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set +# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set +# CONFIG_REGULATOR_88PG86X is not set +# CONFIG_REGULATOR_ACT8865 is not set +# CONFIG_REGULATOR_AD5398 is not set +# CONFIG_REGULATOR_AW37503 is not set +# CONFIG_REGULATOR_DA9121 is not set +# CONFIG_REGULATOR_DA9210 is not set +# CONFIG_REGULATOR_DA9211 is not set +# CONFIG_REGULATOR_FAN53555 is not set +# CONFIG_REGULATOR_FAN53880 is not set +# CONFIG_REGULATOR_GPIO is not set +# CONFIG_REGULATOR_ISL9305 is not set +# CONFIG_REGULATOR_ISL6271A is not set +# CONFIG_REGULATOR_LP3971 is not set +# CONFIG_REGULATOR_LP3972 is not set +# CONFIG_REGULATOR_LP872X is not set +# CONFIG_REGULATOR_LP8755 is not set +# CONFIG_REGULATOR_LTC3589 is not set +# CONFIG_REGULATOR_LTC3676 is not set +# CONFIG_REGULATOR_MAX1586 is not set +# CONFIG_REGULATOR_MAX77857 is not set +# CONFIG_REGULATOR_MAX8649 is not set +# CONFIG_REGULATOR_MAX8660 is not set +# CONFIG_REGULATOR_MAX8893 is not set +# CONFIG_REGULATOR_MAX8952 is not set +# CONFIG_REGULATOR_MAX8973 is not set +# CONFIG_REGULATOR_MAX20086 is not set +# CONFIG_REGULATOR_MAX20411 is not set +# CONFIG_REGULATOR_MAX77826 is not set +# CONFIG_REGULATOR_MCP16502 is not set +# CONFIG_REGULATOR_MP5416 is not set +# CONFIG_REGULATOR_MP8859 is not set +# CONFIG_REGULATOR_MP886X is not set +# CONFIG_REGULATOR_MPQ7920 is not set +# CONFIG_REGULATOR_MT6311 is not set +# CONFIG_REGULATOR_PCA9450 is not set +# CONFIG_REGULATOR_PF8X00 is not set +# CONFIG_REGULATOR_PFUZE100 is not set +# CONFIG_REGULATOR_PV88060 is not set +# CONFIG_REGULATOR_PV88080 is not set +# CONFIG_REGULATOR_PV88090 is not set +# CONFIG_REGULATOR_PWM is not set +# CONFIG_REGULATOR_QCOM_REFGEN is not set +# CONFIG_REGULATOR_RAA215300 is not set +# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set +# CONFIG_REGULATOR_RT4801 is not set +# CONFIG_REGULATOR_RT4803 is not set +# CONFIG_REGULATOR_RT5190A is not set +# CONFIG_REGULATOR_RT5739 is not set +# CONFIG_REGULATOR_RT5759 is not set +# CONFIG_REGULATOR_RT6160 is not set +# CONFIG_REGULATOR_RT6190 is not set +# CONFIG_REGULATOR_RT6245 is not set +# CONFIG_REGULATOR_RTQ2134 is not set +# CONFIG_REGULATOR_RTMV20 is not set +# CONFIG_REGULATOR_RTQ6752 is not set +# CONFIG_REGULATOR_RTQ2208 is not set +# CONFIG_REGULATOR_SLG51000 is not set +# CONFIG_REGULATOR_SY8106A is not set +# CONFIG_REGULATOR_SY8824X is not set +# CONFIG_REGULATOR_SY8827N is not set +# CONFIG_REGULATOR_TPS51632 is not set +# CONFIG_REGULATOR_TPS62360 is not set +# CONFIG_REGULATOR_TPS6286X is not set +# CONFIG_REGULATOR_TPS6287X is not set +# CONFIG_REGULATOR_TPS65023 is not set +# CONFIG_REGULATOR_TPS6507X is not set +# CONFIG_REGULATOR_TPS65132 is not set +# CONFIG_REGULATOR_TPS6524X is not set +# CONFIG_REGULATOR_VCTRL is not set +# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set +# CONFIG_RC_CORE is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_SUPPORT is not set +# end of CEC support + +# CONFIG_MEDIA_SUPPORT is not set + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +CONFIG_DRM=m +CONFIG_DRM_KMS_HELPER=m 
+CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_I2C_NXP_TDA998X=m +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_HDLCD is not set +# CONFIG_DRM_MALI_DISPLAY is not set +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y + +# +# ACP (Audio CoProcessor) Configuration +# +CONFIG_DRM_AMD_ACP=y +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_HSA_AMD=y +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +# CONFIG_DRM_VMWGFX is not set +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +# CONFIG_DRM_MSM is not set +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# 
CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_HISI_HIBMC=m +# CONFIG_DRM_HISI_KIRIN is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_PL111 is not set +# CONFIG_DRM_LIMA is not set +# CONFIG_DRM_PANFROST is not set +# CONFIG_DRM_TIDSS is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_ARMCLCD is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_SIMPLE=y +CONFIG_FB_SSD1307=m +# CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID 
is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=m +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +CONFIG_BACKLIGHT_PWM=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +CONFIG_BACKLIGHT_GPIO=m +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +# CONFIG_HID_ASUS is not set +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +# CONFIG_HID_CMEDIA is not set +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m 
+CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=m +# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set +# CONFIG_HID_ALPS is not set +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +CONFIG_USB_ULPI_BUS=m +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +# CONFIG_USB_XHCI_DBGCAP is not set +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HISTB is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=m +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y 
+CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +# CONFIG_USB_SERIAL_CONSOLE is not set +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_SIMPLE=m +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_USB_QCOM_EUD is not set +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set 
+CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +CONFIG_USB_CHAOSKEY=m +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +# CONFIG_USB_SPEEDTOUCH is not set +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# CONFIG_USB_ULPI is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +# CONFIG_TYPEC_FUSB302 is not set +# CONFIG_TYPEC_QCOM_PMIC is not set +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +# CONFIG_PWRSEQ_EMMC is not set +# CONFIG_PWRSEQ_SIMPLE is not set +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_ARMMMCI=m +CONFIG_MMC_STM32_SDMMC=y +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +# CONFIG_MMC_SDHCI_MSM is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_DW=m +CONFIG_MMC_DW_PLTFM=m +CONFIG_MMC_DW_BLUEFIELD=m +# CONFIG_MMC_DW_EXYNOS is not set +# CONFIG_MMC_DW_HI3798CV200 is not set +# CONFIG_MMC_DW_K3 is not set +# CONFIG_MMC_DW_PCI is not set +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +CONFIG_MMC_TOSHIBA_PCI=m +CONFIG_MMC_MTK=m +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=m +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 
is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_REGULATOR is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_LT3593=m +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# +# CONFIG_LEDS_AAT1290 is not set +# CONFIG_LEDS_AS3645A is not set +# CONFIG_LEDS_KTD2692 is not set +# CONFIG_LEDS_LM3601X is not set +# CONFIG_LEDS_RT4505 is not set +# CONFIG_LEDS_RT8515 is not set +# CONFIG_LEDS_SGM3140 is not set + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +# CONFIG_LEDS_TRIGGER_DISK is not set +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +# CONFIG_LEDS_TRIGGER_AUDIO is not set +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HNS=m +CONFIG_INFINIBAND_HNS_HIP08=y +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_GHES=y +CONFIG_EDAC_THUNDERX=m +CONFIG_EDAC_XGENE=m +# CONFIG_EDAC_DMC520 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y 
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +CONFIG_RTC_DRV_ABB5ZES3=m +# CONFIG_RTC_DRV_ABEOZ9 is not set +CONFIG_RTC_DRV_ABX80X=m +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1374_WDT=y +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF85063=m +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8010=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +CONFIG_RTC_DRV_M41T93=m +CONFIG_RTC_DRV_M41T94=m +# CONFIG_RTC_DRV_DS1302 is not set +CONFIG_RTC_DRV_DS1305=m +CONFIG_RTC_DRV_DS1343=m +CONFIG_RTC_DRV_DS1347=m +CONFIG_RTC_DRV_DS1390=m +# CONFIG_RTC_DRV_MAX6916 is not set +CONFIG_RTC_DRV_R9701=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_RS5C348=m +CONFIG_RTC_DRV_MAX6902=m +CONFIG_RTC_DRV_PCF2123=m +CONFIG_RTC_DRV_MCP795=m +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +CONFIG_RTC_DRV_PCF2127=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1685_FAMILY=m +CONFIG_RTC_DRV_DS1685=y +# CONFIG_RTC_DRV_DS1689 is not set +# CONFIG_RTC_DRV_DS17285 is not set +# CONFIG_RTC_DRV_DS17485 is not set +# CONFIG_RTC_DRV_DS17885 is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=y +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_PL030 is not set +CONFIG_RTC_DRV_PL031=y +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +# CONFIG_RTC_DRV_XGENE is not set +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_AMBA_PL08X is not set +# CONFIG_BCM_SBA_RAID is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_FSL_QDMA is not set +# CONFIG_HISI_DMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_K3_DMA is not set +# CONFIG_MV_XOR_V2 is not set +# CONFIG_PL330_DMA is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XGENE_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# 
CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices +# +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y +CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference 
designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +CONFIG_MAILBOX_TEST=m +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SMMU_V3_SVA=y +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# 
+CONFIG_KUNPENG_HCCS=m +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# 
CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=m +CONFIG_HISI_PCIE_PMU=m +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set 
+# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# 
CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_DEBUG=y +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m 
+CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set 
+CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_CURVE25519=m +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# 
CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# 
CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y 
+CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# 
CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +# CONFIG_UBSAN_UNREACHABLE is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_SW_TAGS is not set +# CONFIG_KASAN_HW_TAGS is not set +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# 
CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_KFENCE_NUM_OBJECTS=255 +CONFIG_KFENCE_DEFERRABLE=y +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +CONFIG_WQ_WATCHDOG=y +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +# CONFIG_LOCK_STAT is not set +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) 
+ +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# 
CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +CONFIG_PERCPU_TEST=m +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/arm64/configs/anolis_defconfig b/arch/arm64/configs/anolis_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..a563537ca90739502d81edd0665dc64c03927332 --- /dev/null +++ b/arch/arm64/configs/anolis_defconfig @@ -0,0 +1,7142 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/arm64 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_IRQ_IPI=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_ARCH_HAS_TICK_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_SCHED_THERMAL_PRESSURE=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y 
+CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=20 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +CONFIG_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_ARM64=y +CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_64BIT=y +CONFIG_MMU=y +CONFIG_ARM64_PAGE_SHIFT=12 +CONFIG_ARM64_CONT_PTE_SHIFT=4 +CONFIG_ARM64_CONT_PMD_SHIFT=4 +CONFIG_ARCH_MMAP_RND_BITS_MIN=18 +CONFIG_ARCH_MMAP_RND_BITS_MAX=33 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 
+CONFIG_STACKTRACE_SUPPORT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_SMP=y +CONFIG_KERNEL_MODE_NEON=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y + +# +# Platform selection +# +# CONFIG_ARCH_ACTIONS is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_APPLE is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_BITMAIN is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SPARX5 is not set +# CONFIG_ARCH_K3 is not set +# CONFIG_ARCH_LG1K is not set +CONFIG_ARCH_HISI=y +# CONFIG_ARCH_KEEMBAY is not set +# CONFIG_ARCH_MEDIATEK is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_NXP is not set +# CONFIG_ARCH_MA35 is not set +# CONFIG_ARCH_NPCM is not set +CONFIG_ARCH_PHYTIUM=y +CONFIG_ARCH_QCOM=y +# CONFIG_ARCH_REALTEK is not set +# CONFIG_ARCH_RENESAS is not set +# CONFIG_ARCH_ROCKCHIP is not set +CONFIG_ARCH_SEATTLE=y +# CONFIG_ARCH_INTEL_SOCFPGA is not set +# CONFIG_ARCH_STM32 is not set +# CONFIG_ARCH_SYNQUACER is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_SPRD is not set +CONFIG_ARCH_THUNDER=y +CONFIG_ARCH_THUNDER2=y +# CONFIG_ARCH_UNIPHIER is not set +CONFIG_ARCH_VEXPRESS=y +# CONFIG_ARCH_VISCONTI is not set +CONFIG_ARCH_XGENE=y +# CONFIG_ARCH_ZYNQMP is not set +# end of Platform selection + +# +# Kernel Features +# + +# +# ARM errata workarounds via the alternatives framework +# +CONFIG_AMPERE_ERRATUM_AC03_CPU_38=y +CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y +CONFIG_ARM64_ERRATUM_826319=y +CONFIG_ARM64_ERRATUM_827319=y +CONFIG_ARM64_ERRATUM_824069=y +CONFIG_ARM64_ERRATUM_819472=y +CONFIG_ARM64_ERRATUM_832075=y +CONFIG_ARM64_ERRATUM_834220=y +CONFIG_ARM64_ERRATUM_1742098=y +CONFIG_ARM64_ERRATUM_845719=y +CONFIG_ARM64_ERRATUM_843419=y +CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y +CONFIG_ARM64_ERRATUM_1024718=y +CONFIG_ARM64_ERRATUM_1418040=y +CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT=y +CONFIG_ARM64_ERRATUM_1165522=y +CONFIG_ARM64_ERRATUM_1319367=y +CONFIG_ARM64_ERRATUM_1530923=y +CONFIG_ARM64_WORKAROUND_REPEAT_TLBI=y +CONFIG_ARM64_ERRATUM_2441007=y +CONFIG_ARM64_ERRATUM_1286807=y +CONFIG_ARM64_ERRATUM_1463225=y +CONFIG_ARM64_ERRATUM_1542419=y +CONFIG_ARM64_ERRATUM_1508412=y +CONFIG_ARM64_ERRATUM_2051678=y +CONFIG_ARM64_ERRATUM_2077057=y +CONFIG_ARM64_ERRATUM_2658417=y +CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE=y +CONFIG_ARM64_ERRATUM_2054223=y +CONFIG_ARM64_ERRATUM_2067961=y +CONFIG_ARM64_ERRATUM_2441009=y +CONFIG_ARM64_ERRATUM_2457168=y +CONFIG_ARM64_ERRATUM_2645198=y +CONFIG_ARM64_ERRATUM_2966298=y +CONFIG_CAVIUM_ERRATUM_22375=y +CONFIG_CAVIUM_ERRATUM_23144=y +CONFIG_CAVIUM_ERRATUM_23154=y +CONFIG_CAVIUM_ERRATUM_27456=y +CONFIG_CAVIUM_ERRATUM_30115=y +CONFIG_CAVIUM_TX2_ERRATUM_219=y +CONFIG_FUJITSU_ERRATUM_010001=y +CONFIG_HISILICON_ERRATUM_161600802=y +CONFIG_QCOM_FALKOR_ERRATUM_1003=y +CONFIG_QCOM_FALKOR_ERRATUM_1009=y +CONFIG_QCOM_QDF2400_ERRATUM_0065=y +CONFIG_QCOM_FALKOR_ERRATUM_E1041=y +CONFIG_NVIDIA_CARMEL_CNP_ERRATUM=y +CONFIG_ROCKCHIP_ERRATUM_3588001=y +CONFIG_SOCIONEXT_SYNQUACER_PREITS=y +# end of ARM errata workarounds via the alternatives framework + +CONFIG_ARM64_4K_PAGES=y +# CONFIG_ARM64_16K_PAGES is not set +# CONFIG_ARM64_64K_PAGES is not set +# 
CONFIG_ARM64_VA_BITS_39 is not set +CONFIG_ARM64_VA_BITS_48=y +CONFIG_ARM64_VA_BITS=48 +CONFIG_ARM64_PA_BITS_48=y +CONFIG_ARM64_PA_BITS=48 +# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=1024 +CONFIG_HOTPLUG_CPU=y +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_HW_PERF_EVENTS=y +CONFIG_CC_HAVE_SHADOW_CALL_STACK=y +CONFIG_PARAVIRT=y +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_TRANS_TABLE=y +# CONFIG_XEN is not set +CONFIG_ARCH_FORCE_MAX_ORDER=10 +CONFIG_UNMAP_KERNEL_AT_EL0=y +CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY=y +# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set +# CONFIG_ARM64_SW_TTBR0_PAN is not set +CONFIG_ARM64_TAGGED_ADDR_ABI=y +CONFIG_COMPAT=y +CONFIG_KUSER_HELPERS=y +# CONFIG_COMPAT_ALIGNMENT_FIXUPS is not set +# CONFIG_ARMV8_DEPRECATED is not set + +# +# ARMv8.1 architectural features +# +CONFIG_ARM64_HW_AFDBM=y +CONFIG_ARM64_PAN=y +CONFIG_AS_HAS_LSE_ATOMICS=y +CONFIG_ARM64_LSE_ATOMICS=y +CONFIG_ARM64_USE_LSE_ATOMICS=y +# end of ARMv8.1 architectural features + +# +# ARMv8.2 architectural features +# +CONFIG_AS_HAS_ARMV8_2=y +CONFIG_AS_HAS_SHA3=y +CONFIG_ARM64_PMEM=y +CONFIG_ARM64_RAS_EXTN=y +CONFIG_ARM64_CNP=y +# end of ARMv8.2 architectural features + +# +# ARMv8.3 architectural features +# +# CONFIG_ARM64_PTR_AUTH is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y +CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y +CONFIG_AS_HAS_ARMV8_3=y +CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y +CONFIG_AS_HAS_LDAPR=y +# end of ARMv8.3 architectural features + +# +# ARMv8.4 architectural features +# +CONFIG_ARM64_AMU_EXTN=y +CONFIG_AS_HAS_ARMV8_4=y +CONFIG_ARM64_TLB_RANGE=y +CONFIG_ARM64_MPAM=y +# end of ARMv8.4 architectural features + +# +# ARMv8.5 architectural features +# +CONFIG_AS_HAS_ARMV8_5=y +# CONFIG_ARM64_BTI is not set +CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y +CONFIG_ARM64_E0PD=y +CONFIG_ARM64_AS_HAS_MTE=y +CONFIG_ARM64_MTE=y +# end of ARMv8.5 architectural features + +# +# ARMv8.7 architectural features +# +CONFIG_ARM64_EPAN=y +# end of ARMv8.7 architectural features + +CONFIG_ARM64_SVE=y +CONFIG_ARM64_SME=y +CONFIG_ARM64_PSEUDO_NMI=y +# CONFIG_ARM64_DEBUG_PRIORITY_MASKING is not set +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_MODULE_REGION_FULL=y +CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y +CONFIG_STACKPROTECTOR_PER_TASK=y +# end of Kernel Features + +# +# Boot options +# +CONFIG_ARM64_ACPI_PARKING_PROTOCOL=y +CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_FORCE is not set +CONFIG_EFI_STUB=y +CONFIG_EFI=y +CONFIG_DMI=y +# end of Boot options + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +# CONFIG_PM_ADVANCED_DEBUG is not set +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_CLK=y 
+CONFIG_PM_GENERIC_DOMAINS=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_PM_GENERIC_DOMAINS_SLEEP=y +CONFIG_PM_GENERIC_DOMAINS_OF=y +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +# end of Power management options + +# +# CPU Power Management +# + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set + +# +# ARM CPU Idle Drivers +# +# CONFIG_ARM_PSCI_CPUIDLE is not set +# end of ARM CPU Idle Drivers +# end of CPU Idle + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_ACPI_CPPC_CPUFREQ=m +CONFIG_ACPI_CPPC_CPUFREQ_FIE=y +CONFIG_ARM_SCPI_CPUFREQ=m +# CONFIG_ARM_QCOM_CPUFREQ_HW is not set +# end of CPU Frequency scaling +# end of CPU Power Management + +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_CCA_REQUIRED=y +CONFIG_ACPI_TABLE_LIB=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +# CONFIG_ACPI_TAD is not set +# CONFIG_ACPI_DOCK is not set +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_BGRT is not set +CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_SEA=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +# CONFIG_ACPI_APEI_ERST_DEBUG is not set +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_IORT=y +CONFIG_ACPI_GTDT=y +CONFIG_ACPI_AGDI=y +CONFIG_ACPI_APMT=y +CONFIG_ACPI_PPTT=y +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_PRMT=y +CONFIG_IRQ_BYPASS_MANAGER=y +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y 
+CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +# CONFIG_NVHE_EL2_DEBUG is not set + +# +# General architecture-dependent options +# +CONFIG_ARCH_HAS_SUBPAGE_FAULTS=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_KRETPROBES=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_KEEPINITRD=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y +# CONFIG_SHADOW_CALL_STACK is not set +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_ARCH_MMAP_RND_BITS=18 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=11 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +CONFIG_CLONE_BACKWARDS=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_COMPILER_H=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# 
CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_HAS_RELR=y +CONFIG_RELR=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_8B=y +CONFIG_FUNCTION_ALIGNMENT=8 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +# CONFIG_UNIXWARE_DISKLABEL is not set +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y 
+CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y +CONFIG_ARCH_HAVE_ELF_PROT=y +CONFIG_ARCH_USE_GNU_PROPERTY=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_GENERIC_EARLY_IOREMAP=y +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y 
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y +CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +# CONFIG_DEVICE_PRIVATE is not set +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_USES_PG_ARCH_X=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +# CONFIG_TCP_CONG_CDG is not set +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# 
CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m 
+CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +# CONFIG_IP_VS_DEBUG is not set +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m 
+CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m 
+CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set 
+CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +# CONFIG_BT is not set +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +CONFIG_RFKILL_GPIO=m +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_ARM_AMBA=y +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCI_SYSCALL=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +# CONFIG_PCI_PF_STUB is not set +CONFIG_PCI_ATS=y +CONFIG_PCI_DOE=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +# CONFIG_PCI_P2PDMA is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_ACPI_IBM=m +# CONFIG_HOTPLUG_PCI_CPCI is not set +# CONFIG_HOTPLUG_PCI_SHPC is not set + +# +# PCI controller drivers +# +# CONFIG_PCIE_ALTERA is not set +CONFIG_PCI_HOST_THUNDER_PEM=y +CONFIG_PCI_HOST_THUNDER_ECAM=y +# CONFIG_PCI_FTPCI100 is not set +CONFIG_PCI_HOST_COMMON=y +CONFIG_PCI_HOST_GENERIC=y +# CONFIG_PCIE_HISI_ERR is not set +# CONFIG_PCIE_MICROCHIP_HOST is not set +CONFIG_PCI_XGENE=y +CONFIG_PCI_XGENE_MSI=y +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# 
+# DesignWare-based PCIe controllers +# +CONFIG_PCIE_DW=y +CONFIG_PCIE_DW_HOST=y +# CONFIG_PCIE_AL is not set +# CONFIG_PCI_MESON is not set +CONFIG_PCI_HISI=y +# CONFIG_PCIE_KIRIN is not set +# CONFIG_PCIE_HISI_STB is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# CONFIG_PCIE_QCOM is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +CONFIG_CXL_BUS=m +CONFIG_CXL_PCI=m +# CONFIG_CXL_MEM_RAW_COMMANDS is not set +CONFIG_CXL_ACPI=m +CONFIG_CXL_PMEM=m +CONFIG_CXL_MEM=m +CONFIG_CXL_PORT=m +CONFIG_CXL_SUSPEND=y +CONFIG_CXL_REGION=y +# CONFIG_CXL_REGION_INVALIDATION_TEST is not set +CONFIG_CXL_PMU=m +CONFIG_PCCARD=y +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +# CONFIG_RAPIDIO is not set + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +# CONFIG_UEVENT_HELPER is not set +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_FW_LOADER_SYSFS=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +# CONFIG_FW_LOADER_COMPRESS is not set +CONFIG_FW_CACHE=y +CONFIG_FW_UPLOAD=y +# end of Firmware loader + +CONFIG_ALLOW_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +CONFIG_HMEM_REPORTING=y +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +CONFIG_GENERIC_ARCH_TOPOLOGY=y +CONFIG_GENERIC_ARCH_NUMA=y +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_MOXTET is not set +CONFIG_HISILICON_LPC=y +# CONFIG_QCOM_EBI2 is not set +# CONFIG_QCOM_SSC_BLOCK_BUS is not set +CONFIG_VEXPRESS_CONFIG=y +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# CONFIG_ARM_SCMI_PROTOCOL is not set +# end of ARM System Control and Management Interface Protocol + +CONFIG_ARM_SCPI_PROTOCOL=m +CONFIG_ARM_SCPI_POWER_DOMAIN=m +CONFIG_ARM_SDE_INTERFACE=y +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +# CONFIG_ISCSI_IBFT is not set +CONFIG_FW_CFG_SYSFS=y +# CONFIG_FW_CFG_SYSFS_CMDLINE is not set +CONFIG_QCOM_SCM=y +# CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT is not set +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_ARM_FFA_TRANSPORT is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_EFI_SOFT_RESERVE=y +CONFIG_EFI_PARAMS_FROM_FDT=y +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +# 
CONFIG_EFI_ZBOOT is not set +CONFIG_EFI_ARMSTUB_DTB_LOADER=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +# CONFIG_EFI_CAPSULE_LOADER is not set +# CONFIG_EFI_TEST is not set +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +CONFIG_UEFI_CPER=y +CONFIG_UEFI_CPER_ARM=y +CONFIG_ARM_PSCI_FW=y +# CONFIG_ARM_PSCI_CHECKER is not set +CONFIG_HAVE_ARM_SMCCC=y +CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y +CONFIG_ARM_SMCCC_SOC_ID=y + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_AFS_PARTS is not set +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +# CONFIG_MTD_RAM is not set +# CONFIG_MTD_ROM is not set +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +CONFIG_MTD_PHYSMAP=m +# CONFIG_MTD_PHYSMAP_COMPAT is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +# CONFIG_MTD_BLOCK2MTD is not set + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +# CONFIG_MTD_SPI_NOR is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y 
+CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_DYNAMIC=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +CONFIG_OF_RESOLVE=y +CONFIG_OF_OVERLAY=y +CONFIG_OF_NUMA=y +# CONFIG_PARPORT is not set +CONFIG_PNP=y +CONFIG_PNP_DEBUG_MESSAGES=y + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +CONFIG_ZRAM_DEF_COMP_LZORLE=y +# CONFIG_ZRAM_DEF_COMP_ZSTD is not set +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +CONFIG_ZRAM_DEF_COMP="lzo-rle" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +# CONFIG_BLK_DEV_DRBD is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +# CONFIG_NVME_TARGET_PASSTHRU is not set +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +# CONFIG_TIFM_7XX1 is not set +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +# CONFIG_APDS9802ALS is not set +# CONFIG_ISL29003 is not set +# CONFIG_ISL29020 is not set +# CONFIG_SENSORS_TSL2550 is not set +# CONFIG_SENSORS_BH1770 is not set +# CONFIG_SENSORS_APDS990X is not set +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +# CONFIG_EEPROM_AT24 is not set +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +# CONFIG_SENSORS_LIS3_I2C is not set + +# +# Altera FPGA firmware download module (requires I2C) +# +# CONFIG_ALTERA_STAPL is not set +# CONFIG_VMWARE_VMCI is not set +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +# CONFIG_MISC_RTSX_PCI is not set +# CONFIG_MISC_RTSX_USB is not set +CONFIG_UACCE=m +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# 
CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=m +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_SCSI_BNX2X_FCOE is not set +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_HISI_SAS=m +CONFIG_SCSI_HISI_SAS_PCI=m +# CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_MPI3MR=m +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +CONFIG_SCSI_IPR=m +CONFIG_SCSI_IPR_TRACE=y +CONFIG_SCSI_IPR_DUMP=y +# CONFIG_SCSI_QLOGIC_1280 is not set +CONFIG_SCSI_QLA_FC=m +# CONFIG_TCM_QLA2XXX is not set +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_QEDI=m +CONFIG_QEDF=m +CONFIG_SCSI_LPFC=m +# CONFIG_SCSI_LPFC_DEBUG_FS is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +CONFIG_SCSI_DEBUG=m +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=m +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=m +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=m +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +CONFIG_AHCI_XGENE=m +CONFIG_SATA_AHCI_SEATTLE=m +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers 
with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +# CONFIG_PATA_ATIIXP is not set +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +# CONFIG_MD_MULTIPATH is not set +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +# CONFIG_BCACHE is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_REMOTE_TARGET is 
not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+# CONFIG_FUSION_CTL is not set
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+# CONFIG_ATM_DRIVERS is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+# CONFIG_PCNET32 is not set
+CONFIG_AMD_XGBE=m
+# CONFIG_AMD_XGBE_DCB is not set
+# CONFIG_PDS_CORE is not set
+CONFIG_NET_XGENE=m
+CONFIG_NET_XGENE_V2=m
+CONFIG_NET_VENDOR_AQUANTIA=y
+# CONFIG_AQTION is not set
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+# CONFIG_CNIC is not set
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+# CONFIG_NET_VENDOR_CADENCE is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+CONFIG_THUNDER_NIC_PF=m
+CONFIG_THUNDER_NIC_VF=m
+CONFIG_THUNDER_NIC_BGX=m
+CONFIG_THUNDER_NIC_RGX=m
+CONFIG_CAVIUM_PTP=y
+CONFIG_LIQUIDIO_CORE=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
+CONFIG_DNET=m
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_GVE=m
+CONFIG_NET_VENDOR_HISILICON=y
+# CONFIG_HIX5HD2_GMAC is not set
+# CONFIG_HISI_FEMAC is not set
+# CONFIG_HIP04_ETH is not set
+CONFIG_HNS_MDIO=m
+CONFIG_HNS=m
+CONFIG_HNS_DSAF=m
+CONFIG_HNS_ENET=m
+CONFIG_HNS3=m
+CONFIG_HNS3_HCLGE=m
+CONFIG_HNS3_DCB=y
+CONFIG_HNS3_HCLGEVF=m
+CONFIG_HNS3_ENET=m
+CONFIG_NET_VENDOR_HUAWEI=y
+CONFIG_HINIC=m
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=m
+# CONFIG_I40E_DCB is not set
+CONFIG_IAVF=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
+CONFIG_FM10K=m
+CONFIG_IGC=m
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_ADI=y
+# CONFIG_ADIN1110 is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_LITEX_LITEETH is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
+CONFIG_MLX5_CORE_IPOIB=y
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+# CONFIG_MLXBF_GIGE is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+CONFIG_NET_VENDOR_NETRONOME=y
+CONFIG_NFP=m
+CONFIG_NFP_APP_FLOWER=y
+CONFIG_NFP_APP_ABM_NIC=y
+CONFIG_NFP_NET_IPSEC=y
+# CONFIG_NFP_DEBUG is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_ETHOC=m
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_QLA3XXX=m
+# CONFIG_QLCNIC is not set
+CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QED_LL2=y
+CONFIG_QED_SRIOV=y
+CONFIG_QEDE=m
+CONFIG_QED_RDMA=y
+CONFIG_QED_ISCSI=y
+CONFIG_QED_FCOE=y
+CONFIG_QED_OOO=y
+# CONFIG_NET_VENDOR_BROCADE is not set
+CONFIG_NET_VENDOR_QUALCOMM=y
+# CONFIG_QCA7000_SPI is not set
+CONFIG_QCOM_EMAC=m
+# CONFIG_RMNET is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_ROCKER=m
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+CONFIG_NET_VENDOR_SOLARFLARE=y
+# CONFIG_SFC is not set
+# CONFIG_SFC_FALCON is not set
+# CONFIG_SFC_SIENA is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NET_VENDOR_XILINX=y
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_XILINX_AXI_EMAC is not set
+# CONFIG_XILINX_LL_TEMAC is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=m
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
+CONFIG_SFP=m
+
+#
+# MII PHY device drivers
+#
+CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=m
+CONFIG_AX88796B_PHY=m
+CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
+CONFIG_MICREL_PHY=m
+# CONFIG_MICROCHIP_T1S_PHY is not set
+CONFIG_MICROCHIP_PHY=m
+CONFIG_MICROCHIP_T1_PHY=m
+CONFIG_MICROSEMI_PHY=m
+# CONFIG_MOTORCOMM_PHY is not set
+CONFIG_NATIONAL_PHY=m
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
+CONFIG_AT803X_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+CONFIG_MICREL_KS8995MA=m
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_OF_MDIO=y
+CONFIG_ACPI_MDIO=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MDIO_XGENE=m
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_BCM_UNIMAC=m
+CONFIG_MDIO_CAVIUM=m
+CONFIG_MDIO_GPIO=m
+CONFIG_MDIO_HISI_FEMAC=m
+CONFIG_MDIO_I2C=m
+# CONFIG_MDIO_MVUSB is not set
+CONFIG_MDIO_MSCC_MIIM=m
+CONFIG_MDIO_OCTEON=m
+# CONFIG_MDIO_IPQ4019 is not set
+# CONFIG_MDIO_IPQ8064 is not set
+CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=m
+# end of PCS device drivers
+
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_SR9700=m
+# CONFIG_USB_NET_SR9800 is not set
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+# CONFIG_WLAN is not set
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+# CONFIG_HDLC_RAW_ETH is not set
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+
+#
+# X.25/LAPB support is disabled
+#
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_FARSYNC is not set
+CONFIG_IEEE802154_DRIVERS=m
+# CONFIG_IEEE802154_FAKELB is not set
+# CONFIG_IEEE802154_AT86RF230 is not set
+# CONFIG_IEEE802154_MRF24J40 is not set
+# CONFIG_IEEE802154_CC2520 is not set
+# CONFIG_IEEE802154_ATUSB is not set
+# CONFIG_IEEE802154_ADF7242 is not set
+# CONFIG_IEEE802154_CA8210 is not set
+# CONFIG_IEEE802154_MCR20A is not set
+# CONFIG_IEEE802154_HWSIM is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+# CONFIG_VMXNET3 is not set
+# CONFIG_FUJITSU_ES is not set
+CONFIG_NETDEVSIM=m
+CONFIG_NET_FAILOVER=m
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_SPARSEKMAP=m
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=m
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_PINEPHONE is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_OMAP4 is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CAP11XX is not set
+# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+CONFIG_INPUT_MOUSE=y
+# CONFIG_MOUSE_PS2 is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_CYAPA is not set
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+CONFIG_MOUSE_ELAN_I2C_SMBUS=y
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+# CONFIG_INPUT_MISC is not set
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+CONFIG_RMI4_SPI=m
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+CONFIG_RMI4_F34=y
+# CONFIG_RMI4_F3A is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_SERPORT=y
+CONFIG_SERIO_AMBAKMI=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+# CONFIG_SERIO_APBPS2 is not set
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+CONFIG_SERIAL_8250_16550A_VARIANTS=y
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_FSL=y
+CONFIG_SERIAL_8250_DW=y
+CONFIG_SERIAL_8250_RT288X=y
+CONFIG_SERIAL_8250_PERICOM=y
+CONFIG_SERIAL_OF_PLATFORM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set
+# CONFIG_SERIAL_KGDB_NMI is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_MSM is not set
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+# CONFIG_NOZOMI is not set
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+# CONFIG_HVC_DCC is not set
+# CONFIG_SERIAL_DEV_BUS is not set
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+# CONFIG_IPMI_IPMB is not set
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+# CONFIG_SSIF_IPMI_BMC is not set
+# CONFIG_IPMB_DEVICE_INTERFACE is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+# CONFIG_HW_RANDOM_BA431 is not set
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_HW_RANDOM_HISI=y
+CONFIG_HW_RANDOM_HISTB=y
+CONFIG_HW_RANDOM_XGENE=m
+CONFIG_HW_RANDOM_CAVIUM=m
+# CONFIG_HW_RANDOM_CCTRNG is not set
+# CONFIG_HW_RANDOM_XIPHERA is not set
+CONFIG_HW_RANDOM_ARM_SMCCC_TRNG=y
+CONFIG_HW_RANDOM_CN10K=y
+# CONFIG_APPLICOM is not set
+CONFIG_DEVMEM=y
+# CONFIG_DEVPORT is not set
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=y
+CONFIG_TCG_TIS=y
+CONFIG_TCG_TIS_SPI=m
+# CONFIG_TCG_TIS_SPI_CR50 is not set
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
+# CONFIG_TCG_TIS_I2C_ATMEL is not set
+# CONFIG_TCG_TIS_I2C_INFINEON is not set
+# CONFIG_TCG_TIS_I2C_NUVOTON is not set
+CONFIG_TCG_ATMEL=m
+# CONFIG_TCG_INFINEON is not set
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+# CONFIG_TCG_TIS_ST33ZP24_I2C is not set
+# CONFIG_TCG_TIS_ST33ZP24_SPI is not set
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+CONFIG_I2C_ARB_GPIO_CHALLENGE=m
+CONFIG_I2C_MUX_GPIO=m
+# CONFIG_I2C_MUX_GPMUX is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+CONFIG_I2C_MUX_PCA9541=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_MUX_PINCTRL=m
+# CONFIG_I2C_MUX_REG is not set
+# CONFIG_I2C_DEMUX_PINCTRL is not set
+CONFIG_I2C_MUX_MLXCPLD=m
+# end of Multiplexer I2C Chip support
+
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I2C_SMBUS=m
+
+#
+# I2C Algorithms
+#
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCF=m
+CONFIG_I2C_ALGOPCA=m
+# end of I2C Algorithms
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_AMD_MP2 is not set
+# CONFIG_I2C_HIX5HD2 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+CONFIG_I2C_NFORCE2=m
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+# CONFIG_I2C_ZHAOXIN is not set
+
+#
+# ACPI drivers
+#
+# CONFIG_I2C_SCMI is not set
+# CONFIG_I2C_ZHAOXIN_SMBUS is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CADENCE is not set
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE=m
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+CONFIG_I2C_GPIO=m
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
+CONFIG_I2C_HISI=m
+# CONFIG_I2C_NOMADIK is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+# CONFIG_I2C_QCOM_CCI is not set
+CONFIG_I2C_QUP=m
+# CONFIG_I2C_RK3X is not set
+CONFIG_I2C_SIMTEC=m
+CONFIG_I2C_VERSATILE=m
+CONFIG_I2C_THUNDERX=m
+# CONFIG_I2C_XILINX is not set
+CONFIG_I2C_XLP9XX=m
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+# CONFIG_I2C_CP2615 is not set
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+CONFIG_I2C_TINY_USB=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_MLXCPLD is not set
+CONFIG_I2C_XGENE_SLIMPRO=m
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+CONFIG_I2C_STUB=m
+CONFIG_I2C_SLAVE=y
+CONFIG_I2C_SLAVE_EEPROM=m
+# CONFIG_I2C_SLAVE_TESTUNIT is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+# CONFIG_SPI_MEM is not set
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+CONFIG_SPI_CADENCE=m
+# CONFIG_SPI_CADENCE_QUADSPI is not set
+CONFIG_SPI_DESIGNWARE=m
+# CONFIG_SPI_DW_DMA is not set
+# CONFIG_SPI_DW_PCI is not set
+CONFIG_SPI_DW_MMIO=m
+CONFIG_SPI_HISI_KUNPENG=m
+CONFIG_SPI_HISI_SFC_V3XX=m
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+CONFIG_SPI_PL022=m
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_QCOM_QSPI is not set
+CONFIG_SPI_QUP=y
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_THUNDERX is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+CONFIG_SPI_XLP=m
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_DP83640_PHY=m
+# CONFIG_PTP_1588_CLOCK_INES is not set
+CONFIG_PTP_1588_CLOCK_KVM=y
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# CONFIG_PTP_1588_CLOCK_MOCK is not set
+# CONFIG_PTP_1588_CLOCK_OCP is not set
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_OCELOT is not set
+# CONFIG_PINCTRL_SINGLE is not set
+# CONFIG_PINCTRL_STMFX is not set
+CONFIG_PINCTRL_MSM=y
+# CONFIG_PINCTRL_IPQ5018 is not set
+# CONFIG_PINCTRL_IPQ5332 is not set
+# CONFIG_PINCTRL_IPQ8074 is not set
+# CONFIG_PINCTRL_IPQ6018 is not set
+# CONFIG_PINCTRL_IPQ9574 is not set
+# CONFIG_PINCTRL_MDM9607 is not set
+# CONFIG_PINCTRL_MSM8916 is not set
+# CONFIG_PINCTRL_MSM8953 is not set
+# CONFIG_PINCTRL_MSM8976 is not set
+# CONFIG_PINCTRL_MSM8994 is not set
+# CONFIG_PINCTRL_MSM8996 is not set
+# CONFIG_PINCTRL_MSM8998 is not set
+# CONFIG_PINCTRL_QCM2290 is not set
+# CONFIG_PINCTRL_QCS404 is not set
+CONFIG_PINCTRL_QDF2XXX=y
+# CONFIG_PINCTRL_QDU1000 is not set
+# CONFIG_PINCTRL_SA8775P is not set
+# CONFIG_PINCTRL_SC7180 is not set
+# CONFIG_PINCTRL_SC7280 is not set
+# CONFIG_PINCTRL_SC8180X is not set
+# CONFIG_PINCTRL_SC8280XP is not set
+# CONFIG_PINCTRL_SDM660 is not set
+# CONFIG_PINCTRL_SDM670 is not set
+# CONFIG_PINCTRL_SDM845 is not set
+# CONFIG_PINCTRL_SDX75 is not set
+# CONFIG_PINCTRL_SM6115 is not set
+# CONFIG_PINCTRL_SM6125 is not set
+# CONFIG_PINCTRL_SM6350 is not set
+# CONFIG_PINCTRL_SM6375 is not set
+# CONFIG_PINCTRL_SM7150 is not set
+# CONFIG_PINCTRL_SM8150 is not set
+# CONFIG_PINCTRL_SM8250 is not set
+# CONFIG_PINCTRL_SM8350 is not set
+# CONFIG_PINCTRL_SM8450 is not set
+# CONFIG_PINCTRL_SM8550 is not set
+# CONFIG_PINCTRL_QCOM_SSBI_PMIC is not set
+# CONFIG_PINCTRL_LPASS_LPI is not set
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_OF_GPIO=y
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=m
+
+#
+# Memory mapped GPIO drivers
+#
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ALTERA is not set
+CONFIG_GPIO_AMDPT=m
+# CONFIG_GPIO_CADENCE is not set
+CONFIG_GPIO_DWAPB=m
+# CONFIG_GPIO_EXAR is not set
+# CONFIG_GPIO_FTGPIO010 is not set
+CONFIG_GPIO_GENERIC_PLATFORM=m
+# CONFIG_GPIO_GRGPIO is not set
+CONFIG_GPIO_HISI=m
+# CONFIG_GPIO_HLWD is not set
+# CONFIG_GPIO_LOGICVC is not set
+# CONFIG_GPIO_MB86S7X is not set
+CONFIG_GPIO_PL061=y
+# CONFIG_GPIO_SIFIVE is not set
+# CONFIG_GPIO_SYSCON is not set
+# CONFIG_GPIO_THUNDERX is not set
+CONFIG_GPIO_XGENE=y
+CONFIG_GPIO_XGENE_SB=m
+# CONFIG_GPIO_XILINX is not set
+CONFIG_GPIO_XLP=m
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_ADNP is not set
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_GW_PLD is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_BRCMSTB is not set
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_HISI=y
+# CONFIG_POWER_RESET_MSM is not set
+# CONFIG_POWER_RESET_LTC2952 is not set
+# CONFIG_POWER_RESET_REGULATOR is not set
+CONFIG_POWER_RESET_RESTART=y
+# CONFIG_POWER_RESET_VEXPRESS is not set
+# CONFIG_POWER_RESET_XGENE is not set
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set
+# CONFIG_SYSCON_REBOOT_MODE is not set
+# CONFIG_NVMEM_REBOOT_MODE is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_MANAGER is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24190 is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+CONFIG_CHARGER_SMB347=m
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_RT9467 is not set
+# CONFIG_CHARGER_RT9471 is not set
+# CONFIG_CHARGER_UCS1002 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_AD7314=m
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1021 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM1177 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set
+# CONFIG_SENSORS_AS370 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_ARM_SCPI=m
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_FTSTEUTATES is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+CONFIG_SENSORS_G762=m
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+# CONFIG_SENSORS_IBMAEM is not set
+# CONFIG_SENSORS_IBMPEX is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+CONFIG_SENSORS_POWR1220=m
+# CONFIG_SENSORS_LINEAGE is not set
+CONFIG_SENSORS_LTC2945=m
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+CONFIG_SENSORS_LTC4222=m
+# CONFIG_SENSORS_LTC4245 is not set
+CONFIG_SENSORS_LTC4260=m
+# CONFIG_SENSORS_LTC4261 is not set
+CONFIG_SENSORS_MAX1111=m
+# CONFIG_SENSORS_MAX127 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6642 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+CONFIG_SENSORS_MAX31790=m
+# CONFIG_SENSORS_MC34VR500 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+CONFIG_SENSORS_ADCXX=m
+# CONFIG_SENSORS_LM63 is not set
+CONFIG_SENSORS_LM70=m
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+CONFIG_SENSORS_NCT6683=m
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT6775_I2C is not set
+CONFIG_SENSORS_NCT7802=m
+CONFIG_SENSORS_NCT7904=m
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_PCF8591 is not set
+CONFIG_PMBUS=m
+# CONFIG_SENSORS_PMBUS is not set
+# CONFIG_SENSORS_ACBEL_FSG032 is not set
+# CONFIG_SENSORS_ADM1266 is not set
+# CONFIG_SENSORS_ADM1275 is not set
+# CONFIG_SENSORS_BEL_PFE is not set
+# CONFIG_SENSORS_BPA_RS600 is not set
+# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set
+# CONFIG_SENSORS_FSP_3Y is not set
+# CONFIG_SENSORS_IBM_CFFPS is not set
+# CONFIG_SENSORS_DPS920AB is not set
+# CONFIG_SENSORS_INSPUR_IPSPS is not set
+# CONFIG_SENSORS_IR35221 is not set
+# CONFIG_SENSORS_IR36021 is not set
+# CONFIG_SENSORS_IR38064 is not set
+# CONFIG_SENSORS_IRPS5401 is not set
+# CONFIG_SENSORS_ISL68137 is not set
+# CONFIG_SENSORS_LM25066 is not set
+# CONFIG_SENSORS_LT7182S is not set
+# CONFIG_SENSORS_LTC2978 is not set
+CONFIG_SENSORS_LTC3815=m
+# CONFIG_SENSORS_MAX15301 is not set
+# CONFIG_SENSORS_MAX16064 is not set
+# CONFIG_SENSORS_MAX16601 is not set
+# CONFIG_SENSORS_MAX20730 is not set
+CONFIG_SENSORS_MAX20751=m
+# CONFIG_SENSORS_MAX31785 is not set
+# CONFIG_SENSORS_MAX34440 is not set
+# CONFIG_SENSORS_MAX8688 is not set
+# CONFIG_SENSORS_MP2888 is not set
+# CONFIG_SENSORS_MP2975 is not set
+# CONFIG_SENSORS_MP5023 is not set
+# CONFIG_SENSORS_MPQ7932 is not set
+# CONFIG_SENSORS_PIM4328 is not set
+# CONFIG_SENSORS_PLI1209BC is not set
+# CONFIG_SENSORS_PM6764TR is not set
+# CONFIG_SENSORS_PXE1610 is not set
+# CONFIG_SENSORS_Q54SJ108A2 is not set
+# CONFIG_SENSORS_STPDDC60 is not set
+# CONFIG_SENSORS_TDA38640 is not set
+CONFIG_SENSORS_TPS40422=m
+# CONFIG_SENSORS_TPS53679 is not set
+# CONFIG_SENSORS_TPS546D24 is not set
+# CONFIG_SENSORS_UCD9000 is not set
+# CONFIG_SENSORS_UCD9200 is not set
+# CONFIG_SENSORS_XDPE152 is not set
+# CONFIG_SENSORS_XDPE122 is not set
+# CONFIG_SENSORS_ZL6100 is not set
+CONFIG_SENSORS_PWM_FAN=m
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+CONFIG_SENSORS_SHTC1=m
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_SCH5627 is not set
+# CONFIG_SENSORS_SCH5636 is not set
+# CONFIG_SENSORS_STTS751 is not set
+CONFIG_SENSORS_ADC128D818=m
+# CONFIG_SENSORS_ADS7828 is not set
+CONFIG_SENSORS_ADS7871=m
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+CONFIG_SENSORS_TC74=m
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+CONFIG_SENSORS_TMP103=m
+# CONFIG_SENSORS_TMP108 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+CONFIG_SENSORS_VEXPRESS=m
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83773G is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_SENSORS_XGENE=m
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ACPI_POWER=y
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+# CONFIG_THERMAL_STATISTICS is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_OF=y
+# CONFIG_THERMAL_WRITABLE_TRIPS is not set
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+# CONFIG_THERMAL_GOV_BANG_BANG is not set
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CPU_FREQ_THERMAL=y
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_THERMAL_MMIO is not set
+CONFIG_HISI_THERMAL=m
+
+#
+# Qualcomm thermal drivers
+#
+# CONFIG_QCOM_LMH is not set
+# end of Qualcomm thermal drivers
+
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_OPEN_TIMEOUT=0
+CONFIG_WATCHDOG_SYSFS=y
+# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set
+
+#
+# Watchdog Pretimeout Governors
+#
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_GPIO_WATCHDOG=m
+# CONFIG_WDAT_WDT is not set
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_XILINX_WINDOW_WATCHDOG is not set
+# CONFIG_ZIIRAVE_WATCHDOG is not set
+CONFIG_ARM_SP805_WATCHDOG=m
+CONFIG_ARM_SBSA_WATCHDOG=m
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+# CONFIG_QCOM_WDT is not set
+# CONFIG_ARM_SMC_WATCHDOG is not set
+CONFIG_ALIM7101_WDT=m
+CONFIG_I6300ESB_WDT=m
+# CONFIG_HP_WATCHDOG is not set
+CONFIG_MARVELL_GTI_WDT=y
+# CONFIG_MEN_A21_WDT is not set
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=m
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_MAX5970 is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_MFD_HI655X_PMIC is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_NTXEC is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_QCOM_RPM is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_RK8XX_I2C is not set
+# CONFIG_MFD_RK8XX_SPI is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS65219 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_TPS6594_SPI is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TQMX86 is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_MFD_VEXPRESS_SYSREG is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+CONFIG_REGULATOR=y
+# CONFIG_REGULATOR_DEBUG is not set
+# CONFIG_REGULATOR_FIXED_VOLTAGE is not set
+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set
+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set
+# CONFIG_REGULATOR_88PG86X is not set
+# CONFIG_REGULATOR_ACT8865 is not set
+# CONFIG_REGULATOR_AD5398 is not set
+# CONFIG_REGULATOR_AW37503 is not set
+# CONFIG_REGULATOR_DA9121 is not set
+# CONFIG_REGULATOR_DA9210 is not set
+# CONFIG_REGULATOR_DA9211 is not set
+# CONFIG_REGULATOR_FAN53555 is not set
+# CONFIG_REGULATOR_FAN53880 is not set
+# CONFIG_REGULATOR_GPIO is not set
+# CONFIG_REGULATOR_ISL9305 is not set
+# CONFIG_REGULATOR_ISL6271A is not set
+# CONFIG_REGULATOR_LP3971 is not set
+# CONFIG_REGULATOR_LP3972 is not set
+# CONFIG_REGULATOR_LP872X is not set
+# CONFIG_REGULATOR_LP8755 is not set
+# CONFIG_REGULATOR_LTC3589 is not set
+# CONFIG_REGULATOR_LTC3676 is not set
+# CONFIG_REGULATOR_MAX1586 is not set
+# CONFIG_REGULATOR_MAX77857 is not set
+# CONFIG_REGULATOR_MAX8649 is not set
+# CONFIG_REGULATOR_MAX8660 is not set
+# CONFIG_REGULATOR_MAX8893 is not set
+# CONFIG_REGULATOR_MAX8952 is not set
+# CONFIG_REGULATOR_MAX8973 is not set
+# CONFIG_REGULATOR_MAX20086 is not set
+# CONFIG_REGULATOR_MAX20411 is not set
+# CONFIG_REGULATOR_MAX77826 is not set
+# CONFIG_REGULATOR_MCP16502 is not set
+# CONFIG_REGULATOR_MP5416 is not set
+# CONFIG_REGULATOR_MP8859 is not set
+# CONFIG_REGULATOR_MP886X is not set
+# CONFIG_REGULATOR_MPQ7920 is not set
+# CONFIG_REGULATOR_MT6311 is not set
+# CONFIG_REGULATOR_PCA9450 is not set
+# CONFIG_REGULATOR_PF8X00 is not set
+# CONFIG_REGULATOR_PFUZE100 is not set
+# CONFIG_REGULATOR_PV88060 is not set
+# CONFIG_REGULATOR_PV88080 is not set
+# CONFIG_REGULATOR_PV88090 is not set
+# CONFIG_REGULATOR_PWM is not set
+# CONFIG_REGULATOR_QCOM_REFGEN is not set
+# CONFIG_REGULATOR_RAA215300 is not set
+# CONFIG_REGULATOR_RASPBERRYPI_TOUCHSCREEN_ATTINY is not set
+# CONFIG_REGULATOR_RT4801 is not set
+# CONFIG_REGULATOR_RT4803 is not set
+# CONFIG_REGULATOR_RT5190A is not set
+# CONFIG_REGULATOR_RT5739 is not set
+# CONFIG_REGULATOR_RT5759 is not set
+# CONFIG_REGULATOR_RT6160 is not set
+# CONFIG_REGULATOR_RT6190 is not set
+# CONFIG_REGULATOR_RT6245 is not set
+# CONFIG_REGULATOR_RTQ2134 is not set
+# CONFIG_REGULATOR_RTMV20 is not set
+# CONFIG_REGULATOR_RTQ6752 is not set
+# CONFIG_REGULATOR_RTQ2208 is not set
+# CONFIG_REGULATOR_SLG51000 is not set
+# CONFIG_REGULATOR_SY8106A is not set
+# CONFIG_REGULATOR_SY8824X is not set
+# CONFIG_REGULATOR_SY8827N is not set
+# CONFIG_REGULATOR_TPS51632 is not set
+# CONFIG_REGULATOR_TPS62360 is not set
+# CONFIG_REGULATOR_TPS6286X is not set
+# CONFIG_REGULATOR_TPS6287X is not set
+# CONFIG_REGULATOR_TPS65023 is not set
+# CONFIG_REGULATOR_TPS6507X is not set
+# CONFIG_REGULATOR_TPS65132 is not set
+# CONFIG_REGULATOR_TPS6524X is not set
+# CONFIG_REGULATOR_VCTRL is not set
+# CONFIG_REGULATOR_VEXPRESS is not set
+# CONFIG_REGULATOR_VQMMC_IPQ4019 is not set
+# CONFIG_RC_CORE is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_SUPPORT is not set
+# end of CEC support
+
+# CONFIG_MEDIA_SUPPORT is not set
+
+#
+# Graphics support
+#
+CONFIG_APERTURE_HELPERS=y
+CONFIG_VIDEO_CMDLINE=y
+CONFIG_VIDEO_NOMODESET=y
+# CONFIG_AUXDISPLAY is not set
+CONFIG_DRM=m
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_DP_CEC=y
+CONFIG_DRM_TTM=m
+CONFIG_DRM_EXEC=m
+CONFIG_DRM_BUDDY=m
+CONFIG_DRM_VRAM_HELPER=m
+CONFIG_DRM_TTM_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=m
+CONFIG_DRM_SUBALLOC_HELPER=m
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+CONFIG_DRM_I2C_CH7006=m
+# CONFIG_DRM_I2C_SIL164 is not set
+CONFIG_DRM_I2C_NXP_TDA998X=m
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# CONFIG_DRM_HDLCD is not set
+# CONFIG_DRM_MALI_DISPLAY is not set
+# CONFIG_DRM_KOMEDA is not set
+# end of ARM devices
+
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+# CONFIG_DRM_AMDGPU_SI is not set
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+CONFIG_DRM_AMD_ACP=y
+# end of ACP (Audio CoProcessor) Configuration
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_FP=y
+# CONFIG_DEBUG_KERNEL_DC is not set
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
+# end of Display Engine Configuration
+
+CONFIG_HSA_AMD=y
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+# CONFIG_DRM_VGEM is not set
+CONFIG_DRM_VKMS=m
+# CONFIG_DRM_VMWGFX is not set
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+# CONFIG_DRM_MSM is not set
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set
+# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
+# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set
+# CONFIG_DRM_PANEL_LVDS is not set
+# CONFIG_DRM_PANEL_SIMPLE is not set
+# CONFIG_DRM_PANEL_EDP is not set
+# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set
+# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set
+# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_LG_LB035Q02 is not set
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set
+# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
+# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set
+# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set
+# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
+# CONFIG_DRM_PANEL_SHARP_LS037V7DW01 is not set
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
+# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
+# CONFIG_DRM_PANEL_TPO_TD043MTEA1 is not set
+# CONFIG_DRM_PANEL_TPO_TPG110 is not set
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_ITE_IT6505 is not set
+# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
+# CONFIG_DRM_ITE_IT66121 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_SAMSUNG_DSIM is not set
+# CONFIG_DRM_SIL_SII8620 is not set
+# CONFIG_DRM_SII902X is not set
+# CONFIG_DRM_SII9234 is not set
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
+# CONFIG_DRM_TI_TFP410 is not set
+# CONFIG_DRM_TI_SN65DSI83 is not set
+# CONFIG_DRM_TI_SN65DSI86 is not set
+# CONFIG_DRM_TI_TPD12S015 is not set
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CDNS_MHDP8546 is not set
+# end of Display Interface Bridges
+
+# CONFIG_DRM_LOONGSON is not set
+# CONFIG_DRM_ETNAVIV is not set
+CONFIG_DRM_HISI_HIBMC=m
+# CONFIG_DRM_HISI_KIRIN is not set
+# CONFIG_DRM_LOGICVC is not set
+# CONFIG_DRM_ARCPGU is not set
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
+# CONFIG_TINYDRM_ILI9225 is not set
+# CONFIG_TINYDRM_ILI9341 is not set
+# CONFIG_TINYDRM_ILI9486 is not set
+# CONFIG_TINYDRM_MI0283QT is not set
+# CONFIG_TINYDRM_REPAPER is not set
+# CONFIG_TINYDRM_ST7586 is not set
+# CONFIG_TINYDRM_ST7735R is not set
+# CONFIG_DRM_PL111 is not set
+# CONFIG_DRM_LIMA is not set
+# CONFIG_DRM_PANFROST is not set
+# CONFIG_DRM_TIDSS is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_ARMCLCD is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_EFI=y
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+CONFIG_FB_SIMPLE=y
+CONFIG_FB_SSD1307=m
+# CONFIG_FB_SM712 is not set
+# CONFIG_FB_LS2K500 is not set
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DEVICE=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+CONFIG_FB_BACKLIGHT=m
+# CONFIG_FB_MODE_HELPERS is not set
+CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI922X is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=m
+# CONFIG_LCD_AMS369FG06 is not set
+# CONFIG_LCD_LMS501KF03 is not set
+# CONFIG_LCD_HX8357 is not set
+# CONFIG_LCD_OTM3225A is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+CONFIG_BACKLIGHT_PWM=m
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+CONFIG_BACKLIGHT_LP855X=m
+CONFIG_BACKLIGHT_GPIO=m
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
+CONFIG_SOUND=m
+# CONFIG_SND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
+CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+# CONFIG_HID_ASUS is not set
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+# CONFIG_HID_CMEDIA is not set
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
+CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
+CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=m
+# CONFIG_HID_SENSOR_CUSTOM_SENSOR is not set
+# CONFIG_HID_ALPS is not set
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
+CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
+# CONFIG_I2C_HID_OF_GOODIX is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_LED_TRIG=y
+CONFIG_USB_ULPI_BUS=m
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+# CONFIG_USB_XHCI_DBGCAP is not set
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+CONFIG_USB_XHCI_PLATFORM=y
+# CONFIG_USB_XHCI_HISTB is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=m
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+# CONFIG_USB_OHCI_HCD_PLATFORM is not set
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+# CONFIG_USB_SERIAL_CONSOLE is not set
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_SIMPLE=m
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_USB_QCOM_EUD is not set
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
+CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+CONFIG_USB_CHAOSKEY=m
+# CONFIG_USB_ONBOARD_HUB is not set
+CONFIG_USB_ATM=m
+# CONFIG_USB_SPEEDTOUCH is not set
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_ULPI is not set
+# end of USB Physical Layer drivers
+
+# CONFIG_USB_GADGET is not set
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+# CONFIG_TYPEC_TCPCI is not set
+# CONFIG_TYPEC_FUSB302 is not set
+# CONFIG_TYPEC_QCOM_PMIC is not set
+CONFIG_TYPEC_UCSI=y
+# CONFIG_UCSI_CCG is not set
+CONFIG_UCSI_ACPI=y
+# CONFIG_UCSI_STM32G0 is not set
+CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
+CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
+CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_MMC=m
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_ARMMMCI=m
+CONFIG_MMC_STM32_SDMMC=y
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
+# CONFIG_MMC_SDHCI_MSM is not set
+CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_DW=m
+CONFIG_MMC_DW_PLTFM=m
+CONFIG_MMC_DW_BLUEFIELD=m
+# CONFIG_MMC_DW_EXYNOS is not set
+# CONFIG_MMC_DW_HI3798CV200 is not set
+# CONFIG_MMC_DW_K3 is not set
+# CONFIG_MMC_DW_PCI is not set
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+CONFIG_MMC_TOSHIBA_PCI=m
+CONFIG_MMC_MTK=m
+# CONFIG_MMC_SDHCI_XENON is not set
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_CLASS_FLASH=m
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
+CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_REGULATOR is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+CONFIG_LEDS_LT3593=m
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+CONFIG_LEDS_BLINKM=m
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+
+#
+# Flash and Torch LED drivers
+#
+# CONFIG_LEDS_AAT1290 is not set
+# CONFIG_LEDS_AS3645A is not set
+# CONFIG_LEDS_KTD2692 is not set
+# CONFIG_LEDS_LM3601X is not set
+# CONFIG_LEDS_RT4505 is not set
+# CONFIG_LEDS_RT8515 is not set
+# CONFIG_LEDS_SGM3140 is not set
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+# CONFIG_LEDS_TRIGGER_DISK is not set
+# CONFIG_LEDS_TRIGGER_MTD is not set
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+# CONFIG_LEDS_TRIGGER_AUDIO is not set
+# CONFIG_LEDS_TRIGGER_TTY is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
+CONFIG_INFINIBAND=m
+CONFIG_INFINIBAND_USER_MAD=m
+CONFIG_INFINIBAND_USER_ACCESS=m
+CONFIG_INFINIBAND_USER_MEM=y
+CONFIG_INFINIBAND_ON_DEMAND_PAGING=y
+CONFIG_INFINIBAND_ADDR_TRANS=y
+CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y
+CONFIG_INFINIBAND_VIRT_DMA=y
+CONFIG_INFINIBAND_BNXT_RE=m
+CONFIG_INFINIBAND_CXGB4=m
+# CONFIG_INFINIBAND_EFA is not set
+CONFIG_INFINIBAND_ERDMA=m
+CONFIG_INFINIBAND_HNS=m
+CONFIG_INFINIBAND_HNS_HIP08=y
+# CONFIG_INFINIBAND_IRDMA is not set
+CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
+# CONFIG_INFINIBAND_MTHCA is not set
+# CONFIG_INFINIBAND_OCRDMA is not set
+CONFIG_INFINIBAND_QEDR=m
+CONFIG_RDMA_RXE=m
+CONFIG_RDMA_SIW=m
+CONFIG_INFINIBAND_IPOIB=m
+CONFIG_INFINIBAND_IPOIB_CM=y
+CONFIG_INFINIBAND_IPOIB_DEBUG=y
+# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set
+CONFIG_INFINIBAND_SRP=m
+CONFIG_INFINIBAND_SRPT=m
+CONFIG_INFINIBAND_ISER=m
+CONFIG_INFINIBAND_ISERT=m
+# CONFIG_INFINIBAND_RTRS_CLIENT is not set
+# CONFIG_INFINIBAND_RTRS_SERVER is not set
+CONFIG_EDAC_SUPPORT=y
+CONFIG_EDAC=y
+CONFIG_EDAC_LEGACY_SYSFS=y
+# CONFIG_EDAC_DEBUG is not set
+CONFIG_EDAC_GHES=y
+CONFIG_EDAC_THUNDERX=m
+CONFIG_EDAC_XGENE=m
+# CONFIG_EDAC_DMC520 is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_SYSTOHC_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+CONFIG_RTC_NVMEM=y
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# I2C RTC drivers
+#
+CONFIG_RTC_DRV_ABB5ZES3=m
+# CONFIG_RTC_DRV_ABEOZ9 is not set
+CONFIG_RTC_DRV_ABX80X=m
+CONFIG_RTC_DRV_DS1307=m
+# CONFIG_RTC_DRV_DS1307_CENTURY is not set
+CONFIG_RTC_DRV_DS1374=m
+CONFIG_RTC_DRV_DS1374_WDT=y
+CONFIG_RTC_DRV_DS1672=m
+# CONFIG_RTC_DRV_HYM8563 is not set
+CONFIG_RTC_DRV_MAX6900=m
+# CONFIG_RTC_DRV_NCT3018Y is not set
+CONFIG_RTC_DRV_RS5C372=m
+CONFIG_RTC_DRV_ISL1208=m
+CONFIG_RTC_DRV_ISL12022=m
+# CONFIG_RTC_DRV_ISL12026 is not set
+CONFIG_RTC_DRV_X1205=m
+CONFIG_RTC_DRV_PCF8523=m
+CONFIG_RTC_DRV_PCF85063=m
+# CONFIG_RTC_DRV_PCF85363 is not set
+CONFIG_RTC_DRV_PCF8563=m
+CONFIG_RTC_DRV_PCF8583=m
+CONFIG_RTC_DRV_M41T80=m
+CONFIG_RTC_DRV_M41T80_WDT=y
+CONFIG_RTC_DRV_BQ32K=m
+# CONFIG_RTC_DRV_S35390A is not set
+CONFIG_RTC_DRV_FM3130=m
+CONFIG_RTC_DRV_RX8010=m
+CONFIG_RTC_DRV_RX8581=m
+CONFIG_RTC_DRV_RX8025=m
+CONFIG_RTC_DRV_EM3027=m
+# CONFIG_RTC_DRV_RV3028 is not set
+# CONFIG_RTC_DRV_RV3032 is not set
+# CONFIG_RTC_DRV_RV8803 is not set
+# CONFIG_RTC_DRV_SD3078 is not set
+
+#
+# SPI RTC drivers
+#
+CONFIG_RTC_DRV_M41T93=m
+CONFIG_RTC_DRV_M41T94=m
+# CONFIG_RTC_DRV_DS1302 is not set
+CONFIG_RTC_DRV_DS1305=m
+CONFIG_RTC_DRV_DS1343=m
+CONFIG_RTC_DRV_DS1347=m
+CONFIG_RTC_DRV_DS1390=m
+# CONFIG_RTC_DRV_MAX6916 is not set
+CONFIG_RTC_DRV_R9701=m
+CONFIG_RTC_DRV_RX4581=m
+CONFIG_RTC_DRV_RS5C348=m
+CONFIG_RTC_DRV_MAX6902=m
+CONFIG_RTC_DRV_PCF2123=m
+CONFIG_RTC_DRV_MCP795=m
+CONFIG_RTC_I2C_AND_SPI=m
+
+#
+# SPI and I2C RTC drivers
+#
+CONFIG_RTC_DRV_DS3232=m
+CONFIG_RTC_DRV_DS3232_HWMON=y
+CONFIG_RTC_DRV_PCF2127=m
+CONFIG_RTC_DRV_RV3029C2=m
+# CONFIG_RTC_DRV_RV3029_HWMON is not set
+# CONFIG_RTC_DRV_RX6110 is not set
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_DS1286=m
+CONFIG_RTC_DRV_DS1511=m
+CONFIG_RTC_DRV_DS1553=m
+CONFIG_RTC_DRV_DS1685_FAMILY=m
+CONFIG_RTC_DRV_DS1685=y
+# CONFIG_RTC_DRV_DS1689 is not set
+# CONFIG_RTC_DRV_DS17285 is not set
+# CONFIG_RTC_DRV_DS17485 is not set
+# CONFIG_RTC_DRV_DS17885 is not set
+CONFIG_RTC_DRV_DS1742=m
+CONFIG_RTC_DRV_DS2404=m
+CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_STK17TA8=m
+# CONFIG_RTC_DRV_M48T86 is not set
+CONFIG_RTC_DRV_M48T35=m
+CONFIG_RTC_DRV_M48T59=m
+CONFIG_RTC_DRV_MSM6242=m
+CONFIG_RTC_DRV_RP5C01=m
+# CONFIG_RTC_DRV_ZYNQMP is not set
+
+#
+# on-CPU RTC drivers
+#
+# CONFIG_RTC_DRV_PL030 is not set
+CONFIG_RTC_DRV_PL031=y
+# CONFIG_RTC_DRV_CADENCE is not set
+# CONFIG_RTC_DRV_FTRTC010 is not set
+# CONFIG_RTC_DRV_XGENE is not set
+# CONFIG_RTC_DRV_R7301 is not set
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_GOLDFISH is not set
+CONFIG_DMADEVICES=y
+# CONFIG_DMADEVICES_DEBUG is not set
+
+#
+# DMA Devices
+#
+CONFIG_DMA_ENGINE=y
+CONFIG_DMA_ACPI=y
+CONFIG_DMA_OF=y
+# CONFIG_ALTERA_MSGDMA is not set
+# CONFIG_AMBA_PL08X is not set
+# CONFIG_BCM_SBA_RAID is not set
+# CONFIG_DW_AXI_DMAC is not set
+# CONFIG_FSL_EDMA is not set
+# CONFIG_FSL_QDMA is not set
+# CONFIG_HISI_DMA is not set
+# CONFIG_INTEL_IDMA64 is not set
+# CONFIG_K3_DMA is not set
+# CONFIG_MV_XOR_V2 is not set
+# CONFIG_PL330_DMA is not set
+# CONFIG_PLX_DMA is not set
+# CONFIG_XGENE_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+# CONFIG_XILINX_ZYNQMP_DMA is not set
+#
CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_BAM_DMA is not set +# CONFIG_QCOM_GPI_DMA is not set +CONFIG_QCOM_HIDMA_MGMT=m +CONFIG_QCOM_HIDMA=m +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=m +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# CONFIG_HISI_ACC_VFIO_PCI is not set +# end of VFIO support for PCI devices + +# +# VFIO support for platform devices +# +CONFIG_VFIO_PLATFORM_BASE=m +CONFIG_VFIO_PLATFORM=m +# CONFIG_VFIO_AMBA is not set + +# +# VFIO platform reset drivers +# +# CONFIG_VFIO_PLATFORM_CALXEDAXGMAC_RESET is not set +# CONFIG_VFIO_PLATFORM_AMDXGBE_RESET is not set +# end of VFIO platform reset drivers +# end of VFIO support for platform devices + +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=m +CONFIG_VIRTIO_PCI_LIB=m +CONFIG_VIRTIO_PCI_LIB_LEGACY=m +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=m +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# CONFIG_HYPERV is not set +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +CONFIG_STAGING=y +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +# CONFIG_GOLDFISH is not set +CONFIG_CHROME_PLATFORMS=y +# CONFIG_CHROMEOS_ACPI is not set +# CONFIG_CHROMEOS_TBMC is not set +# CONFIG_CROS_EC is not set +# CONFIG_CROS_KBD_LED_BACKLIGHT is not set +# CONFIG_CROS_HPS_I2C is not set +# CONFIG_CHROMEOS_PRIVACY_SCREEN is not set +# CONFIG_MELLANOX_PLATFORM is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y + +# +# Clock driver for ARM Reference designs +# +# CONFIG_CLK_ICST is not set +CONFIG_CLK_SP810=y 
+CONFIG_CLK_VEXPRESS_OSC=y +# end of Clock driver for ARM Reference designs + +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +CONFIG_COMMON_CLK_SCPI=m +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +CONFIG_COMMON_CLK_HI3516CV300=y +CONFIG_COMMON_CLK_HI3519=y +CONFIG_COMMON_CLK_HI3559A=y +CONFIG_COMMON_CLK_HI3660=y +CONFIG_COMMON_CLK_HI3670=y +CONFIG_COMMON_CLK_HI3798CV200=y +# CONFIG_COMMON_CLK_HI6220 is not set +CONFIG_RESET_HISI=y +CONFIG_STUB_CLK_HI3660=y +# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +CONFIG_HWSPINLOCK=y +# CONFIG_HWSPINLOCK_QCOM is not set + +# +# Clock Source drivers +# +CONFIG_TIMER_OF=y +CONFIG_TIMER_ACPI=y +CONFIG_TIMER_PROBE=y +CONFIG_CLKSRC_MMIO=y +CONFIG_ARM_ARCH_TIMER=y +CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y +CONFIG_FSL_ERRATUM_A008585=y +CONFIG_HISILICON_ERRATUM_161010101=y +CONFIG_ARM64_ERRATUM_858921=y +CONFIG_ARM_TIMER_SP804=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_ARM_MHU=m +# CONFIG_ARM_MHU_V2 is not set +# CONFIG_PLATFORM_MHU is not set +# CONFIG_PL320_MBOX is not set +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_HI3660_MBOX=y +CONFIG_HI6220_MBOX=y +# CONFIG_MAILBOX_TEST is not set +# CONFIG_QCOM_APCS_IPC is not set +CONFIG_XGENE_SLIMPRO_MBOX=m +# CONFIG_QCOM_IPCC is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +CONFIG_IOMMU_IO_PGTABLE_LPAE=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set +# CONFIG_IOMMU_IO_PGTABLE_ARMV7S is not set +# CONFIG_IOMMU_IO_PGTABLE_DART is not set +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +# CONFIG_IOMMUFD is not set +CONFIG_ARM_SMMU=y +# CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS is not set +CONFIG_ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT=y +CONFIG_ARM_SMMU_QCOM=y +# CONFIG_ARM_SMMU_QCOM_DEBUG is not set +CONFIG_ARM_SMMU_V3=y +CONFIG_ARM_SMMU_V3_SVA=y +# CONFIG_QCOM_IOMMU is not set +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# CONFIG_SOC_BRCMSTB is not set +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# CONFIG_QUICC_ENGINE is not set +# CONFIG_FSL_RCPM is not set +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# CONFIG_A64FX_DIAG is not set +# 
end of fujitsu SoC drivers + +# +# Hisilicon SoC drivers +# +CONFIG_KUNPENG_HCCS=m +# end of Hisilicon SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# CONFIG_QCOM_AOSS_QMP is not set +# CONFIG_QCOM_COMMAND_DB is not set +# CONFIG_QCOM_CPR is not set +# CONFIG_QCOM_GENI_SE is not set +# CONFIG_QCOM_GSBI is not set +# CONFIG_QCOM_LLCC is not set +CONFIG_QCOM_KRYO_L2_ACCESSORS=y +# CONFIG_QCOM_OCMEM is not set +# CONFIG_QCOM_RAMP_CTRL is not set +# CONFIG_QCOM_RMTFS_MEM is not set +# CONFIG_QCOM_RPM_MASTER_STATS is not set +# CONFIG_QCOM_RPMH is not set +# CONFIG_QCOM_SMEM is not set +# CONFIG_QCOM_SPM is not set +# CONFIG_QCOM_ICC_BWMON is not set +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +CONFIG_EXTCON=y + +# +# Extcon Device Drivers +# +# CONFIG_EXTCON_FSA9480 is not set +CONFIG_EXTCON_GPIO=m +# CONFIG_EXTCON_MAX3355 is not set +# CONFIG_EXTCON_PTN5150 is not set +# CONFIG_EXTCON_QCOM_SPMI_MISC is not set +# CONFIG_EXTCON_RT8973A is not set +# CONFIG_EXTCON_SM5502 is not set +# CONFIG_EXTCON_USB_GPIO is not set +# CONFIG_EXTCON_USBC_TUSB320 is not set +# CONFIG_MEMORY is not set +# CONFIG_IIO is not set +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_HIBVT is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +CONFIG_ARM_GIC=y +CONFIG_ARM_GIC_MAX_NR=1 +CONFIG_ARM_GIC_V2M=y +CONFIG_ARM_GIC_V3=y +CONFIG_ARM_GIC_V3_ITS=y +CONFIG_ARM_GIC_V3_ITS_PCI=y +CONFIG_ARM_GIC_PHYTIUM_2500=y +# CONFIG_AL_FIC is not set +CONFIG_HISILICON_IRQ_MBIGEN=y +# CONFIG_XILINX_INTC is not set +CONFIG_PARTITION_PERCPU=y +CONFIG_QCOM_IRQ_COMBINER=y +# CONFIG_QCOM_PDC is not set +# CONFIG_QCOM_MPM is not set +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_QCOM_AOSS is not set +# CONFIG_RESET_QCOM_PDC is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set +# CONFIG_COMMON_RESET_HI3660 is not set +CONFIG_COMMON_RESET_HI6220=m + +# +# PHY Subsystem +# +CONFIG_GENERIC_PHY=y +CONFIG_PHY_XGENE=y +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +CONFIG_PHY_HI6220_USB=m +# CONFIG_PHY_HI3660_USB is not set +# CONFIG_PHY_HI3670_USB is not set +# CONFIG_PHY_HI3670_PCIE is not set +# CONFIG_PHY_HISTB_COMBPHY is not set +# CONFIG_PHY_HISI_INNO_USB2 is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# 
CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_QCOM_APQ8064_SATA is not set +# CONFIG_PHY_QCOM_EDP is not set +# CONFIG_PHY_QCOM_IPQ4019_USB is not set +# CONFIG_PHY_QCOM_IPQ806X_SATA is not set +# CONFIG_PHY_QCOM_PCIE2 is not set +# CONFIG_PHY_QCOM_QMP is not set +# CONFIG_PHY_QCOM_QUSB2 is not set +# CONFIG_PHY_QCOM_SNPS_EUSB2 is not set +# CONFIG_PHY_QCOM_EUSB2_REPEATER is not set +# CONFIG_PHY_QCOM_M31_USB is not set +# CONFIG_PHY_QCOM_USB_HS is not set +# CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2 is not set +# CONFIG_PHY_QCOM_USB_HSIC is not set +# CONFIG_PHY_QCOM_USB_HS_28NM is not set +# CONFIG_PHY_QCOM_USB_SS is not set +# CONFIG_PHY_QCOM_IPQ806X_USB is not set +# CONFIG_PHY_QCOM_SGMII_ETH is not set +# CONFIG_PHY_TUSB1210 is not set +# end of PHY Subsystem + +# CONFIG_POWERCAP is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_ARM_CCI_PMU is not set +CONFIG_ARM_CCN=y +CONFIG_ARM_CMN=y +CONFIG_ARM_PMU=y +CONFIG_ARM_PMU_ACPI=y +CONFIG_ARM_SMMU_V3_PMU=m +CONFIG_ARM_PMUV3=y +CONFIG_ARM_DSU_PMU=y +CONFIG_QCOM_L2_PMU=y +CONFIG_QCOM_L3_PMU=y +CONFIG_THUNDERX2_PMU=m +CONFIG_XGENE_PMU=y +CONFIG_ARM_SPE_PMU=m +# CONFIG_ARM_DMC620_PMU is not set +# CONFIG_MARVELL_CN10K_TAD_PMU is not set +CONFIG_ALIBABA_UNCORE_DRW_PMU=m +CONFIG_HISI_PMU=m +CONFIG_HISI_PCIE_PMU=m +# CONFIG_HNS3_PMU is not set +# CONFIG_MARVELL_CN10K_DDR_PMU is not set +CONFIG_DWC_PCIE_PMU=m +# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=m +CONFIG_BLK_DEV_PMEM=m +CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=m +CONFIG_BTT=y +CONFIG_ND_PFN=m +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_OF_PMEM=m +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_PMEM=m +CONFIG_DEV_DAX_HMEM=m +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +# CONFIG_DEV_DAX_KMEM is not set +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_QCOM_QFPROM is not set +# CONFIG_NVMEM_QCOM_SEC_QFPROM is not set +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +CONFIG_STM=m +# CONFIG_STM_PROTO_BASIC is not set +# CONFIG_STM_PROTO_SYS_T is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_STM_SOURCE_HEARTBEAT is not set +# CONFIG_STM_SOURCE_FTRACE is not set +# CONFIG_INTEL_TH is not set +# CONFIG_HISI_PTT is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_TEE=m +# CONFIG_OPTEE is not set +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# CONFIG_CDX_BUS is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=m +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set 
+CONFIG_JBD2=m +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=m +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +# CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# 
CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m 
+CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +# CONFIG_DLM_DEBUG is not set +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y 
+CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_CURVE25519=m +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y 
+CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=y +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y +# CONFIG_CRYPTO_NHPOLY1305_NEON is not set +CONFIG_CRYPTO_CHACHA20_NEON=m + +# +# Accelerated Cryptographic Algorithms for CPU (arm64) +# +CONFIG_CRYPTO_GHASH_ARM64_CE=m +CONFIG_CRYPTO_POLY1305_NEON=m +CONFIG_CRYPTO_SHA1_ARM64_CE=m +CONFIG_CRYPTO_SHA256_ARM64=m +CONFIG_CRYPTO_SHA2_ARM64_CE=m +# CONFIG_CRYPTO_SHA512_ARM64 is not set +# CONFIG_CRYPTO_SHA512_ARM64_CE is not set +# CONFIG_CRYPTO_SHA3_ARM64 is not set +CONFIG_CRYPTO_SM3_NEON=m +CONFIG_CRYPTO_SM3_ARM64_CE=m +# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set +CONFIG_CRYPTO_AES_ARM64=y +CONFIG_CRYPTO_AES_ARM64_CE=y +CONFIG_CRYPTO_AES_ARM64_CE_BLK=y +CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y +CONFIG_CRYPTO_AES_ARM64_BS=m +CONFIG_CRYPTO_SM4_ARM64_CE=m +CONFIG_CRYPTO_SM4_ARM64_CE_BLK=m +CONFIG_CRYPTO_SM4_ARM64_NEON_BLK=m +CONFIG_CRYPTO_AES_ARM64_CE_CCM=y +CONFIG_CRYPTO_SM4_ARM64_CE_CCM=m +CONFIG_CRYPTO_SM4_ARM64_CE_GCM=m +CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m +# end of Accelerated Cryptographic Algorithms for CPU (arm64) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +# CONFIG_CRYPTO_DEV_CCP is not set +CONFIG_CRYPTO_DEV_CPT=m +CONFIG_CAVIUM_CPT=m +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_OCTEONTX_CPT is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set 
+# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CAVIUM_ZIP=m +# CONFIG_CRYPTO_DEV_QCE is not set +# CONFIG_CRYPTO_DEV_QCOM_RNG is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +CONFIG_CRYPTO_DEV_HISI_SEC=m +CONFIG_CRYPTO_DEV_HISI_SEC2=m +CONFIG_CRYPTO_DEV_HISI_QM=m +CONFIG_CRYPTO_DEV_HISI_ZIP=m +CONFIG_CRYPTO_DEV_HISI_HPRE=m +CONFIG_CRYPTO_DEV_HISI_TRNG=m +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_LINEAR_RANGES=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y +CONFIG_INDIRECT_PIO=y +# CONFIG_TRACE_MMIO_ACCESS is not set + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=y +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=y +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y +CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y 
+CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_ARCH_HAS_SETUP_DMA_OPS=y +CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y +CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y +CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_NONCOHERENT_MMAP=y +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_DIRECT_REMAP=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=64 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_SPLIT=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_IOREMAP=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# 
CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_FRAME_POINTER=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x0 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +# CONFIG_DEBUG_PAGEALLOC is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_PER_VMA_LOCK_STATS is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_VM_PGTABLE is not set +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_HW_TAGS=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=0 +CONFIG_KFENCE_NUM_OBJECTS=255 +CONFIG_KFENCE_DEFERRABLE=y +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +CONFIG_SDEI_WATCHDOG=y +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is 
not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_NOP_TRACER=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_CALL_OPS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FUNCTION_PROFILER is not set +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y 
+CONFIG_FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# arm64 Debugging +# +CONFIG_PID_IN_CONTEXTIDR=y +# CONFIG_DEBUG_EFI is not set +# CONFIG_ARM64_RELOC_TEST is not set +CONFIG_CORESIGHT=m +CONFIG_CORESIGHT_LINKS_AND_SINKS=m +CONFIG_CORESIGHT_LINK_AND_SINK_TMC=m +CONFIG_CORESIGHT_CATU=m +CONFIG_CORESIGHT_SINK_TPIU=m +CONFIG_CORESIGHT_SINK_ETBV10=m +CONFIG_CORESIGHT_SOURCE_ETM4X=m +CONFIG_ETM4X_IMPDEF_FEATURE=y +CONFIG_CORESIGHT_STM=m +CONFIG_CORESIGHT_CPU_DEBUG=m +# CONFIG_CORESIGHT_CPU_DEBUG_DEFAULT_ON is not set +CONFIG_CORESIGHT_CTI=m +CONFIG_CORESIGHT_CTI_INTEGRATION_REGS=y +# CONFIG_CORESIGHT_TRBE is not set +# CONFIG_ULTRASOC_SMB is not set +# CONFIG_CORESIGHT_TPDM is not set +# CONFIG_CORESIGHT_TPDA is not set +# CONFIG_CORESIGHT_DUMMY is not set +# end of arm64 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_FREE_PAGES is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/arm64/include/asm/asm-extable.h 
b/arch/arm64/include/asm/asm-extable.h index 980d1dd8e1a32bfc249af74a8719bc56dfee4111..819044fefbe7641d3f1246503cff4cde5f61c1d9 100644 --- a/arch/arm64/include/asm/asm-extable.h +++ b/arch/arm64/include/asm/asm-extable.h @@ -10,6 +10,7 @@ #define EX_TYPE_UACCESS_ERR_ZERO 2 #define EX_TYPE_KACCESS_ERR_ZERO 3 #define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 +#define EX_TYPE_COPY_MC_PAGE_ERR_ZERO 5 /* Data fields for EX_TYPE_UACCESS_ERR_ZERO */ #define EX_DATA_REG_ERR_SHIFT 0 @@ -51,6 +52,16 @@ #define _ASM_EXTABLE_UACCESS(insn, fixup) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, wzr, wzr) +#define _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, err, zero) \ + __ASM_EXTABLE_RAW(insn, fixup, \ + EX_TYPE_COPY_MC_PAGE_ERR_ZERO, \ + ( \ + EX_DATA_REG(ERR, err) | \ + EX_DATA_REG(ZERO, zero) \ + )) + +#define _ASM_EXTABLE_COPY_MC_PAGE(insn, fixup) \ + _ASM_EXTABLE_COPY_MC_PAGE_ERR_ZERO(insn, fixup, wzr, wzr) /* * Create an exception table entry for uaccess `insn`, which will branch to `fixup` * when an unhandled fault is taken. @@ -59,6 +70,10 @@ _ASM_EXTABLE_UACCESS(\insn, \fixup) .endm + .macro _asm_extable_copy_mc_page, insn, fixup + _ASM_EXTABLE_COPY_MC_PAGE(\insn, \fixup) + .endm + /* * Create an exception table entry for `insn` if `fixup` is provided. Otherwise * do nothing. diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 376a980f2bad08bb5a823ef4d3e4eccb96892080..547ab2f858886a71b0b19dca90655a41d5566d56 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -154,6 +154,10 @@ lr .req x30 // link register #define CPU_LE(code...) code #endif +#define CPY_MC(l, x...) \ +9999: x; \ + _asm_extable_copy_mc_page 9999b, l + /* * Define a macro that constructs a 64-bit value by concatenating two * 32-bit registers. 
Note that on big endian systems the order of the diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 5bba393760557d0b390ff39927de19d861af006d..24c2564268e5d259eb03093f03032ffc7cb7801c 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -619,6 +619,13 @@ static inline bool id_aa64pfr1_sme(u64 pfr1) return val > 0; } +static inline bool id_aa64pfr0_mpam(u64 pfr0) +{ + u32 val = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + + return val > 0; +} + static inline bool id_aa64pfr1_mte(u64 pfr1) { u32 val = cpuid_feature_extract_unsigned_field(pfr1, ID_AA64PFR1_EL1_MTE_SHIFT); diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 7c7493cb571f97bf98b0b4841aeb756d43990718..a938e7b32761c878d57bea1b738e30892ebff258 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -54,6 +54,8 @@ #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 +#define ARM_CPU_IMP_PHYTIUM 0x70 + #define ARM_CPU_IMP_BRCM 0x42 #define ARM_CPU_IMP_QCOM 0x51 #define ARM_CPU_IMP_NVIDIA 0x4E @@ -88,6 +90,11 @@ #define APM_CPU_PART_XGENE 0x000 #define APM_CPU_VAR_POTENZA 0x00 +#define PHYTIUM_CPU_PART_1500A 0x660 +#define PHYTIUM_CPU_PART_2000AHK 0x661 +#define PHYTIUM_CPU_PART_2000PLUS 0x662 +#define PHYTIUM_CPU_PART_2004 0x663 +#define PHYTIUM_CPU_PART_2500 0x663 #define CAVIUM_CPU_PART_THUNDERX 0x0A1 #define CAVIUM_CPU_PART_THUNDERX_81XX 0x0A2 @@ -140,6 +147,12 @@ #define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72) #define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73) #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75) +#define MIDR_FT_1500A MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_1500A) +#define MIDR_FT_2000AHK MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000AHK) +#define MIDR_FT_2000PLUS MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2000PLUS) +#define MIDR_FT_2004 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2004) +#define MIDR_FT_2500 MIDR_CPU_MODEL(ARM_CPU_IMP_PHYTIUM, PHYTIUM_CPU_PART_2500) + #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35) #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55) #define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76) diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h index b7afaa026842b7ebce94228e6031ce99f5cbb2a8..1e2181820a0afc3aefe0127dbcaf072a12c2da81 100644 --- a/arch/arm64/include/asm/el2_setup.h +++ b/arch/arm64/include/asm/el2_setup.h @@ -208,6 +208,21 @@ msr spsr_el2, x0 .endm +.macro __init_el2_mpam +#ifdef CONFIG_ARM64_MPAM + /* Memory Partitioning And Monitoring: disable EL2 traps */ + mrs x1, id_aa64pfr0_el1 + ubfx x0, x1, #ID_AA64PFR0_EL1_MPAM_SHIFT, #4 + cbz x0, .Lskip_mpam_\@ // skip if no MPAM + msr_s SYS_MPAM2_EL2, xzr // use the default partition + // and disable lower traps + mrs_s x0, SYS_MPAMIDR_EL1 + tbz x0, #17, .Lskip_mpam_\@ // skip if no MPAMHCR reg + msr_s SYS_MPAMHCR_EL2, xzr // clear TRAP_MPAMIDR_EL1 -> EL2 +.Lskip_mpam_\@: +#endif /* CONFIG_ARM64_MPAM */ +.endm + /** * Initialize EL2 registers to sane values. This should be called early on all * cores that were booted in EL2.
Note that everything gets initialised as @@ -225,6 +240,7 @@ __init_el2_stage2 __init_el2_gicv3 __init_el2_hstr + __init_el2_mpam __init_el2_nvhe_idregs __init_el2_cptr __init_el2_fgt diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h index 72b0e71cc3de88bbc3673d19feeb56e924ae2069..f80ebd0addfdea9af592e2c42712dd4c05dacd9a 100644 --- a/arch/arm64/include/asm/extable.h +++ b/arch/arm64/include/asm/extable.h @@ -46,4 +46,5 @@ bool ex_handler_bpf(const struct exception_table_entry *ex, #endif /* !CONFIG_BPF_JIT */ bool fixup_exception(struct pt_regs *regs); +bool fixup_exception_mc(struct pt_regs *regs); #endif diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index a81937fae9f6da772c6a3f0fc94fb19f019525a0..44994e2a6d88e8920fd8d8bb45d343777f7772f1 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,25 +8,38 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H +#ifdef CONFIG_KFENCE +#include + #include -static inline bool arch_kfence_init_pool(void) { return true; } +extern bool kfence_early_init; -static inline bool kfence_protect_page(unsigned long addr, bool protect) +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { - set_memory_valid(addr, 1, !protect); + unsigned long addr = (unsigned long)kpa->addr; + + if (!can_set_block_and_cont_map()) + return false; + + /* + * If the allocated range is block and contiguous mapping, split it + * to pte level before re-initializing kfence pages. + */ + split_linear_mapping_after_init(addr, kpa->pool_size, PAGE_KERNEL); return true; } -#ifdef CONFIG_KFENCE -extern bool kfence_early_init; -static inline bool arm64_kfence_can_set_direct_map(void) +static inline bool kfence_protect_page(unsigned long addr, bool protect) { - return !kfence_early_init; + set_memory_valid(addr, 1, !protect); + + return true; } -#else /* CONFIG_KFENCE */ -static inline bool arm64_kfence_can_set_direct_map(void) { return false; } + +static inline bool arch_kfence_free_pool(unsigned long addr) { return false; } + #endif /* CONFIG_KFENCE */ #endif /* __ASM_KFENCE_H */ diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 1095c6647e9665267e6aa67bac2dd7bb11b091f1..2d8b243a86cd4aea48744f0563996b6872f45d18 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -104,6 +104,7 @@ #define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En) #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En) +#define MPAMHCR_HOST_FLAGS 0 /* TCR_EL2 Registers bits */ #define TCR_EL2_RES1 ((1U << 31) | (1 << 23)) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index 94b68850cb9f009acbe513f97cfd0dae30ae8020..bad28b274467ab789ac3cd0fdb279126687d6b21 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -12,6 +12,10 @@ #define USER_ASID_FLAG (UL(1) << USER_ASID_BIT) #define TTBR_ASID_MASK (UL(0xffff) << 48) +#define NO_BLOCK_MAPPINGS BIT(0) +#define NO_CONT_MAPPINGS BIT(1) +#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ + #ifndef __ASSEMBLY__ #include @@ -72,7 +76,9 @@ extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); extern bool kaslr_requires_kpti(void); - +extern void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot); +extern void split_linear_mapping_after_init(unsigned long virt, 
phys_addr_t size, + pgprot_t prot); #define INIT_MM_CONTEXT(name) \ .pgd = init_pg_dir, diff --git a/arch/arm64/include/asm/mpam.h b/arch/arm64/include/asm/mpam.h new file mode 100644 index 0000000000000000000000000000000000000000..9abe1fe58c34317c16bbc3f4aa6da2fabad74639 --- /dev/null +++ b/arch/arm64/include/asm/mpam.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. */ + +#ifndef __ASM__MPAM_H +#define __ASM__MPAM_H + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +/* CPU Registers */ +#define MPAM_SYSREG_EN BIT_ULL(63) +#define MPAM_SYSREG_TRAP_IDR BIT_ULL(58) +#define MPAM_SYSREG_TRAP_MPAM0_EL1 BIT_ULL(49) +#define MPAM_SYSREG_TRAP_MPAM1_EL1 BIT_ULL(48) +#define MPAM_SYSREG_PMG_D GENMASK(47, 40) +#define MPAM_SYSREG_PMG_I GENMASK(39, 32) +#define MPAM_SYSREG_PARTID_D GENMASK(31, 16) +#define MPAM_SYSREG_PARTID_I GENMASK(15, 0) + +#define MPAMIDR_PMG_MAX GENMASK(40, 32) +#define MPAMIDR_PMG_MAX_SHIFT 32 +#define MPAMIDR_PMG_MAX_LEN 8 +#define MPAMIDR_VPMR_MAX GENMASK(20, 18) +#define MPAMIDR_VPMR_MAX_SHIFT 18 +#define MPAMIDR_VPMR_MAX_LEN 3 +#define MPAMIDR_HAS_HCR BIT(17) +#define MPAMIDR_HAS_HCR_SHIFT 17 +#define MPAMIDR_PARTID_MAX GENMASK(15, 0) +#define MPAMIDR_PARTID_MAX_SHIFT 0 +#define MPAMIDR_PARTID_MAX_LEN 15 + +#define MPAMHCR_EL0_VPMEN BIT_ULL(0) +#define MPAMHCR_EL1_VPMEN BIT_ULL(1) +#define MPAMHCR_GSTAPP_PLK BIT_ULL(8) +#define MPAMHCR_TRAP_MPAMIDR BIT_ULL(31) + +/* Properties of the VPM registers */ +#define MPAM_VPM_NUM_REGS 8 +#define MPAM_VPM_PARTID_LEN 16 +#define MPAM_VPM_PARTID_MASK 0xffff +#define MPAM_VPM_REG_LEN 64 +#define MPAM_VPM_PARTIDS_PER_REG (MPAM_VPM_REG_LEN / MPAM_VPM_PARTID_LEN) +#define MPAM_VPM_MAX_PARTID (MPAM_VPM_NUM_REGS * MPAM_VPM_PARTIDS_PER_REG) + + +DECLARE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DECLARE_STATIC_KEY_FALSE(mpam_enabled); +DECLARE_PER_CPU(u64, arm64_mpam_default); +DECLARE_PER_CPU(u64, arm64_mpam_current); + +/* check whether all CPUs have MPAM support */ +static __always_inline bool mpam_cpus_have_feature(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return cpus_have_final_cap(ARM64_MPAM); + return false; +} + +/* check whether all CPUs have MPAM virtualisation support */ +static __always_inline bool mpam_cpus_have_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + return static_branch_unlikely(&arm64_mpam_has_hcr); + return false; +} + +/* enable MPAM virtualisation support */ +static inline void __init __enable_mpam_hcr(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM)) + static_branch_enable(&arm64_mpam_has_hcr); +} + +/* + * The resctrl filesystem writes to the partid/pmg values for threads and CPUs, + * which may race with reads in __mpam_sched_in(). Ensure only one of the old + * or new values is used. Particular care should be taken with the pmg field + * as __mpam_sched_in() may read a partid and pmg that don't match, causing + * this value to be stored with cache allocations, despite being considered + * 'free' by resctrl. + * + * A value in struct thread_info is used instead of struct task_struct as the + * cpu's u64 register format is used, but struct task_struct has two u32s.
+ */ + static inline void mpam_set_cpu_defaults(int cpu, u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ + u64 default_val; + + default_val = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + default_val |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(per_cpu(arm64_mpam_default, cpu), default_val); +} + +static inline void mpam_set_task_partid_pmg(struct task_struct *tsk, + u16 partid_d, u16 partid_i, + u8 pmg_d, u8 pmg_i) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval; + + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d); + regval |= FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, pmg_d); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, pmg_i); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + +static inline u64 mpam_get_regval(struct task_struct *tsk) +{ +#ifdef CONFIG_ARM64_MPAM + return READ_ONCE(task_thread_info(tsk)->mpam_partid_pmg); +#else + return 0; +#endif +} + +static inline void resctrl_arch_set_rmid(struct task_struct *tsk, u32 rmid) +{ +#ifdef CONFIG_ARM64_MPAM + u64 regval = mpam_get_regval(tsk); + + regval &= ~MPAM_SYSREG_PMG_D; + regval &= ~MPAM_SYSREG_PMG_I; + regval |= FIELD_PREP(MPAM_SYSREG_PMG_D, rmid); + regval |= FIELD_PREP(MPAM_SYSREG_PMG_I, rmid); + + WRITE_ONCE(task_thread_info(tsk)->mpam_partid_pmg, regval); +#endif +} + +static inline void mpam_thread_switch(struct task_struct *tsk) +{ + u64 oldregval; + int cpu = smp_processor_id(); + u64 regval = mpam_get_regval(tsk); + + if (!IS_ENABLED(CONFIG_ARM64_MPAM) || + !static_branch_likely(&mpam_enabled)) + return; + + if (regval == READ_ONCE(mpam_resctrl_default_group)) + regval = READ_ONCE(per_cpu(arm64_mpam_default, cpu)); + + oldregval = READ_ONCE(per_cpu(arm64_mpam_current, cpu)); + if (oldregval == regval) + return; + + /* Synchronising this write is left until the ERET to EL0 */ + write_sysreg_s(regval, SYS_MPAM0_EL1); + WRITE_ONCE(per_cpu(arm64_mpam_current, cpu), regval); +} +#endif /* __ASM__MPAM_H */ diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h index 4cedbaa16f419641f45c316be067947c279794c5..79474232d413d1f1b9e854b0366df056a7fa8d46 100644 --- a/arch/arm64/include/asm/mte.h +++ b/arch/arm64/include/asm/mte.h @@ -93,6 +93,7 @@ void mte_zero_clear_page_tags(void *addr); void mte_sync_tags(pte_t pte); void mte_copy_page_tags(void *kto, const void *kfrom); void mte_thread_init_user(void); +int mte_copy_mc_page_tags(void *kto, const void *kfrom); void mte_thread_switch(struct task_struct *next); void mte_cpu_setup(void); void mte_suspend_enter(void); @@ -131,6 +132,10 @@ static inline void mte_copy_page_tags(void *kto, const void *kfrom) static inline void mte_thread_init_user(void) { } +static inline int mte_copy_mc_page_tags(void *kto, const void *kfrom) +{ + return 0; +} static inline void mte_thread_switch(struct task_struct *next) { } diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 2312e6ee595fda5b818e4ea9e2f057b44ffd735c..62bdc843e3e79cc7bde0be4767a74d86988e0761 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -36,6 +36,16 @@ struct folio *vma_alloc_zeroed_movable_folio(struct vm_area_struct *vma, void tag_clear_highpage(struct page *to); #define __HAVE_ARCH_TAG_CLEAR_HIGHPAGE +#ifdef CONFIG_ARCH_HAS_COPY_MC +int copy_mc_page(void *to, const void *from); +int copy_mc_highpage(struct page *to, struct page *from); +#define 
__HAVE_ARCH_COPY_MC_HIGHPAGE + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); +#define __HAVE_ARCH_COPY_MC_USER_HIGHPAGE +#endif + #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 7f7d9b1df4e5ad03e56f5395a0b52507c4d17c08..976b9304023290d38194f299ecf9c18e481ebb4c 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -256,6 +256,11 @@ static inline pmd_t pmd_mkcont(pmd_t pmd) return __pmd(pmd_val(pmd) | PMD_SECT_CONT); } +static inline pmd_t pmd_mknoncont(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~PMD_SECT_CONT); +} + static inline pte_t pte_mkdevmap(pte_t pte) { return set_pte_bit(pte, __pgprot(PTE_DEVMAP | PTE_SPECIAL)); @@ -491,6 +496,7 @@ static inline int pmd_trans_huge(pmd_t pmd) #define pmd_mkclean(pmd) pte_pmd(pte_mkclean(pmd_pte(pmd))) #define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd))) #define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd))) +#define pmd_exec(pmd) (!(pmd_val(pmd) & PMD_TABLE_PXN)) static inline pmd_t pmd_mkinvalid(pmd_t pmd) { @@ -685,6 +691,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd) #define pud_valid(pud) pte_valid(pud_pte(pud)) #define pud_user(pud) pte_user(pud_pte(pud)) #define pud_user_exec(pud) pte_user_exec(pud_pte(pud)) +#define pud_exec(pud) (!(pud_val(pud) & PUD_TABLE_PXN)) static inline void set_pud(pud_t *pudp, pud_t pud) { @@ -752,6 +759,7 @@ static inline pmd_t *pud_pgtable(pud_t pud) #define p4d_none(p4d) (!p4d_val(p4d)) #define p4d_bad(p4d) (!(p4d_val(p4d) & 2)) #define p4d_present(p4d) (p4d_val(p4d)) +#define p4d_exec(p4d) (!(p4d_val(p4d) & P4D_TABLE_PXN)) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { @@ -798,6 +806,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d) #define pgd_page_paddr(pgd) ({ BUILD_BUG(); 0;}) /* Match pud_offset folding in */ +#define pud_offset_phys(dir, addr) NULL #define pud_set_fixmap(addr) NULL #define pud_set_fixmap_offset(pgdp, addr) ((pud_t *)pgdp) #define pud_clear_fixmap() diff --git a/arch/arm64/include/asm/phytium_machine_types.h b/arch/arm64/include/asm/phytium_machine_types.h new file mode 100644 index 0000000000000000000000000000000000000000..8aed50daca4bce4ec55421b63b6b811446a90954 --- /dev/null +++ b/arch/arm64/include/asm/phytium_machine_types.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Authors: Wang Yinfeng . 
+ */ + +#ifndef _MACHINE_TYPE_H_ +#define _MACHINE_TYPE_H_ + +#include +#include + +static inline bool phytium_part(u32 cpuid) +{ + return ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) == cpuid); +} + +#define typeof_ft1500a() phytium_part(MIDR_FT_1500A) +#define typeof_ft2000ahk() phytium_part(MIDR_FT_2000AHK) +#define typeof_ft2000plus() phytium_part(MIDR_FT_2000PLUS) +#define typeof_ft2004() phytium_part(MIDR_FT_2004) +#define typeof_s2500() phytium_part(MIDR_FT_2500) + +#endif diff --git a/arch/arm64/include/asm/resctrl.h b/arch/arm64/include/asm/resctrl.h new file mode 100644 index 0000000000000000000000000000000000000000..b506e95cf6e374690a71e0f1d142fe0032ae6e45 --- /dev/null +++ b/arch/arm64/include/asm/resctrl.h @@ -0,0 +1,2 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h index 0f740b7811871cbd5b35fc99fbe550830739738e..3f5d866b98d0da86301ead5abc63a1d20f9487d9 100644 --- a/arch/arm64/include/asm/set_memory.h +++ b/arch/arm64/include/asm/set_memory.h @@ -5,8 +5,7 @@ #include -bool can_set_direct_map(void); -#define can_set_direct_map can_set_direct_map +bool can_set_block_and_cont_map(void); int set_memory_valid(unsigned long addr, int numpages, int enable); diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 38296579a4fd54661ea7ffaa47c8ea89bffdea8d..94633246d31138eaa4caa6ea2d032d1e2910980a 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -515,6 +515,13 @@ #define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) #define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) + +#define __VPMn_op2(n) ((n) & 0x7) +#define SYS_MPAM_VPMn_EL2(n) sys_reg(3, 4, 10, 6, __VPMn_op2(n)) + #define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) #define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) #define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) @@ -579,6 +586,7 @@ #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) +#define SYS_MPAM1_EL12 sys_reg(3, 5, 10, 5, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 553d1bc559c603f5d9933651463b9025a663e7d6..c57b33de0ed10223464dff76bd906b54947d4364 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -41,6 +41,9 @@ struct thread_info { #ifdef CONFIG_SHADOW_CALL_STACK void *scs_base; void *scs_sp; +#endif +#ifdef CONFIG_ARM64_MPAM + u64 mpam_partid_pmg; #endif u32 cpu; }; diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b149cf9f91bc965d53f97689a83ec653e8278a8d..e54f4343543a8191a4f6942a3a409feb23bb72ec 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -223,6 +223,10 @@ static inline unsigned long get_trans_granule(void) * determined by 'stride' and only affect any walk-cache entries * if 'last_level' is equal to false. * + * __flush_tlb_kernel_pgtable_entry(addr) + * Invalidate a single kernel mapping for address "addr" on all + * CPUs. Must be called if the corresponding page table entry is + * a last-level entry.
* * Finally, take a look at asm/tlb.h to see how tlb_flush() is implemented * on top of these routines, since that is our interface to the mmu_gather @@ -479,6 +483,20 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ish); isb(); } + +/* + * Used to invalidate the TLB entries to the last level page table + * (pud/pmd/pte). + */ +static inline void __flush_tlb_kernel_pgtable_entry(unsigned long kaddr) +{ + unsigned long addr = __TLBI_VADDR(kaddr, 0); + + dsb(ishst); + __tlbi(vaale1is, addr); + dsb(ish); + isb(); +} #endif #endif diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index d95b3d6b471a7d63957c47151fd6cb404ca0f4c7..7c67e2f29206604a8392c984c018e43d70766944 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -65,10 +65,13 @@ obj-$(CONFIG_KEXEC_CORE) += machine_kexec.o relocate_kernel.o \ obj-$(CONFIG_KEXEC_FILE) += machine_kexec_file.o kexec_image.o obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o + obj-$(CONFIG_CRASH_DUMP) += crash_dump.o obj-$(CONFIG_CRASH_CORE) += crash_core.o obj-$(CONFIG_ARM_SDE_INTERFACE) += sdei.o +obj-$(CONFIG_SDEI_WATCHDOG) += watchdog_sdei.o obj-$(CONFIG_ARM64_PTR_AUTH) += pointer_auth.o +obj-$(CONFIG_ARM64_MPAM) += mpam.o obj-$(CONFIG_ARM64_MTE) += mte.o obj-y += vdso-wrap.o obj-$(CONFIG_COMPAT_VDSO) += vdso32-wrap.o diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index 444a73c2e63858cea758fb89c5d65b786bc6f5f3..7beb2491ec8235e258ea18b589a3b92a32b19de9 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -2719,6 +2719,15 @@ static const struct arm64_cpu_capabilities arm64_features[] = { .matches = has_cpuid_feature, ARM64_CPUID_FIELDS(ID_AA64MMFR2_EL1, EVT, IMP) }, +#ifdef CONFIG_ARM64_MPAM + { + .desc = "Memory Partitioning And Monitoring", + .type = ARM64_CPUCAP_SYSTEM_FEATURE, + .capability = ARM64_MPAM, + .matches = has_cpuid_feature, + ARM64_CPUID_FIELDS(ID_AA64PFR0_EL1, MPAM, 1) + }, +#endif {}, }; diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 98fda8500535341a063d3ee308d77788602308d6..07ee88abfa0de20ed1766b0662df2a4b5bb4c2e5 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -178,17 +178,24 @@ static int c_show(struct seq_file *m, void *v) { int i, j; bool compat = personality(current->personality) == PER_LINUX32; + unsigned int cpu, index, total; + bool rich_container = false; for_each_online_cpu(i) { struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); u32 midr = cpuinfo->reg_midr; + index = cpu = i; + + if (check_rich_container(cpu, &index, &rich_container, &total)) + continue; + /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with * "processor". Give glibc what it expects. */ - seq_printf(m, "processor\t: %d\n", i); + seq_printf(m, "processor\t: %d\n", index); if (compat) seq_printf(m, "model name\t: ARMv8 Processor rev %d (%s)\n", MIDR_REVISION(midr), COMPAT_ELF_PLATFORM); diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h index 35f3c795951373549faad3ed2d1bd30ad427eb78..6999668e9ecf0811258a02acefabde99543bae49 100644 --- a/arch/arm64/kernel/image-vars.h +++ b/arch/arm64/kernel/image-vars.h @@ -64,6 +64,12 @@ KVM_NVHE_ALIAS(nvhe_hyp_panic_handler); /* Vectors installed by hyp-init on reset HVC. 
*/ KVM_NVHE_ALIAS(__hyp_stub_vectors); +/* Additional static keys for cpufeatures */ +#ifdef CONFIG_ARM64_MPAM +KVM_NVHE_ALIAS(arm64_mpam_has_hcr); +KVM_NVHE_ALIAS(mpam_enabled); +#endif + /* Static keys which are set if a vGIC trap should be handled in hyp. */ KVM_NVHE_ALIAS(vgic_v2_cpuif_trap); KVM_NVHE_ALIAS(vgic_v3_cpuif_trap); diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c index 078910db77a41b6ffa60c665debc927dff0c7ca5..cfa6b0dafc88bed1220372547f8c9c8e8dac8339 100644 --- a/arch/arm64/kernel/machine_kexec.c +++ b/arch/arm64/kernel/machine_kexec.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -262,6 +263,15 @@ void machine_crash_shutdown(struct pt_regs *regs) /* shutdown non-crashing cpus */ crash_smp_send_stop(); + /* + * When we panic in a hardlockup detected by sdei_watchdog, the secure + * timer interrupt remains active here because the firmware only clears + * the EOI after dispatch is completed. This would cause the + * arm_arch_timer interrupt to fail to trigger in the second kernel. So + * clear the EOI of the secure timer before booting the second kernel. + */ + sdei_watchdog_clear_eoi(); + /* for crashing cpu */ crash_save_cpu(regs, smp_processor_id()); machine_kexec_mask_interrupts(); diff --git a/arch/arm64/kernel/mpam.c b/arch/arm64/kernel/mpam.c new file mode 100644 index 0000000000000000000000000000000000000000..134b44118553c85ec6798380c5b1d99c9ab766ef --- /dev/null +++ b/arch/arm64/kernel/mpam.c @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Arm Ltd. */ + +#include + +#include +#include +#include + +DEFINE_STATIC_KEY_FALSE(arm64_mpam_has_hcr); +DEFINE_STATIC_KEY_FALSE(mpam_enabled); +DEFINE_PER_CPU(u64, arm64_mpam_default); +DEFINE_PER_CPU(u64, arm64_mpam_current); diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 0fcc4eb1a7abccaf537ee054646e2420bbe62392..34f39e4aa4d106f06379b9f8eb37467c65d18ded 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -49,6 +49,7 @@ #include #include #include +#include #include #include #include @@ -552,6 +553,12 @@ struct task_struct *__switch_to(struct task_struct *prev, if (prev->thread.sctlr_user != next->thread.sctlr_user) update_sctlr_el1(next->thread.sctlr_user); + /* + * MPAM thread switch happens after the DSB to ensure prev's accesses + * use prev's MPAM settings. + */ + mpam_thread_switch(next); + /* the actual thread switch */ last = cpu_switch_to(prev, next); diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 960b98b43506dd2246aac628e2a92ab455f44043..b8d2763c5dd7f4acef8213fc5b2add2fadecede6 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -501,6 +502,34 @@ static bool bootcpu_valid __initdata; static unsigned int cpu_count = 1; #ifdef CONFIG_ACPI + +#ifdef CONFIG_ARCH_PHYTIUM +/* + * On Phytium S2500 multi-socket servers, for example 2-socket (2P) ones, there + * are socket0 and socket1 on the server: + * If the storage device (like a SAS controller with the disks to save vmcore + * into) is installed on socket1 and the second kernel brings up 2 CPUs both on + * socket0 with nr_cpus=2, then vmcore will fail to be saved to disk, as + * interrupts like SPIs and LPIs (except SGIs) can't be delivered across CPU + * sockets on this server platform. + * To avoid this issue, bypass CPUs other than each socket's cpu0, to ensure + * that cpu0 on each socket can boot up and handle interrupts when booting the + * second kernel. + */ +static bool __init is_phytium_kdump_cpu_need_bypass(u64 hwid) +{ + if ((read_cpuid_id() & MIDR_CPU_MODEL_MASK) != MIDR_FT_2500) + return false; + + /* + * Bypass CPUs other than each socket's cpu0 so the second kernel can + * bring up cpu0 on each socket + */ + if (is_kdump_kernel() && (hwid & 0xffff) != (cpu_logical_map(0) & 0xffff)) + return true; + return false; +} +#endif +
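Review note: a minimal sketch of what the (hwid & 0xffff) comparison in the helper above encodes, assuming the architectural MPIDR affinity layout (Aff0 in bits [7:0], Aff1 in bits [15:8]); the helper name below is hypothetical and not part of the patch::

    /* Hypothetical illustration only, not part of the patch. */
    static bool same_position_as_boot_cpu(u64 hwid)
    {
    	/*
    	 * Masking with 0xffff keeps Aff1:Aff0, i.e. the CPU's position
    	 * within its socket. Bypassing every CPU whose position differs
    	 * from logical cpu0's leaves exactly one CPU (the local "cpu0")
    	 * per socket alive in the kdump kernel.
    	 */
    	return (hwid & 0xffff) == (cpu_logical_map(0) & 0xffff);
    }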
static struct acpi_madt_generic_interrupt cpu_madt_gicc[NR_CPUS]; struct acpi_madt_generic_interrupt *acpi_cpu_get_madt_gicc(int cpu) @@ -550,6 +579,11 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor) if (cpu_count >= NR_CPUS) return; +#ifdef CONFIG_ARCH_PHYTIUM + if (is_phytium_kdump_cpu_need_bypass(hwid)) + return; +#endif + /* map the logical cpu id to cpu MPIDR */ set_cpu_logical_map(cpu_count, hwid); diff --git a/arch/arm64/kernel/watchdog_sdei.c b/arch/arm64/kernel/watchdog_sdei.c new file mode 100644 index 0000000000000000000000000000000000000000..c7b12806364e0fcb5a68295ddd8bc979ecb7a179 --- /dev/null +++ b/arch/arm64/kernel/watchdog_sdei.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Detect hard lockups on a system + * + * Note: Most of this code is borrowed heavily from the perf hardlockup + * detector, so thanks to Don for the initial implementation. + */ + +#define pr_fmt(fmt) "SDEI NMI watchdog: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +/* We use the secure physical timer as SDEI NMI watchdog timer */ +#define SDEI_NMI_WATCHDOG_HWIRQ 29 + +static int sdei_watchdog_event_num; +bool disable_sdei_nmi_watchdog; +static bool sdei_watchdog_registered; +static DEFINE_PER_CPU(ktime_t, last_check_time); + +void sdei_watchdog_hardlockup_enable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + /* Skip the first hardlockup check in case the BIOS didn't init the + * secure timer correctly */ + watchdog_hardlockup_touch_cpu(cpu); + sdei_api_set_secure_timer_period(watchdog_thresh); + __this_cpu_write(last_check_time, ktime_get_mono_fast_ns()); + + ret = sdei_api_event_enable(sdei_watchdog_event_num); + if (ret) { + pr_err("Enable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); + } +} + +void sdei_watchdog_hardlockup_disable(unsigned int cpu) +{ + int ret; + + if (!sdei_watchdog_registered) + return; + + ret = sdei_api_event_disable(sdei_watchdog_event_num); + if (ret) + pr_err("Disable NMI Watchdog failed on cpu%d\n", + smp_processor_id()); +} +
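As a worked example of the timing guard in the callback that follows (the numbers are illustrative): with the default watchdog_thresh of 10 seconds, the lower bound evaluates to 8 seconds, so a secure timer event arriving any sooner than that after the previous check is reported as a firmware bug rather than fed into the hardlockup detector::

    /* Illustrative arithmetic for the bound used by sdei_watchdog_callback(). */
    u64 thresh = 10;                                  /* default watchdog_thresh */
    u64 bound  = thresh * (u64)NSEC_PER_SEC * 4 / 5;  /* 8,000,000,000 ns == 8 s */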
+static int sdei_watchdog_callback(u32 event, + struct pt_regs *regs, void *arg) +{ + ktime_t delta, now = ktime_get_mono_fast_ns(); + + delta = now - __this_cpu_read(last_check_time); + __this_cpu_write(last_check_time, now); + + /* + * Check delta against 4/5 of the actual watchdog threshold period so + * the hrtimer is guaranteed to fire at least once within the real + * watchdog threshold. + */ + if (delta < watchdog_thresh * (u64)NSEC_PER_SEC * 4 / 5) { + pr_err(FW_BUG "SDEI Watchdog event triggered too soon, " + "time to last check:%lld ns\n", delta); + return 0; + } + + watchdog_hardlockup_check(smp_processor_id(), regs); + + return 0; +} +NOKPROBE_SYMBOL(sdei_watchdog_callback); + +static void sdei_nmi_watchdog_bind(void *data) +{ + int ret; + + ret = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (ret < 0) + pr_err("SDEI bind failed on cpu%d, return %d\n", + smp_processor_id(), ret); +} + +static int __init disable_sdei_nmi_watchdog_setup(char *str) +{ + disable_sdei_nmi_watchdog = true; + return 1; +} +__setup("disable_sdei_nmi_watchdog", disable_sdei_nmi_watchdog_setup); + +void sdei_watchdog_clear_eoi(void) +{ + if (sdei_watchdog_registered) + sdei_api_clear_eoi(SDEI_NMI_WATCHDOG_HWIRQ); +} + +int __init sdei_watchdog_hardlockup_probe(void) +{ + int ret; + + if (disable_sdei_nmi_watchdog) + return -EINVAL; + + if (!is_hyp_mode_available()) { + pr_err("Disable SDEI NMI Watchdog in VM\n"); + return -EINVAL; + } + + sdei_watchdog_event_num = sdei_api_event_interrupt_bind(SDEI_NMI_WATCHDOG_HWIRQ); + if (sdei_watchdog_event_num < 0) { + pr_err("Bind interrupt failed. Firmware may not support SDEI!\n"); + return sdei_watchdog_event_num; + } + + /* + * After we introduced 'sdei_api_set_secure_timer_period', we deselect + * 'CONFIG_HARDLOCKUP_CHECK_TIMESTAMP'. So we need to make sure that + * firmware can set the period of the secure timer and the timer + * interrupt doesn't trigger too soon. + */ + if (sdei_api_set_secure_timer_period(watchdog_thresh)) { + pr_err("Firmware doesn't support setting the secure timer period, please update your BIOS!\n"); + return -EINVAL; + } + + on_each_cpu(sdei_nmi_watchdog_bind, NULL, true); + + ret = sdei_event_register(sdei_watchdog_event_num, + sdei_watchdog_callback, NULL); + if (ret) { + pr_err("SDEI Watchdog register callback failed\n"); + return ret; + } + + sdei_watchdog_registered = true; + pr_info("SDEI Watchdog registered successfully\n"); + + return 0; +} diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9cfe6bd1dbe459cb3588bccd94359369a546947e..da30acce63082a4fff8f7d694e5ec89436e4d241 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -172,6 +173,35 @@ static inline void __deactivate_traps_hfgxtr(struct kvm_vcpu *vcpu) write_sysreg_s(ctxt_sys_reg(hctxt, HDFGWTR_EL2), SYS_HDFGWTR_EL2); } +static inline void __activate_traps_mpam(struct kvm_vcpu *vcpu) +{ + u64 r = MPAM_SYSREG_TRAP_MPAM0_EL1 | MPAM_SYSREG_TRAP_MPAM1_EL1; + + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) + return; + + /* trap guest access to MPAMIDR_EL1 */ + if (mpam_cpus_have_mpam_hcr()) { + write_sysreg_s(MPAMHCR_TRAP_MPAMIDR, SYS_MPAMHCR_EL2); + } else { + /* From v1.1 TIDR can trap MPAMIDR, set it unconditionally */ + r |= MPAM_SYSREG_TRAP_IDR; + } + + write_sysreg_s(r, SYS_MPAM2_EL2); +} + +static inline void __deactivate_traps_mpam(void) +{ + if (!mpam_cpus_have_feature() || !static_branch_likely(&mpam_enabled)) + return; + + write_sysreg_s(0, SYS_MPAM2_EL2); + + if (mpam_cpus_have_mpam_hcr()) + write_sysreg_s(MPAMHCR_HOST_FLAGS, SYS_MPAMHCR_EL2); +} + static inline void __activate_traps_common(struct kvm_vcpu *vcpu) { /* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */ @@ -212,6 +242,7 @@ static inline void
__activate_traps_common(struct kvm_vcpu *vcpu) } __activate_traps_hfgxtr(vcpu); + __activate_traps_mpam(vcpu); } static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) @@ -231,6 +262,7 @@ static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu) write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2); __deactivate_traps_hfgxtr(vcpu); + __deactivate_traps_mpam(); } static inline void ___activate_traps(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h index bb6b571ec627dede466c5fa1d05785a9c9f78764..8e99f66b377bd37f3622ae9b5c9bf881517659b8 100644 --- a/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h +++ b/arch/arm64/kvm/hyp/include/hyp/sysreg-sr.h @@ -15,6 +15,7 @@ #include #include #include +#include static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt) { @@ -243,4 +244,32 @@ static inline void __sysreg32_restore_state(struct kvm_vcpu *vcpu) write_sysreg(__vcpu_sys_reg(vcpu, DBGVCR32_EL2), dbgvcr32_el2); } +/* + * The _EL0 value was written by the host's context switch, copy this into the + * guest's EL1. + */ +static inline void __mpam_guest_load(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) + write_sysreg_el1(read_sysreg_s(SYS_MPAM0_EL1), SYS_MPAM1); +} + +/* + * Copy the _EL2 register back to _EL1, clearing any trap bits EL2 may have set. + * nVHE world-switch copies the _EL1 register to _EL2. A VHE host writes to the + * _EL2 register as it is aliased by the hardware when TGE is set. + */ +static inline void __mpam_guest_put(void) +{ + u64 val, mask = MPAM_SYSREG_PMG_D | MPAM_SYSREG_PMG_I | + MPAM_SYSREG_PARTID_D | MPAM_SYSREG_PARTID_I; + + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) { + val = FIELD_GET(mask, read_sysreg_s(SYS_MPAM2_EL2)); + write_sysreg_el1(val, SYS_MPAM1); + } +} + #endif /* __ARM64_KVM_HYP_SYSREG_SR_H__ */ diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c353a06ee7e6d624b41997021379b7b4cf77453d..04b7f83c2ae36be1efe1c4567df1be5d5ca336c2 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -242,6 +242,14 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code) } } +/* Use the host thread's partid and pmg for world switch */ +static void __mpam_copy_el1_to_el2(void) +{ + if (IS_ENABLED(CONFIG_ARM64_MPAM) && mpam_cpus_have_feature() && + static_branch_likely(&mpam_enabled)) + write_sysreg_s(read_sysreg_s(SYS_MPAM1_EL1), SYS_MPAM2_EL2); +} + /* Switch to the guest for legacy non-VHE systems */ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) { @@ -251,6 +259,8 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) bool pmu_switch_needed; u64 exit_code; + __mpam_copy_el1_to_el2(); + /* * Having IRQs masked via PMR when entering the guest means the GIC * will not signal the CPU of interrupts of lower priority, and the @@ -310,6 +320,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __timer_enable_traps(vcpu); __debug_switch_to_guest(vcpu); + __mpam_guest_load(); do { /* Jump in the fire! 
*/ @@ -320,6 +331,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_save_state_nvhe(guest_ctxt); __sysreg32_save_state(vcpu); + __mpam_guest_put(); __timer_disable_traps(vcpu); __hyp_vgic_save_state(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c index b35a178e7e0db0571ff6aba5a4fcaa08fc70749f..6b407cd3230d4dca06dece7a9347d2f1587025ab 100644 --- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c +++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c @@ -90,6 +90,7 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu) __sysreg32_restore_state(vcpu); __sysreg_restore_user_state(guest_ctxt); __sysreg_restore_el1_state(guest_ctxt); + __mpam_guest_load(); vcpu_set_flag(vcpu, SYSREGS_ON_CPU); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0afd6136e2759cbe0773fc5a036577e7e9e09db2..9424fa7351bf610dec3c7a6d614bdeb95337007c 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -411,6 +411,31 @@ static bool trap_oslar_el1(struct kvm_vcpu *vcpu, return true; } +static bool trap_mpam(struct kvm_vcpu *vcpu, + struct sys_reg_params *p, + const struct sys_reg_desc *r) +{ + u64 aa64pfr0_el1 = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); + + /* + * What did we expose to the guest? + * Earlier guests may have seen the ID bits, which can't be removed + * without breaking migration, but MPAMIDR_EL1 can advertise all-zeroes, + * indicating there are zero PARTID/PMG supported by the CPU, allowing + * the other two trapped registers (MPAM1_EL1 and MPAM0_EL1) to be + * treated as RAZ/WI. + * Emulating MPAM1_EL1 as RAZ/WI means the guest sees the MPAMEN bit + * as clear, and realises MPAM isn't usable on this CPU. + */ + if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, aa64pfr0_el1)) { + p->regval = 0; + return true; + } + + kvm_inject_undefined(vcpu); + return false; +} + static bool trap_oslsr_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) @@ -1228,6 +1253,36 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, return arm64_ftr_safe_value(&kvm_ftr, new, cur); } +static u64 kvm_arm64_ftr_max(struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) +{ + u64 pfr0, val = rd->reset(vcpu, rd); + u32 field, id = reg_to_encoding(rd); + + /* + * Some values may reset to a lower value than can be supported, + * get the maximum feature value. + */ + switch (id) { + case SYS_ID_AA64PFR0_EL1: + pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + + /* + * MPAM resets to 0, but migration of MPAM=1 guests is needed. + * See trap_mpam() for more. + */ + field = cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_MPAM_SHIFT); + if (field == ID_AA64PFR0_EL1_MPAM_1) { + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, ID_AA64PFR0_EL1_MPAM_1); + } + + break; + } + + return val; +} + /** * arm64_check_features() - Check if a feature register value constitutes * a subset of features indicated by the idreg's KVM sanitised limit. 
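A minimal sketch of the bitfield handling performed by kvm_arm64_ftr_max() above, under the usual linux/bitfield.h semantics; raise_mpam_limit() is a hypothetical name used here purely for illustration::

    #include <linux/bitfield.h>

    /*
     * Sketch: if the host's sanitised ID_AA64PFR0_EL1 reports MPAM==1,
     * raise the writable limit to 1 as well, so MPAM=1 guests started on
     * older kernels can still be migrated in, even though the field
     * resets to 0.
     */
    static u64 raise_mpam_limit(u64 limit, u64 host_pfr0)
    {
    	if (FIELD_GET(ID_AA64PFR0_EL1_MPAM_MASK, host_pfr0) == 1) {
    		limit &= ~ID_AA64PFR0_EL1_MPAM_MASK;
    		limit |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
    	}
    	return limit;
    }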
@@ -1248,8 +1303,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, const struct arm64_ftr_bits *ftrp = NULL; u32 id = reg_to_encoding(rd); u64 writable_mask = rd->val; - u64 limit = rd->reset(vcpu, rd); - u64 mask = 0; + u64 limit, mask = 0; /* * Hidden and unallocated ID registers may not have a corresponding @@ -1263,6 +1317,7 @@ static int arm64_check_features(struct kvm_vcpu *vcpu, if (!ftr_reg) return -EINVAL; + limit = kvm_arm64_ftr_max(vcpu, rd); ftrp = ftr_reg->ftr_bits; for (; ftrp && ftrp->width; ftrp++) { @@ -1466,6 +1521,14 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, val &= ~ID_AA64PFR0_EL1_AMU_MASK; + /* + * MPAM is disabled by default as KVM also needs a set of PARTID to + * program the MPAMVPMx_EL2 PARTID remapping registers with. But some + * older kernels let the guest see the ID bit. Turning it on causes + * the registers to be emulated as RAZ/WI. See trap_mpam() for more. + */ + val &= ~ID_AA64PFR0_EL1_MPAM_MASK; + return val; } @@ -2130,8 +2193,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_LOREA_EL1), trap_loregion }, { SYS_DESC(SYS_LORN_EL1), trap_loregion }, { SYS_DESC(SYS_LORC_EL1), trap_loregion }, + { SYS_DESC(SYS_MPAMIDR_EL1), trap_mpam }, { SYS_DESC(SYS_LORID_EL1), trap_loregion }, + { SYS_DESC(SYS_MPAM1_EL1), trap_mpam }, + { SYS_DESC(SYS_MPAM0_EL1), trap_mpam }, { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 }, { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 }, diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 29490be2546bfce91862735e2b66a83c8d682afe..a2fd865b816db7029ca956b70b5b4934b6baa569 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -15,6 +15,8 @@ endif lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o +lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc_page.o + obj-$(CONFIG_CRC32) += crc32.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/copy_mc_page.S b/arch/arm64/lib/copy_mc_page.S new file mode 100644 index 0000000000000000000000000000000000000000..656d831ef4b87c0813515a53680b119e3ea20603 --- /dev/null +++ b/arch/arm64/lib/copy_mc_page.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#include +#include +#include +#include +#include +#include +#include + +/* + * Copy a page from src to dest (both are page aligned) with machine check + * + * Parameters: + * x0 - dest + * x1 - src + * Returns: + * x0 - Return 0 if copy success, or -EFAULT if anything goes wrong + * while copying. + */ +SYM_FUNC_START(__pi_copy_mc_page) +alternative_if ARM64_HAS_NO_HW_PREFETCH + // Prefetch three cache lines ahead. 
+ prfm pldl1strm, [x1, #128] + prfm pldl1strm, [x1, #256] + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #256 + add x1, x1, #128 +1: + tst x0, #(PAGE_SIZE - 1) + +alternative_if ARM64_HAS_NO_HW_PREFETCH + prfm pldl1strm, [x1, #384] +alternative_else_nop_endif + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, ldp x2, x3, [x1]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, ldp x4, x5, [x1, #16]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, ldp x6, x7, [x1, #32]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, ldp x8, x9, [x1, #48]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, ldp x10, x11, [x1, #64]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, ldp x12, x13, [x1, #80]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, ldp x14, x15, [x1, #96]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) +CPY_MC(9998f, ldp x16, x17, [x1, #112]) + + add x0, x0, #128 + add x1, x1, #128 + + b.ne 1b + +CPY_MC(9998f, stnp x2, x3, [x0, #-256]) +CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256]) +CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256]) +CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256]) +CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256]) +CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) +CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) +CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256]) + + mov x0, #0 + ret + +9998: mov x0, #-EFAULT + ret + +SYM_FUNC_END(__pi_copy_mc_page) +SYM_FUNC_ALIAS(copy_mc_page, __pi_copy_mc_page) +EXPORT_SYMBOL(copy_mc_page) diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S index 5018ac03b6bf39330c09f686656c6f1a72fa9f42..2b748e83f6cf01359fd1b8dda0e7c1b481d4acf0 100644 --- a/arch/arm64/lib/mte.S +++ b/arch/arm64/lib/mte.S @@ -80,6 +80,33 @@ SYM_FUNC_START(mte_copy_page_tags) ret SYM_FUNC_END(mte_copy_page_tags) +/* + * Copy the tags from the source page to the destination one in a machine check safe way + * x0 - address of the destination page + * x1 - address of the source page + * Returns: + * x0 - 0 if the copy succeeded, or + * -EFAULT if anything goes wrong while copying. + */ +SYM_FUNC_START(mte_copy_mc_page_tags) + mov x2, x0 + mov x3, x1 + multitag_transfer_size x5, x6 +1: +CPY_MC(2f, ldgm x4, [x3]) +CPY_MC(2f, stgm x4, [x2]) + add x2, x2, x5 + add x3, x3, x5 + tst x2, #(PAGE_SIZE - 1) + b.ne 1b + + mov x0, #0 + ret + +2: mov x0, #-EFAULT + ret +SYM_FUNC_END(mte_copy_mc_page_tags) + /* * Read tags from a user buffer (one tag per byte) and set the corresponding * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
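The C wrappers in the next hunk expose these routines as copy_mc_highpage()/copy_mc_user_highpage(). A hypothetical caller would use them roughly like this (a sketch only, not code from the patch)::

    /* Sketch: copy a page that may contain an uncorrected memory error. */
    static int try_copy_poisoned_page(struct page *dst, struct page *src)
    {
    	/*
    	 * copy_mc_highpage() returns 0 on success, or -EFAULT if a
    	 * synchronous external abort was consumed while reading src;
    	 * the extable fixup turns the abort into an error return
    	 * instead of a kernel panic.
    	 */
    	return copy_mc_highpage(dst, src);
    }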
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index a7bb20055ce0948a6f586e29fd1a05bbbc1cc75f..b062c925daa4dcb471de37326a64a59e8d253453 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -14,6 +14,21 @@ #include #include +static int do_mte(struct page *to, struct page *from, void *kto, void *kfrom, bool mc) +{ + int ret = 0; + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + if (mc) + ret = mte_copy_mc_page_tags(kto, kfrom); + else + mte_copy_page_tags(kto, kfrom); + } + + return ret; +} + void copy_highpage(struct page *to, struct page *from) { void *kto = page_address(to); @@ -24,12 +39,7 @@ void copy_highpage(struct page *to, struct page *from) if (kasan_hw_tags_enabled()) page_kasan_tag_reset(to); - if (system_supports_mte() && page_mte_tagged(from)) { - /* It's a new page, shouldn't have been tagged yet */ - WARN_ON_ONCE(!try_page_mte_tagging(to)); - mte_copy_page_tags(kto, kfrom); - set_page_mte_tagged(to); - } + do_mte(to, from, kto, kfrom, false); } EXPORT_SYMBOL(copy_highpage); @@ -40,3 +50,40 @@ void copy_user_highpage(struct page *to, struct page *from, flush_dcache_page(to); } EXPORT_SYMBOL_GPL(copy_user_highpage); + +#ifdef CONFIG_ARCH_HAS_COPY_MC +/* + * Return -EFAULT if anything goes wrong while copying page or mte. + */ +int copy_mc_highpage(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + int ret; + + ret = copy_mc_page(kto, kfrom); + if (ret) + return -EFAULT; + + ret = do_mte(to, from, kto, kfrom, true); + if (ret) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL(copy_mc_highpage); + +int copy_mc_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + int ret; + + ret = copy_mc_highpage(to, from); + + if (!ret) + flush_dcache_page(to); + + return ret; +} +EXPORT_SYMBOL_GPL(copy_mc_user_highpage); +#endif diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c index 228d681a871594f05acea36f572b31363f8000c6..bdc81518d20788c1fc99aa5a49267ed14d957c9d 100644 --- a/arch/arm64/mm/extable.c +++ b/arch/arm64/mm/extable.c @@ -16,7 +16,7 @@ get_ex_fixup(const struct exception_table_entry *ex) return ((unsigned long)&ex->fixup + ex->fixup); } -static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, +static bool ex_handler_fixup_err_zero(const struct exception_table_entry *ex, struct pt_regs *regs) { int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data); @@ -69,10 +69,27 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_bpf(ex, regs); case EX_TYPE_UACCESS_ERR_ZERO: case EX_TYPE_KACCESS_ERR_ZERO: - return ex_handler_uaccess_err_zero(ex, regs); + return ex_handler_fixup_err_zero(ex, regs); case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: return ex_handler_load_unaligned_zeropad(ex, regs); } BUG(); } + +bool fixup_exception_mc(struct pt_regs *regs) +{ + const struct exception_table_entry *ex; + + ex = search_exception_tables(instruction_pointer(regs)); + if (!ex) + return false; + + switch (ex->type) { + case EX_TYPE_UACCESS_ERR_ZERO: + case EX_TYPE_COPY_MC_PAGE_ERR_ZERO: + return ex_handler_fixup_err_zero(ex, regs); + } + + return false; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 2e5d1e238af958e8dcdd07b498f990421b27cc02..a6d1c333719f50bd1c7d5e69daec2981d00c53e6 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -728,6 +728,31 @@ static int do_bad(unsigned long far, unsigned long esr, struct pt_regs *regs) 
return 1; /* "fault" */ } +static bool arm64_do_kernel_sea(unsigned long addr, unsigned int esr, + struct pt_regs *regs, int sig, int code) +{ + if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC)) + return false; + + if (user_mode(regs)) + return false; + + if (apei_claim_sea(regs) < 0) + return false; + + if (!fixup_exception_mc(regs)) + return false; + + if (current->flags & PF_KTHREAD) + return true; + + set_thread_esr(0, esr); + arm64_force_sig_fault(sig, code, addr, + "Uncorrected memory error on access to user memory\n"); + + return true; +} + static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) { const struct fault_info *inf; @@ -753,7 +778,9 @@ static int do_sea(unsigned long far, unsigned long esr, struct pt_regs *regs) */ siaddr = untagged_addr(far); } - arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + if (!arm64_do_kernel_sea(siaddr, esr, regs, inf->sig, inf->code)) + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); return 0; } diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 47781bec61719dee864d216774fd3d99e3fb53d3..1884c70f359b412b87e72065b79168d6a517e8ae 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -41,10 +42,6 @@ #include #include -#define NO_BLOCK_MAPPINGS BIT(0) -#define NO_CONT_MAPPINGS BIT(1) -#define NO_EXEC_MAPPINGS BIT(2) /* assumes FEAT_HPDS is not used */ - int idmap_t0sz __ro_after_init; #if VA_BITS > 48 @@ -75,6 +72,15 @@ EXPORT_SYMBOL(empty_zero_page); static DEFINE_SPINLOCK(swapper_pgdir_lock); static DEFINE_MUTEX(fixmap_lock); +static DEFINE_MUTEX(split_linear_mapping_lock); + +static struct split_memory_params { + unsigned long virt; + phys_addr_t size; + pgprot_t prot; + + atomic_t cpu_count; +} split_memory_param; void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) { @@ -169,6 +175,49 @@ bool pgattr_change_is_safe(u64 old, u64 new) return ((old ^ new) & ~mask) == 0; } +/* + * If the physical address of a block-mapping pud/pmd or contiguous-mapping + * pmd/pte entry is located in the physical range it points to, clearing the + * entry would make the corresponding physical range inaccessible, and the + * remapping of that range could then not be completed. For this case, the + * range should be mapped at PTE level when initializing the page table. + */ +static bool should_clear_cont_pte(pmd_t *pmdp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pte_offset_phys(pmdp, addr); + + return (pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT); +} +
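To make the self-reference check above concrete (illustrative numbers, 4K pages assumed, so CONT_PTE_SHIFT is 16 and a contiguous PTE span covers 64K)::

    /*
     * Illustration: the page holding the PTE entries and the range it maps
     * share the same naturally aligned 64K region, so the CONT mapping
     * must be avoided for this range.
     */
    phys_addr_t pa   = 0x40012000;  /* page holding the PTE entries */
    phys_addr_t phys = 0x4001f000;  /* physical range being mapped  */
    bool self_ref = (pa >> CONT_PTE_SHIFT) == (phys >> CONT_PTE_SHIFT); /* true */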
+static bool should_clear_cont_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> CONT_PMD_SHIFT) == (phys >> CONT_PMD_SHIFT); +} + +static bool should_split_pmd(pud_t *pudp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pmd_offset_phys(pudp, addr); + + return (pa >> PMD_SHIFT) == (phys >> PMD_SHIFT); +} + +#ifndef __PAGETABLE_PUD_FOLDED +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + phys_addr_t pa = pud_offset_phys(p4dp, addr); + + return (pa >> PUD_SHIFT) == (phys >> PUD_SHIFT); +} +#else +static bool should_split_pud(p4d_t *p4dp, unsigned long addr, phys_addr_t phys) +{ + return false; +} +#endif + static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, phys_addr_t phys, pgprot_t prot) { @@ -223,7 +272,8 @@ static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, /* use a contiguous mapping if the range is suitably aligned */ if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && - (flags & NO_CONT_MAPPINGS) == 0) + (flags & NO_CONT_MAPPINGS) == 0 && + !should_clear_cont_pte(pmdp, addr, phys)) __prot = __pgprot(pgprot_val(prot) | PTE_CONT); init_pte(pmdp, addr, next, phys, __prot); @@ -240,6 +290,14 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, pmd_t *pmdp; pmdp = pmd_set_fixmap_offset(pudp, addr); + /* + * The physical address of PMDs with the contiguous flag might be + * located in the physical range they point to. Thus clear the CONT + * flag early to avoid making that range inaccessible. + */ + if (should_clear_cont_pmd(pudp, addr, phys)) + prot = __pgprot(pgprot_val(prot) & ~PTE_CONT); + do { pmd_t old_pmd = READ_ONCE(*pmdp); @@ -247,7 +305,8 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, /* try section mapping first */ if (((addr | next | phys) & ~PMD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pmd(pudp, addr, phys)) { pmd_set_huge(pmdp, phys, prot); /* @@ -340,11 +399,14 @@ static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, next = pud_addr_end(addr, end); /* - * For 4K granule only, attempt to put down a 1GB block + * For 4K granule only, attempt to put down a 1GB block. If the + * physical address of pudp is included in the range it points + * to, split the pud block early. */ if (pud_sect_supported() && ((addr | next | phys) & ~PUD_MASK) == 0 && - (flags & NO_BLOCK_MAPPINGS) == 0) { + (flags & NO_BLOCK_MAPPINGS) == 0 && + !should_split_pud(p4dp, addr, phys)) { pud_set_huge(pudp, phys, prot); /* @@ -511,6 +573,9 @@ void __init mark_linear_text_alias_ro(void) #ifdef CONFIG_KFENCE +static unsigned long __ro_after_init +kfence_pool_size = ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE); + bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL; /* early_param() will be parsed before map_mem() below.
*/ @@ -531,7 +596,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) if (!kfence_early_init) return 0; - kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE); + kfence_pool = memblock_phys_alloc(kfence_pool_size, PAGE_SIZE); if (!kfence_pool) { pr_err("failed to allocate kfence pool\n"); kfence_early_init = false; @@ -539,7 +604,7 @@ static phys_addr_t __init arm64_kfence_alloc_pool(void) } /* Temporarily mark as NOMAP. */ - memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE); + memblock_mark_nomap(kfence_pool, kfence_pool_size); return kfence_pool; } @@ -550,11 +615,11 @@ static void __init arm64_kfence_map_pool(phys_addr_t kfence_pool, pgd_t *pgdp) return; /* KFENCE pool needs page-level mapping. */ - __map_memblock(pgdp, kfence_pool, kfence_pool + KFENCE_POOL_SIZE, + __map_memblock(pgdp, kfence_pool, kfence_pool + kfence_pool_size, pgprot_tagged(PAGE_KERNEL), - NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); - memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE); - __kfence_pool = phys_to_virt(kfence_pool); + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS | NO_EXEC_MAPPINGS); + memblock_clear_nomap(kfence_pool, kfence_pool_size); + __kfence_pool_early_init = phys_to_virt(kfence_pool); } #else /* CONFIG_KFENCE */ @@ -584,7 +649,7 @@ static void __init map_mem(pgd_t *pgdp) early_kfence_pool = arm64_kfence_alloc_pool(); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; /* @@ -1310,7 +1375,7 @@ int arch_add_memory(int nid, u64 start, u64 size, VM_BUG_ON(!mhp_range_allowed(start, size, true)); - if (can_set_direct_map()) + if (!can_set_block_and_cont_map()) flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), @@ -1487,3 +1552,328 @@ void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, pte { set_pte_at(vma->vm_mm, addr, ptep, pte); } + +static void clear_cont_pte_mapping(pmd_t *pmdp, unsigned long addr, + unsigned long end) +{ + pte_t *ptep, *sptep, pte; + unsigned long saddr, next; + int i; + + /* + * Clear the CONT flag of ptes at the input range. CONT flag should be + * cleared at the granularity of CONT_PTE. + */ + addr &= CONT_PTE_MASK; + if (end & ~CONT_PTE_MASK) + end = (end + CONT_PTE_SIZE) & CONT_PTE_MASK; + + do { + pgprot_t prot; + unsigned long pfn; + + saddr = addr; + next = pte_cont_addr_end(addr, end); + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + if (pte_none(pte)) + continue; + + if (pte_cont(READ_ONCE(*ptep))) { + sptep = ptep; + prot = pte_pgprot(pte_mknoncont(pte)); + pfn = pte_pfn(pte); + + /* + * Changing the bit of contiguous entries requires to + * follow Break-Before-Make approach. See ARM DDI + * 0487A.k_iss10775, "Misprogramming of the Contiguous bit", + * page D4-1762. 
+ */ + for (i = 0; i < CONT_PTES; i++, ptep++) + pte_clear(&init_mm, addr, ptep); + + for (i = 0; i < CONT_PTES; i++, saddr += PAGE_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PTES; i++, sptep++, pfn++) + set_pte(sptep, pfn_pte(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void clear_cont_pmd_mapping(pud_t *pudp, unsigned long addr, + unsigned long end) +{ + pmd_t *pmdp, *spmdp, pmd; + unsigned long saddr, next; + int i; + + addr &= CONT_PMD_MASK; + if (end & ~CONT_PMD_MASK) + end = (end + CONT_PMD_SIZE) & CONT_PMD_MASK; + + do { + pgprot_t prot; + unsigned long pfn, pfn_offset = PMD_SIZE >> PAGE_SHIFT; + + saddr = addr; + next = pmd_cont_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (pte_cont(pmd_pte(pmd))) { + spmdp = pmdp; + prot = pte_pgprot(pmd_pte(pmd_mknoncont(pmd))); + pfn = pmd_pfn(pmd); + + for (i = 0; i < CONT_PMDS; i++, pmdp++) + pmd_clear(pmdp); + + for (i = 0; i < CONT_PMDS; i++, saddr += PMD_SIZE) + __flush_tlb_kernel_pgtable_entry(saddr); + + for (i = 0; i < CONT_PMDS; i++, spmdp++, pfn += pfn_offset) + set_pmd(spmdp, pfn_pmd(pfn, prot)); + } + } while (addr = next, addr < end); +} + +static void split_pmd_mapping(pud_t *pudp, unsigned long addr, unsigned long end, + pgprot_t prot, int flags) +{ + pmd_t *pmdp, pmd, split_pmd; + unsigned long next; + int new_flags = 0; + + /* + * Clear the contiguous pmd mappings if any splitting request is + * located in the corresponding range. + */ + if (flags & NO_CONT_MAPPINGS) + clear_cont_pmd_mapping(pudp, addr, end); + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + + if (!pmd_exec(pmd)) + flags |= NO_EXEC_MAPPINGS; + + if (pmd_sect(pmd)) { + phys_addr_t phys, pte_phys; + pgprot_t orig_prot; + + phys = __virt_to_phys(addr); + + /* + * Get the original protections except PMD_SECT. + */ + orig_prot = __pgprot(pgprot_val(pte_pgprot(pmd_pte(pmd))) | + PMD_TYPE_TABLE); + + /* + * Allocate a new pmd page to re-initialize + * corresponding ptes. + */ + pte_phys = pgd_pgtable_alloc(PAGE_SHIFT); + split_pmd = pfn_pmd(__phys_to_pfn(pte_phys), orig_prot); + + /* + * If addr/next is not PMD aligned, create contiguous + * mappings for the rest of the split range. + */ + if (addr & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, addr & PMD_MASK, addr, + phys & PMD_MASK, prot, + pgd_pgtable_alloc, new_flags); + if (next & ~PMD_MASK) + alloc_init_cont_pte(&split_pmd, next, + (next + PMD_SIZE) & PMD_MASK, + phys + next - addr, prot, + pgd_pgtable_alloc, new_flags); + + alloc_init_cont_pte(&split_pmd, addr, next, phys, prot, + pgd_pgtable_alloc, flags); + + /* + * Obey the break-before-make rule to split the page + * table, otherwise it might trigger CONSTRAINED + * UNPREDICTABLE behaviors because of TLB conflicts. + * Thus clear the original pmd entry and flush it, then + * set the newly allocated pmd page. + */ + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable_entry(addr); + set_pmd(pmdp, split_pmd); + } else { + clear_cont_pte_mapping(pmdp, addr, next); + } + } while (addr = next, addr < end); +}
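The clear/flush/set sequence above is the generic Break-Before-Make pattern; distilled to its essence (an illustration only, mirroring the patch's helpers)::

    /* BBM: replace a live pmd entry without risking TLB conflict aborts. */
    static void bbm_replace_pmd(pmd_t *pmdp, pmd_t new, unsigned long addr)
    {
    	pmd_clear(pmdp);                        /* 1. break: invalidate entry */
    	__flush_tlb_kernel_pgtable_entry(addr); /* 2. flush the stale TLB     */
    	set_pmd(pmdp, new);                     /* 3. make: install new entry */
    }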
+			/*
+			 * Obey the break-before-make rule to split the page
+			 * table; otherwise it might trigger CONSTRAINED
+			 * UNPREDICTABLE behaviour because of a TLB conflict.
+			 * Thus clear the original pmd entry and flush it, then
+			 * set the newly allocated pmd page.
+			 */
+			pmd_clear(pmdp);
+			__flush_tlb_kernel_pgtable_entry(addr);
+			set_pmd(pmdp, split_pmd);
+		} else {
+			clear_cont_pte_mapping(pmdp, addr, next);
+		}
+	} while (addr = next, addr < end);
+}
+
+static void split_pud_mapping(p4d_t *p4dp, unsigned long addr, unsigned long end,
+			      pgprot_t prot, int flags)
+{
+	pud_t *pudp, pud, split_pud;
+	unsigned long next;
+	int new_flags = 0;
+
+	do {
+		next = pud_addr_end(addr, end);
+		pudp = pud_offset(p4dp, addr);
+		pud = READ_ONCE(*pudp);
+
+		if (pud_none(pud))
+			continue;
+
+		WARN_ON(!pud_present(pud));
+
+		if (!pud_exec(pud))
+			flags |= NO_EXEC_MAPPINGS;
+
+		if (pud_sect(pud)) {
+			phys_addr_t phys, pmd_phys;
+			pgprot_t orig_prot;
+
+			phys = __virt_to_phys(addr);
+
+			orig_prot = __pgprot(pgprot_val(pte_pgprot(pud_pte(pud))) |
+					     PUD_TYPE_TABLE);
+
+			pmd_phys = pgd_pgtable_alloc(PMD_SHIFT);
+			split_pud = pfn_pud(__phys_to_pfn(pmd_phys), orig_prot);
+
+			/*
+			 * If addr/next is not PUD aligned, map the remainder of
+			 * the block outside the requested split range, with
+			 * block and contiguous mappings still allowed.
+			 */
+			if (addr & ~PUD_MASK)
+				alloc_init_cont_pmd(&split_pud, addr & PUD_MASK,
+						    addr, phys & PUD_MASK,
+						    prot, pgd_pgtable_alloc, new_flags);
+			if (next & ~PUD_MASK)
+				alloc_init_cont_pmd(&split_pud, next,
+						    (next + PUD_SIZE) & PUD_MASK,
+						    phys + next - addr,
+						    prot, pgd_pgtable_alloc, new_flags);
+
+			alloc_init_cont_pmd(&split_pud, addr, next, phys, prot,
+					    pgd_pgtable_alloc, flags);
+
+			/*
+			 * Obey the break-before-make rule to split the page
+			 * table; otherwise it might trigger CONSTRAINED
+			 * UNPREDICTABLE behaviour because of a TLB conflict.
+			 * Thus clear the original pud entry and flush it, then
+			 * set the newly allocated pud page.
+			 */
+			pud_clear(pudp);
+			__flush_tlb_kernel_pgtable_entry(addr);
+			set_pud(pudp, split_pud);
+		} else {
+			split_pmd_mapping(pudp, addr, next, prot, flags);
+		}
+	} while (addr = next, addr < end);
+}
+
+static void split_p4d_mapping(pgd_t *pgdp, unsigned long addr, unsigned long end,
+			      pgprot_t prot, int flags)
+{
+	p4d_t *p4dp, p4d;
+	unsigned long next;
+
+	do {
+		next = p4d_addr_end(addr, end);
+		p4dp = p4d_offset(pgdp, addr);
+		p4d = READ_ONCE(*p4dp);
+
+		if (p4d_none(p4d))
+			continue;
+
+		WARN_ON(!p4d_present(p4d));
+
+#if CONFIG_PGTABLE_LEVELS > 3
+		/*
+		 * If the original p4d mapping is not executable, keep it
+		 * non-executable after splitting.
+		 */
+		if (!p4d_exec(p4d))
+			flags |= NO_EXEC_MAPPINGS;
+#endif
+
+		split_pud_mapping(p4dp, addr, next, prot, flags);
+	} while (addr = next, addr < end);
+}
+
+void split_linear_mapping(unsigned long virt, phys_addr_t size, pgprot_t prot)
+{
+	pgd_t *pgdp, pgd;
+	unsigned long addr, next, end;
+	int flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
+
+	addr = virt & PAGE_MASK;
+	end = PAGE_ALIGN(virt + size);
+	prot = pgprot_tagged(prot);
+
+	do {
+		next = pgd_addr_end(addr, end);
+		pgdp = pgd_offset_k(addr);
+		pgd = READ_ONCE(*pgdp);
+
+		if (pgd_none(pgd))
+			continue;
+
+		WARN_ON(!pgd_present(pgd));
+
+		split_p4d_mapping(pgdp, addr, next, prot, flags);
+	} while (addr = next, addr < end);
+}
+
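+/*
+ * Rendezvous callback run on every online CPU by stop_machine(): the first
+ * CPU to arrive performs the split and then bumps cpu_count a second time;
+ * all other CPUs spin in cpu_relax() until cpu_count exceeds
+ * num_online_cpus(), i.e. until the split has completed.
+ */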
+static int __split_linear_mapping_after_init(void *data)
+{
+	struct split_memory_params *param = data;
+
+	if (atomic_inc_return(&param->cpu_count) == 1) {
+		split_linear_mapping(param->virt, param->size, param->prot);
+		atomic_inc(&param->cpu_count);
+	} else {
+		while (atomic_read(&param->cpu_count) <= num_online_cpus())
+			cpu_relax();
+	}
+	return 0;
+}
+
+/*
+ * When splitting the kernel page table following the Break-Before-Make
+ * principle, other CPUs might access an address mapped by a cleared entry
+ * before it is remapped. Thus stop_machine() is used to synchronize all
+ * CPUs and avoid such kernel page faults.
+ */
+void split_linear_mapping_after_init(unsigned long virt, phys_addr_t size,
+				     pgprot_t prot)
+{
+	mutex_lock(&split_linear_mapping_lock);
+
+	split_memory_param.virt = virt;
+	split_memory_param.size = size;
+	split_memory_param.prot = prot;
+	atomic_set(&split_memory_param.cpu_count, 0);
+
+	stop_machine(__split_linear_mapping_after_init, &split_memory_param, cpu_online_mask);
+
+	mutex_unlock(&split_linear_mapping_lock);
+}
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 924843f1f661bfe1ff5c6b8f9eff753872416040..801ac339298afcbcf7982dba996668b72dd10a17 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -20,17 +20,13 @@ struct page_change_data {
 
 bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED);
 
-bool can_set_direct_map(void)
+/*
+ * Even if rodata_full is enabled, the linear mapping range may still use
+ * block & cont mappings; this decouples rodata_full from debug_pagealloc.
+ */
+bool can_set_block_and_cont_map(void)
 {
-	/*
-	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
-	 * mapped at page granularity, so that it is possible to
-	 * protect/unprotect single pages.
-	 *
-	 * KFENCE pool requires page-granular mapping if initialized late.
-	 */
-	return rodata_full || debug_pagealloc_enabled() ||
-	       arm64_kfence_can_set_direct_map();
+	return !debug_pagealloc_enabled();
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -108,6 +104,16 @@ static int change_memory_common(unsigned long addr, int numpages,
 	if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY ||
 			    pgprot_val(clear_mask) == PTE_RDONLY)) {
 		for (i = 0; i < area->nr_pages; i++) {
+			unsigned long virt = (unsigned long)page_address(area->pages[i]);
+
+			/*
+			 * Only split the linear mapping when the attribute is
+			 * changed to read-only; other cases are not affected
+			 * by the mapping type.
+			 */
+			if (pgprot_val(set_mask) == PTE_RDONLY && can_set_block_and_cont_map())
+				split_linear_mapping_after_init(virt, PAGE_SIZE, PAGE_KERNEL);
+
 			__change_memory_common((u64)page_address(area->pages[i]),
 					       PAGE_SIZE, set_mask, clear_mask);
 		}
@@ -162,6 +168,18 @@ int set_memory_valid(unsigned long addr, int numpages, int enable)
 					__pgprot(PTE_VALID));
 }
 
+int set_memory_np(unsigned long addr, int numpages)
+{
+	/*
+	 * If addr belongs to the linear mapping range, split it to pte level
+	 * before changing the page table attributes.
+ */ + if (can_set_block_and_cont_map() && __is_lm_address(addr)) + split_linear_mapping_after_init(addr, PAGE_SIZE * numpages, PAGE_KERNEL); + + return set_memory_valid(addr, numpages, 0); +} + int set_direct_map_invalid_noflush(struct page *page) { struct page_change_data data = { @@ -169,8 +187,9 @@ int set_direct_map_invalid_noflush(struct page *page) .clear_mask = __pgprot(PTE_VALID), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -184,8 +203,9 @@ int set_direct_map_default_noflush(struct page *page) .clear_mask = __pgprot(PTE_RDONLY), }; - if (!can_set_direct_map()) - return 0; + if (can_set_block_and_cont_map()) + split_linear_mapping_after_init((unsigned long)page_address(page), + PAGE_SIZE, PAGE_KERNEL); return apply_to_page_range(&init_mm, (unsigned long)page_address(page), @@ -195,7 +215,7 @@ int set_direct_map_default_noflush(struct page *page) #ifdef CONFIG_DEBUG_PAGEALLOC void __kernel_map_pages(struct page *page, int numpages, int enable) { - if (!can_set_direct_map()) + if (can_set_block_and_cont_map()) return; set_memory_valid((unsigned long)page_address(page), numpages, enable); @@ -219,9 +239,6 @@ bool kernel_page_present(struct page *page) pte_t *ptep; unsigned long addr = (unsigned long)page_address(page); - if (!can_set_direct_map()) - return true; - pgdp = pgd_offset_k(addr); if (pgd_none(READ_ONCE(*pgdp))) return false; diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index dea3dc89234b023e919f6ca3d2866e8e1e9d5bb8..3b0f4c58b07ccf696615db1917b25573aaae6efc 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -56,6 +56,7 @@ HW_DBM KVM_HVHE KVM_PROTECTED_MODE MISMATCHED_CACHE_TYPE +MPAM MTE MTE_ASYMM SME diff --git a/arch/arm64/tools/sysreg b/arch/arm64/tools/sysreg index 76ce150e7347e56e2f497e905c02dc28733e68e3..0e7d7f327410ab33cbdaf090dbae92a2bf149eca 100644 --- a/arch/arm64/tools/sysreg +++ b/arch/arm64/tools/sysreg @@ -2530,6 +2530,22 @@ Res0 1 Field 0 EN EndSysreg +Sysreg MPAMIDR_EL1 3 0 10 4 4 +Res0 63:62 +Field 61 HAS_SDEFLT +Field 60 HAS_FORCE_NS +Field 59 SP4 +Field 58 HAS_TIDR +Field 57 HAS_ALTSP +Res0 56:40 +Field 39:32 PMG_MAX +Res0 31:21 +Field 20:18 VPMR_MAX +Field 17 HAS_HCR +Res0 16 +Field 15:0 PARTID_MAX +EndSysreg + Sysreg LORID_EL1 3 0 10 4 7 Res0 63:24 Field 23:16 LD @@ -2537,6 +2553,22 @@ Res0 15:8 Field 7:0 LR EndSysreg +Sysreg MPAM1_EL1 3 0 10 5 0 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + +Sysreg MPAM0_EL1 3 0 10 5 1 +Res0 63:48 +Field 47:40 PMG_D +Field 39:32 PMG_I +Field 31:16 PARTID_D +Field 15:0 PARTID_I +EndSysreg + Sysreg ISR_EL1 3 0 12 1 0 Res0 63:11 Field 10 IS @@ -2550,6 +2582,7 @@ EndSysreg Sysreg ICC_NMIAR1_EL1 3 0 12 9 5 Res0 63:24 Field 23:0 INTID + EndSysreg Sysreg TRBLIMITR_EL1 3 0 9 11 0 diff --git a/arch/loongarch/Kbuild b/arch/loongarch/Kbuild index b01f5cdb27e03d778dfa400e370037c39cd7abed..beb8499dd8ed84330beecbcd61977df0aa3474f8 100644 --- a/arch/loongarch/Kbuild +++ b/arch/loongarch/Kbuild @@ -3,5 +3,7 @@ obj-y += mm/ obj-y += net/ obj-y += vdso/ +obj-$(CONFIG_KVM) += kvm/ + # for cleaning subdir- += boot diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig index e14396a2ddcbfc6d6130b63dca258343a03f35cb..b313ca390ee4211d338e127d47e996434cbad621 100644 --- a/arch/loongarch/Kconfig +++ b/arch/loongarch/Kconfig @@ -129,6 
+129,7 @@ config LOONGARCH select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES + select HAVE_KVM select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI select HAVE_PCI @@ -142,6 +143,7 @@ config LOONGARCH select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_SETUP_PER_CPU_AREA if NUMA select HAVE_STACKPROTECTOR + select ARCH_HAS_PHYS_TO_DMA select HAVE_SYSCALL_TRACEPOINTS select HAVE_TIF_NOHZ select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP @@ -263,6 +265,9 @@ config AS_HAS_LASX_EXTENSION config AS_HAS_LBT_EXTENSION def_bool $(as-instr,movscr2gr \$a0$(comma)\$scr0) +config AS_HAS_LVZ_EXTENSION + def_bool $(as-instr,hvcl 0) + menu "Kernel type and options" source "kernel/Kconfig.hz" @@ -421,7 +426,6 @@ config SMP config HOTPLUG_CPU bool "Support for hot-pluggable CPUs" depends on SMP - select GENERIC_IRQ_MIGRATION help Say Y here to allow turning CPUs off and on. CPUs can be controlled through /sys/devices/system/cpu. @@ -559,6 +563,15 @@ config CPU_HAS_PREFETCH bool default y +config PARAVIRT + bool "Enable paravirtualization code" + depends on AS_HAS_LVZ_EXTENSION + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. However, when run without a hypervisor + the kernel is theoretically slower and slightly larger. + config ARCH_SUPPORTS_KEXEC def_bool y @@ -672,7 +685,10 @@ config ARCH_SUSPEND_POSSIBLE config ARCH_HIBERNATION_POSSIBLE def_bool y +source "drivers/cpufreq/Kconfig" source "kernel/power/Kconfig" source "drivers/acpi/Kconfig" endmenu + +source "arch/loongarch/kvm/Kconfig" diff --git a/arch/loongarch/configs/anolis-debug_defconfig b/arch/loongarch/configs/anolis-debug_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..365f27c124b4bb4d25582a06687db1584b7844c7 --- /dev/null +++ b/arch/loongarch/configs/anolis-debug_defconfig @@ -0,0 +1,8851 @@ +# +# Automatically generated file; DO NOT EDIT. 
+# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set 
+CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set 
+CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not 
set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set 
+CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y +CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y 
+CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SELECT_MEMORY_MODEL=y +CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set 
+CONFIG_CMA_AREAS=19 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ZONE_DMA32=y +CONFIG_HMM_MIRROR=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +# CONFIG_LRU_GEN is not set +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y 
+CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m 
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m 
+CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs Configuration +# +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_DEBUG=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +CONFIG_IP_DCCP_DEBUG=y +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +CONFIG_SCTP_DBG_OBJCNT=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +# CONFIG_BRIDGE_CFM is not set +CONFIG_NET_DSA=m +# 
CONFIG_NET_DSA_TAG_NONE is not set +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m +CONFIG_NET_DSA_TAG_BRCM=m +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +# CONFIG_NET_DSA_TAG_HELLCREEK is not set +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y 
+CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +CONFIG_BATMAN_ADV_DEBUG=y +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +# CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_FD=y +CONFIG_NET_9P_VIRTIO=y 
+# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_EDR is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +CONFIG_PCI_LOONGSON=y +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +# CONFIG_RAPIDIO_DMA_ENGINE is not set +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +# CONFIG_RAPIDIO_CPS_XX is not set +# CONFIG_RAPIDIO_CPS_GEN2 is not set +# CONFIG_RAPIDIO_RXS_GEN3 is not set +# end of RapidIO Switch drivers + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y 
+CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT=m +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +CONFIG_EFI_ZBOOT=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set +CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. 
+# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_DEF_COMP_LZORLE is not set +CONFIG_ZRAM_DEF_COMP_ZSTD=y +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="zstd" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m 
+CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# 
CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +CONFIG_SCSI_QLA_ISCSI=m +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +CONFIG_PATA_ATIIXP=y +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# 
CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y 
+CONFIG_NET_POLL_CONTROLLER=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_RIONET_TX_SIZE=128 +CONFIG_RIONET_RX_SIZE=128 +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=y +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m +CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is 
not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +# CONFIG_DWMAC_INTEL_PLAT is not set +CONFIG_DWMAC_LOONGSON=m +# CONFIG_STMMAC_PCI is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=y + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +# CONFIG_AX88796B_PHY is not set +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m 
+CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_I2C=y +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=y +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m 
+CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +# CONFIG_USB_CDC_PHONET is not set +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m 
+CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set +# end of Wireless WAN + +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# 
mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# 
CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not 
set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +# CONFIG_RMI4_F54 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_PERICOM=y +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is 
not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# 
CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set 
+CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set +# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set 
+CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not 
set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# 
CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8350_I2C is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ROHM_BD718XX is not set +# CONFIG_MFD_ROHM_BD71828 is not set +# CONFIG_MFD_ROHM_BD957XMUF is not set +# CONFIG_MFD_STPMIC1 is not set +# CONFIG_MFD_STMFX is not set +# 
CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_QCOM_PM8008 is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# CONFIG_MFD_RSMU_I2C is not set +# CONFIG_MFD_RSMU_SPI is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=m +CONFIG_LIRC=y +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +CONFIG_IR_SHARP_DECODER=m +CONFIG_IR_SONY_DECODER=m +CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +# CONFIG_IR_GPIO_CIR is not set +# CONFIG_IR_GPIO_TX is not set +# CONFIG_IR_HIX5HD2 is not set +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +# CONFIG_IR_PWM_TX is not set +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +# CONFIG_IR_SPI is not set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +# CONFIG_RC_LOOPBACK is not set +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +# CONFIG_MEDIA_SUPPORT_FILTER is not set +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set + +# +# Media device types +# +CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_ANALOG_TV_SUPPORT=y +CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y +CONFIG_MEDIA_RADIO_SUPPORT=y +CONFIG_MEDIA_SDR_SUPPORT=y +CONFIG_MEDIA_PLATFORM_SUPPORT=y +CONFIG_MEDIA_TEST_SUPPORT=y +# end of Media device types + +# +# Media core support +# +CONFIG_VIDEO_DEV=m +CONFIG_MEDIA_CONTROLLER=y +CONFIG_DVB_CORE=m +# end of Media core support + +# +# Video4Linux options +# +CONFIG_VIDEO_V4L2_I2C=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y +# CONFIG_VIDEO_ADV_DEBUG is not set +# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set +CONFIG_VIDEO_TUNER=m +CONFIG_V4L2_FWNODE=m +CONFIG_V4L2_ASYNC=m +# end of Video4Linux options + +# +# Media controller options +# +CONFIG_MEDIA_CONTROLLER_DVB=y +# end of Media controller options + +# +# Digital TV options +# +# CONFIG_DVB_MMAP is not set +CONFIG_DVB_NET=y +CONFIG_DVB_MAX_ADAPTERS=8 +CONFIG_DVB_DYNAMIC_MINORS=y +# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set +# CONFIG_DVB_ULE_DEBUG is not set +# end of Digital TV options + +# +# Media drivers +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y + +# +# Webcam devices +# +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +# CONFIG_USB_GSPCA_DTCS033 is not set +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +# CONFIG_USB_GSPCA_KINECT is not set +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m 
+CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m +CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +# CONFIG_USB_GSPCA_STK1135 is not set +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +# CONFIG_USB_GSPCA_TOUPTEK is not set +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +# CONFIG_USB_PWC_DEBUG is not set +CONFIG_USB_PWC_INPUT_EVDEV=y +CONFIG_USB_S2255=m +# CONFIG_VIDEO_USBTV is not set +CONFIG_USB_VIDEO_CLASS=m +CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y + +# +# Analog TV USB devices +# +# CONFIG_VIDEO_GO7007 is not set +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_PVRUSB2_SYSFS=y +CONFIG_VIDEO_PVRUSB2_DVB=y +# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set +# CONFIG_VIDEO_STK1160 is not set + +# +# Analog/digital TV USB devices +# +CONFIG_VIDEO_AU0828=m +CONFIG_VIDEO_AU0828_V4L2=y +# CONFIG_VIDEO_AU0828_RC is not set + +# +# Digital TV USB devices +# +# CONFIG_DVB_AS102 is not set +CONFIG_DVB_B2C2_FLEXCOP_USB=m +# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +# CONFIG_DVB_USB_DVBSKY is not set +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +# CONFIG_DVB_USB_ZD1301 is not set +CONFIG_DVB_USB=m +# CONFIG_DVB_USB_DEBUG is not set +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +# CONFIG_DVB_USB_CXUSB_ANALOG is not set +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIB3000MC=m +CONFIG_DVB_USB_DIBUSB_MB=m +# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m + +# +# Webcam, TV (analog/digital) USB devices +# +CONFIG_VIDEO_EM28XX=m +# CONFIG_VIDEO_EM28XX_V4L2 is not set +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m +CONFIG_VIDEO_EM28XX_RC=m + +# +# Software defined radio USB devices +# +# CONFIG_USB_AIRSPY is not set +# CONFIG_USB_HACKRF is not set +# CONFIG_USB_MSI2500 is not set +CONFIG_MEDIA_PCI_SUPPORT=y + +# +# Media capture support +# +# CONFIG_VIDEO_SOLO6X10 is not set +# CONFIG_VIDEO_TW5864 is not set +# CONFIG_VIDEO_TW68 is not set +# CONFIG_VIDEO_TW686X is not set +# CONFIG_VIDEO_ZORAN is not set + +# +# Media capture/analog TV support +# +# CONFIG_VIDEO_DT3155 is not set +CONFIG_VIDEO_IVTV=m +# CONFIG_VIDEO_IVTV_ALSA is not set +CONFIG_VIDEO_FB_IVTV=m +# CONFIG_VIDEO_HEXIUM_GEMINI is not set +# CONFIG_VIDEO_HEXIUM_ORION is not set +# CONFIG_VIDEO_MXB is not set + +# +# Media capture/analog/hybrid TV support +# +CONFIG_VIDEO_BT848=m +CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +# CONFIG_VIDEO_CX18_ALSA is not set +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +# CONFIG_VIDEO_CX25821 is not set +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m 
+CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_CX88_MPEG=m +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_RC=y +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m + +# +# Media digital TV PCI Adapters +# +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set +CONFIG_DVB_DDBRIDGE=m +# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +# CONFIG_DVB_NETUP_UNIDVB is not set +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +# CONFIG_DVB_PT3 is not set +# CONFIG_DVB_SMIPCIE is not set +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +# CONFIG_IPU_BRIDGE is not set +CONFIG_RADIO_ADAPTERS=m +# CONFIG_RADIO_MAXIRADIO is not set +# CONFIG_RADIO_SAA7706H is not set +# CONFIG_RADIO_SHARK is not set +# CONFIG_RADIO_SHARK2 is not set +# CONFIG_RADIO_SI4713 is not set +CONFIG_RADIO_TEA575X=m +# CONFIG_RADIO_TEA5764 is not set +# CONFIG_RADIO_TEF6862 is not set +# CONFIG_RADIO_WL1273 is not set +# CONFIG_USB_DSBR is not set +# CONFIG_USB_KEENE is not set +# CONFIG_USB_MA901 is not set +# CONFIG_USB_MR800 is not set +# CONFIG_USB_RAREMONO is not set +# CONFIG_RADIO_SI470X is not set +CONFIG_MEDIA_PLATFORM_DRIVERS=y +# CONFIG_V4L_PLATFORM_DRIVERS is not set +# CONFIG_SDR_PLATFORM_DRIVERS is not set +# CONFIG_DVB_PLATFORM_DRIVERS is not set +# CONFIG_V4L_MEM2MEM_DRIVERS is not set + +# +# Allegro DVT media platform drivers +# + +# +# Amlogic media platform drivers +# + +# +# Amphion drivers +# + +# +# Aspeed media platform drivers +# + +# +# Atmel media platform drivers +# + +# +# Cadence media platform drivers +# +# CONFIG_VIDEO_CADENCE_CSI2RX is not set +# CONFIG_VIDEO_CADENCE_CSI2TX is not set + +# +# Chips&Media media platform drivers +# + +# +# Intel media platform drivers +# + +# +# Marvell media platform drivers +# + +# +# Mediatek media platform drivers +# + +# +# Microchip Technology, Inc. 
media platform drivers +# + +# +# NVidia media platform drivers +# + +# +# NXP media platform drivers +# + +# +# Qualcomm media platform drivers +# + +# +# Renesas media platform drivers +# + +# +# Rockchip media platform drivers +# + +# +# Samsung media platform drivers +# + +# +# STMicroelectronics media platform drivers +# + +# +# Sunxi media platform drivers +# + +# +# Texas Instruments drivers +# + +# +# Verisilicon media platform drivers +# + +# +# VIA media platform drivers +# + +# +# Xilinx media platform drivers +# + +# +# MMC/SDIO DVB adapters +# +CONFIG_SMS_SDIO_DRV=m +# CONFIG_V4L_TEST_DRIVERS is not set +# CONFIG_DVB_TEST_DRIVERS is not set + +# +# FireWire (IEEE 1394) Adapters +# +CONFIG_DVB_FIREDTV=m +CONFIG_DVB_FIREDTV_INPUT=y +CONFIG_MEDIA_COMMON_OPTIONS=y + +# +# common driver options +# +CONFIG_CYPRESS_FIRMWARE=m +CONFIG_TTPCI_EEPROM=m +CONFIG_UVC_COMMON=m +CONFIG_VIDEO_CX2341X=m +CONFIG_VIDEO_TVEEPROM=m +CONFIG_DVB_B2C2_FLEXCOP=m +CONFIG_VIDEO_SAA7146=m +CONFIG_VIDEO_SAA7146_VV=m +CONFIG_SMS_SIANO_MDTV=m +CONFIG_SMS_SIANO_RC=y +# CONFIG_SMS_SIANO_DEBUGFS is not set +CONFIG_VIDEOBUF2_CORE=m +CONFIG_VIDEOBUF2_V4L2=m +CONFIG_VIDEOBUF2_MEMOPS=m +CONFIG_VIDEOBUF2_VMALLOC=m +CONFIG_VIDEOBUF2_DMA_SG=m +CONFIG_VIDEOBUF2_DVB=m +# end of Media drivers + +# +# Media ancillary drivers +# +CONFIG_MEDIA_ATTACH=y +CONFIG_VIDEO_IR_I2C=m +CONFIG_VIDEO_CAMERA_SENSOR=y +# CONFIG_VIDEO_AR0521 is not set +# CONFIG_VIDEO_HI556 is not set +# CONFIG_VIDEO_HI846 is not set +# CONFIG_VIDEO_HI847 is not set +# CONFIG_VIDEO_IMX208 is not set +# CONFIG_VIDEO_IMX214 is not set +# CONFIG_VIDEO_IMX219 is not set +# CONFIG_VIDEO_IMX258 is not set +# CONFIG_VIDEO_IMX274 is not set +# CONFIG_VIDEO_IMX290 is not set +# CONFIG_VIDEO_IMX296 is not set +# CONFIG_VIDEO_IMX319 is not set +# CONFIG_VIDEO_IMX334 is not set +# CONFIG_VIDEO_IMX335 is not set +# CONFIG_VIDEO_IMX355 is not set +# CONFIG_VIDEO_IMX412 is not set +# CONFIG_VIDEO_IMX415 is not set +# CONFIG_VIDEO_MT9M001 is not set +# CONFIG_VIDEO_MT9M111 is not set +# CONFIG_VIDEO_MT9P031 is not set +# CONFIG_VIDEO_MT9T112 is not set +# CONFIG_VIDEO_MT9V011 is not set +# CONFIG_VIDEO_MT9V032 is not set +# CONFIG_VIDEO_MT9V111 is not set +# CONFIG_VIDEO_OG01A1B is not set +# CONFIG_VIDEO_OV01A10 is not set +# CONFIG_VIDEO_OV02A10 is not set +# CONFIG_VIDEO_OV08D10 is not set +# CONFIG_VIDEO_OV08X40 is not set +# CONFIG_VIDEO_OV13858 is not set +# CONFIG_VIDEO_OV13B10 is not set +# CONFIG_VIDEO_OV2640 is not set +# CONFIG_VIDEO_OV2659 is not set +# CONFIG_VIDEO_OV2680 is not set +# CONFIG_VIDEO_OV2685 is not set +# CONFIG_VIDEO_OV2740 is not set +# CONFIG_VIDEO_OV4689 is not set +# CONFIG_VIDEO_OV5640 is not set +# CONFIG_VIDEO_OV5645 is not set +# CONFIG_VIDEO_OV5647 is not set +# CONFIG_VIDEO_OV5648 is not set +# CONFIG_VIDEO_OV5670 is not set +# CONFIG_VIDEO_OV5675 is not set +# CONFIG_VIDEO_OV5693 is not set +# CONFIG_VIDEO_OV5695 is not set +# CONFIG_VIDEO_OV6650 is not set +# CONFIG_VIDEO_OV7251 is not set +# CONFIG_VIDEO_OV7640 is not set +# CONFIG_VIDEO_OV7670 is not set +# CONFIG_VIDEO_OV772X is not set +# CONFIG_VIDEO_OV7740 is not set +# CONFIG_VIDEO_OV8856 is not set +# CONFIG_VIDEO_OV8858 is not set +# CONFIG_VIDEO_OV8865 is not set +# CONFIG_VIDEO_OV9282 is not set +# CONFIG_VIDEO_OV9640 is not set +# CONFIG_VIDEO_OV9650 is not set +# CONFIG_VIDEO_OV9734 is not set +# CONFIG_VIDEO_RDACM20 is not set +# CONFIG_VIDEO_RDACM21 is not set +# CONFIG_VIDEO_RJ54N1 is not set +# CONFIG_VIDEO_S5C73M3 is not set +# CONFIG_VIDEO_S5K5BAF is not set +# 
CONFIG_VIDEO_S5K6A3 is not set +# CONFIG_VIDEO_ST_VGXY61 is not set +# CONFIG_VIDEO_CCS is not set +# CONFIG_VIDEO_ET8EK8 is not set + +# +# Lens drivers +# +# CONFIG_VIDEO_AD5820 is not set +# CONFIG_VIDEO_AK7375 is not set +# CONFIG_VIDEO_DW9714 is not set +# CONFIG_VIDEO_DW9719 is not set +# CONFIG_VIDEO_DW9768 is not set +# CONFIG_VIDEO_DW9807_VCM is not set +# end of Lens drivers + +# +# Flash devices +# +# CONFIG_VIDEO_ADP1653 is not set +# CONFIG_VIDEO_LM3560 is not set +# CONFIG_VIDEO_LM3646 is not set +# end of Flash devices + +# +# Audio decoders, processors and mixers +# +CONFIG_VIDEO_CS3308=m +CONFIG_VIDEO_CS5345=m +CONFIG_VIDEO_CS53L32A=m +CONFIG_VIDEO_MSP3400=m +# CONFIG_VIDEO_SONY_BTF_MPX is not set +# CONFIG_VIDEO_TDA1997X is not set +# CONFIG_VIDEO_TDA7432 is not set +# CONFIG_VIDEO_TDA9840 is not set +# CONFIG_VIDEO_TEA6415C is not set +# CONFIG_VIDEO_TEA6420 is not set +# CONFIG_VIDEO_TLV320AIC23B is not set +# CONFIG_VIDEO_TVAUDIO is not set +# CONFIG_VIDEO_UDA1342 is not set +CONFIG_VIDEO_VP27SMPX=m +CONFIG_VIDEO_WM8739=m +CONFIG_VIDEO_WM8775=m +# end of Audio decoders, processors and mixers + +# +# RDS decoders +# +# CONFIG_VIDEO_SAA6588 is not set +# end of RDS decoders + +# +# Video decoders +# +# CONFIG_VIDEO_ADV7180 is not set +# CONFIG_VIDEO_ADV7183 is not set +# CONFIG_VIDEO_ADV748X is not set +# CONFIG_VIDEO_ADV7604 is not set +# CONFIG_VIDEO_ADV7842 is not set +# CONFIG_VIDEO_BT819 is not set +# CONFIG_VIDEO_BT856 is not set +# CONFIG_VIDEO_BT866 is not set +# CONFIG_VIDEO_ISL7998X is not set +# CONFIG_VIDEO_KS0127 is not set +# CONFIG_VIDEO_ML86V7667 is not set +# CONFIG_VIDEO_SAA7110 is not set +CONFIG_VIDEO_SAA711X=m +# CONFIG_VIDEO_TC358743 is not set +# CONFIG_VIDEO_TC358746 is not set +# CONFIG_VIDEO_TVP514X is not set +# CONFIG_VIDEO_TVP5150 is not set +# CONFIG_VIDEO_TVP7002 is not set +# CONFIG_VIDEO_TW2804 is not set +# CONFIG_VIDEO_TW9903 is not set +# CONFIG_VIDEO_TW9906 is not set +# CONFIG_VIDEO_TW9910 is not set +# CONFIG_VIDEO_VPX3220 is not set + +# +# Video and audio decoders +# +CONFIG_VIDEO_SAA717X=m +CONFIG_VIDEO_CX25840=m +# end of Video decoders + +# +# Video encoders +# +# CONFIG_VIDEO_ADV7170 is not set +# CONFIG_VIDEO_ADV7175 is not set +# CONFIG_VIDEO_ADV7343 is not set +# CONFIG_VIDEO_ADV7393 is not set +# CONFIG_VIDEO_ADV7511 is not set +# CONFIG_VIDEO_AK881X is not set +CONFIG_VIDEO_SAA7127=m +# CONFIG_VIDEO_SAA7185 is not set +# CONFIG_VIDEO_THS8200 is not set +# end of Video encoders + +# +# Video improvement chips +# +CONFIG_VIDEO_UPD64031A=m +CONFIG_VIDEO_UPD64083=m +# end of Video improvement chips + +# +# Audio/Video compression chips +# +# CONFIG_VIDEO_SAA6752HS is not set +# end of Audio/Video compression chips + +# +# SDR tuner chips +# +# CONFIG_SDR_MAX2175 is not set +# end of SDR tuner chips + +# +# Miscellaneous helper chips +# +# CONFIG_VIDEO_I2C is not set +CONFIG_VIDEO_M52790=m +# CONFIG_VIDEO_ST_MIPID02 is not set +# CONFIG_VIDEO_THS7303 is not set +# end of Miscellaneous helper chips + +# +# Video serializers and deserializers +# +# CONFIG_VIDEO_DS90UB913 is not set +# CONFIG_VIDEO_DS90UB953 is not set +# CONFIG_VIDEO_DS90UB960 is not set +# end of Video serializers and deserializers + +# +# Media SPI Adapters +# +CONFIG_CXD2880_SPI_DRV=m +# CONFIG_VIDEO_GS1662 is not set +# end of Media SPI Adapters + +CONFIG_MEDIA_TUNER=m + +# +# Customize TV tuners +# +CONFIG_MEDIA_TUNER_E4000=m +CONFIG_MEDIA_TUNER_FC0011=m +CONFIG_MEDIA_TUNER_FC0012=m +CONFIG_MEDIA_TUNER_FC0013=m +CONFIG_MEDIA_TUNER_FC2580=m 
+CONFIG_MEDIA_TUNER_IT913X=m +CONFIG_MEDIA_TUNER_M88RS6000T=m +CONFIG_MEDIA_TUNER_MAX2165=m +CONFIG_MEDIA_TUNER_MC44S803=m +CONFIG_MEDIA_TUNER_MSI001=m +CONFIG_MEDIA_TUNER_MT2060=m +CONFIG_MEDIA_TUNER_MT2063=m +CONFIG_MEDIA_TUNER_MT20XX=m +CONFIG_MEDIA_TUNER_MT2131=m +CONFIG_MEDIA_TUNER_MT2266=m +CONFIG_MEDIA_TUNER_MXL301RF=m +CONFIG_MEDIA_TUNER_MXL5005S=m +CONFIG_MEDIA_TUNER_MXL5007T=m +CONFIG_MEDIA_TUNER_QM1D1B0004=m +CONFIG_MEDIA_TUNER_QM1D1C0042=m +CONFIG_MEDIA_TUNER_QT1010=m +CONFIG_MEDIA_TUNER_R820T=m +CONFIG_MEDIA_TUNER_SI2157=m +CONFIG_MEDIA_TUNER_SIMPLE=m +CONFIG_MEDIA_TUNER_TDA18212=m +CONFIG_MEDIA_TUNER_TDA18218=m +CONFIG_MEDIA_TUNER_TDA18250=m +CONFIG_MEDIA_TUNER_TDA18271=m +CONFIG_MEDIA_TUNER_TDA827X=m +CONFIG_MEDIA_TUNER_TDA8290=m +CONFIG_MEDIA_TUNER_TDA9887=m +CONFIG_MEDIA_TUNER_TEA5761=m +CONFIG_MEDIA_TUNER_TEA5767=m +CONFIG_MEDIA_TUNER_TUA9001=m +CONFIG_MEDIA_TUNER_XC2028=m +CONFIG_MEDIA_TUNER_XC4000=m +CONFIG_MEDIA_TUNER_XC5000=m +# end of Customize TV tuners + +# +# Customise DVB Frontends +# + +# +# Multistandard (satellite) frontends +# +CONFIG_DVB_MXL5XX=m +CONFIG_DVB_STB0899=m +CONFIG_DVB_STB6100=m +CONFIG_DVB_STV090x=m +CONFIG_DVB_STV0910=m +CONFIG_DVB_STV6110x=m +CONFIG_DVB_STV6111=m + +# +# Multistandard (cable + terrestrial) frontends +# +CONFIG_DVB_DRXK=m +CONFIG_DVB_MN88472=m +CONFIG_DVB_MN88473=m +CONFIG_DVB_SI2165=m +CONFIG_DVB_TDA18271C2DD=m + +# +# DVB-S (satellite) frontends +# +CONFIG_DVB_CX24110=m +CONFIG_DVB_CX24116=m +CONFIG_DVB_CX24117=m +CONFIG_DVB_CX24120=m +CONFIG_DVB_CX24123=m +CONFIG_DVB_DS3000=m +CONFIG_DVB_MB86A16=m +CONFIG_DVB_MT312=m +CONFIG_DVB_S5H1420=m +CONFIG_DVB_SI21XX=m +CONFIG_DVB_STB6000=m +CONFIG_DVB_STV0288=m +CONFIG_DVB_STV0299=m +CONFIG_DVB_STV0900=m +CONFIG_DVB_STV6110=m +CONFIG_DVB_TDA10071=m +CONFIG_DVB_TDA10086=m +CONFIG_DVB_TDA8083=m +CONFIG_DVB_TDA8261=m +CONFIG_DVB_TDA826X=m +CONFIG_DVB_TS2020=m +CONFIG_DVB_TUA6100=m +CONFIG_DVB_TUNER_CX24113=m +CONFIG_DVB_TUNER_ITD1000=m +CONFIG_DVB_VES1X93=m +CONFIG_DVB_ZL10036=m +CONFIG_DVB_ZL10039=m + +# +# DVB-T (terrestrial) frontends +# +CONFIG_DVB_CX22700=m +CONFIG_DVB_CX22702=m +CONFIG_DVB_CXD2820R=m +CONFIG_DVB_CXD2841ER=m +CONFIG_DVB_DIB3000MB=m +CONFIG_DVB_DIB3000MC=m +CONFIG_DVB_DIB7000M=m +CONFIG_DVB_DIB7000P=m +CONFIG_DVB_DIB9000=m +CONFIG_DVB_DRXD=m +CONFIG_DVB_EC100=m +CONFIG_DVB_GP8PSK_FE=m +CONFIG_DVB_L64781=m +CONFIG_DVB_MT352=m +CONFIG_DVB_NXT6000=m +CONFIG_DVB_S5H1432=m +CONFIG_DVB_SP887X=m +CONFIG_DVB_STV0367=m +CONFIG_DVB_TDA10048=m +CONFIG_DVB_TDA1004X=m +CONFIG_DVB_ZD1301_DEMOD=m +CONFIG_DVB_ZL10353=m +CONFIG_DVB_CXD2880=m + +# +# DVB-C (cable) frontends +# +CONFIG_DVB_STV0297=m +CONFIG_DVB_TDA10021=m +CONFIG_DVB_TDA10023=m +CONFIG_DVB_VES1820=m + +# +# ATSC (North American/Korean Terrestrial/Cable DTV) frontends +# +CONFIG_DVB_AU8522=m +CONFIG_DVB_AU8522_DTV=m +CONFIG_DVB_AU8522_V4L=m +CONFIG_DVB_BCM3510=m +CONFIG_DVB_LG2160=m +CONFIG_DVB_LGDT3305=m +CONFIG_DVB_LGDT330X=m +CONFIG_DVB_MXL692=m +CONFIG_DVB_NXT200X=m +CONFIG_DVB_OR51132=m +CONFIG_DVB_OR51211=m +CONFIG_DVB_S5H1409=m +CONFIG_DVB_S5H1411=m + +# +# ISDB-T (terrestrial) frontends +# +CONFIG_DVB_DIB8000=m +CONFIG_DVB_MB86A20S=m +CONFIG_DVB_S921=m + +# +# ISDB-S (satellite) & ISDB-T (terrestrial) frontends +# +CONFIG_DVB_MN88443X=m +CONFIG_DVB_TC90522=m + +# +# Digital terrestrial only tuners/PLL +# +CONFIG_DVB_PLL=m +CONFIG_DVB_TUNER_DIB0070=m +CONFIG_DVB_TUNER_DIB0090=m + +# +# SEC control devices for DVB-S +# +CONFIG_DVB_A8293=m +CONFIG_DVB_AF9033=m +CONFIG_DVB_ASCOT2E=m +CONFIG_DVB_ATBM8830=m 
+CONFIG_DVB_HELENE=m +CONFIG_DVB_HORUS3A=m +CONFIG_DVB_ISL6405=m +CONFIG_DVB_ISL6421=m +CONFIG_DVB_ISL6423=m +CONFIG_DVB_IX2505V=m +CONFIG_DVB_LGS8GL5=m +CONFIG_DVB_LGS8GXX=m +CONFIG_DVB_LNBH25=m +CONFIG_DVB_LNBH29=m +CONFIG_DVB_LNBP21=m +CONFIG_DVB_LNBP22=m +CONFIG_DVB_M88RS2000=m +CONFIG_DVB_TDA665x=m +CONFIG_DVB_DRX39XYJ=m + +# +# Common Interface (EN50221) controller drivers +# +CONFIG_DVB_CXD2099=m +CONFIG_DVB_SP2=m +# end of Customise DVB Frontends + +# +# Tools to develop new frontends +# +# CONFIG_DVB_DUMMY_FE is not set +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +CONFIG_DRM=y +# CONFIG_DRM_DEBUG_MM is not set +CONFIG_DRM_KMS_HELPER=y +# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set +# CONFIG_DRM_DEBUG_MODESET_LOCK is not set +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +CONFIG_DRM_TTM=y +CONFIG_DRM_EXEC=m +CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=y +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# CONFIG_DRM_KOMEDA is not set +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_AMDGPU_SI=y +CONFIG_DRM_AMDGPU_CIK=y +CONFIG_DRM_AMDGPU_USERPTR=y +# CONFIG_DRM_AMDGPU_WERROR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DRM_AMD_DC_SI is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +# CONFIG_NOUVEAU_DEBUG_MMU is not set +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set +# CONFIG_DRM_PANEL_ARM_VERSATILE is not set +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_LVDS is not set +# CONFIG_DRM_PANEL_SIMPLE is not set +# CONFIG_DRM_PANEL_EDP is not set +# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set +# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set +# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set +# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set +# CONFIG_DRM_PANEL_LG_LB035Q02 is not set +# CONFIG_DRM_PANEL_LG_LG4573 is not set +# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set +# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set +# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set +# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set +# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not 
set +# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set +# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set +# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set +# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set +# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set +# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set +# CONFIG_DRM_PANEL_TPO_TPG110 is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_CHIPONE_ICN6211 is not set +# CONFIG_DRM_CHRONTEL_CH7033 is not set +# CONFIG_DRM_DISPLAY_CONNECTOR is not set +# CONFIG_DRM_ITE_IT6505 is not set +# CONFIG_DRM_LONTIUM_LT8912B is not set +# CONFIG_DRM_LONTIUM_LT9211 is not set +# CONFIG_DRM_LONTIUM_LT9611 is not set +# CONFIG_DRM_LONTIUM_LT9611UXC is not set +# CONFIG_DRM_ITE_IT66121 is not set +# CONFIG_DRM_LVDS_CODEC is not set +# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set +# CONFIG_DRM_NWL_MIPI_DSI is not set +# CONFIG_DRM_NXP_PTN3460 is not set +# CONFIG_DRM_PARADE_PS8622 is not set +# CONFIG_DRM_PARADE_PS8640 is not set +# CONFIG_DRM_SAMSUNG_DSIM is not set +# CONFIG_DRM_SIL_SII8620 is not set +# CONFIG_DRM_SII902X is not set +# CONFIG_DRM_SII9234 is not set +# CONFIG_DRM_SIMPLE_BRIDGE is not set +# CONFIG_DRM_THINE_THC63LVD1024 is not set +# CONFIG_DRM_TOSHIBA_TC358762 is not set +# CONFIG_DRM_TOSHIBA_TC358764 is not set +# CONFIG_DRM_TOSHIBA_TC358767 is not set +# CONFIG_DRM_TOSHIBA_TC358768 is not set +# CONFIG_DRM_TOSHIBA_TC358775 is not set +# CONFIG_DRM_TI_DLPC3433 is not set +# CONFIG_DRM_TI_TFP410 is not set +# CONFIG_DRM_TI_SN65DSI83 is not set +# CONFIG_DRM_TI_SN65DSI86 is not set +# CONFIG_DRM_TI_TPD12S015 is not set +# CONFIG_DRM_ANALOGIX_ANX6345 is not set +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_ANALOGIX_ANX7625 is not set +# CONFIG_DRM_I2C_ADV7511 is not set +# CONFIG_DRM_CDNS_DSI is not set +# CONFIG_DRM_CDNS_MHDP8546 is not set +# end of Display Interface Bridges + +CONFIG_DRM_LOONGSON=y +# CONFIG_DRM_ETNAVIV is not set +# CONFIG_DRM_LOGICVC is not set +# CONFIG_DRM_ARCPGU is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +# CONFIG_HYDCU_FIXUP_HEADER is not set + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_EFI=y +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_MATROX is not set +CONFIG_FB_RADEON=y +CONFIG_FB_RADEON_I2C=y +CONFIG_FB_RADEON_BACKLIGHT=y +# CONFIG_FB_RADEON_DEBUG is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS 
is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +CONFIG_FB_LS2K500=m +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +# CONFIG_FIRMWARE_EDID is not set +CONFIG_FB_DEVICE=y +CONFIG_FB_DDC=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +CONFIG_FB_BACKLIGHT=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_BACKLIGHT_LED is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +# CONFIG_VGA_CONSOLE is not set +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=y +CONFIG_SOUND_OSS_CORE=y +CONFIG_SOUND_OSS_CORE_PRECLAIM=y +CONFIG_SND=y +CONFIG_SND_TIMER=m +CONFIG_SND_PCM=m +CONFIG_SND_HWDEP=m +CONFIG_SND_SEQ_DEVICE=m +CONFIG_SND_RAWMIDI=m +CONFIG_SND_JACK=y +CONFIG_SND_JACK_INPUT_DEV=y +CONFIG_SND_OSSEMUL=y +# CONFIG_SND_MIXER_OSS is not set +# CONFIG_SND_PCM_OSS is not set +CONFIG_SND_PCM_TIMER=y +CONFIG_SND_HRTIMER=m +CONFIG_SND_DYNAMIC_MINORS=y +CONFIG_SND_MAX_CARDS=32 +# CONFIG_SND_SUPPORT_OLD_API is not set +CONFIG_SND_PROC_FS=y +CONFIG_SND_VERBOSE_PROCFS=y +# CONFIG_SND_VERBOSE_PRINTK is not set +CONFIG_SND_CTL_FAST_LOOKUP=y +# CONFIG_SND_DEBUG is not set +# CONFIG_SND_CTL_INPUT_VALIDATION is not set 
+CONFIG_SND_VMASTER=y +CONFIG_SND_CTL_LED=m +CONFIG_SND_SEQUENCER=m +CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_SEQ_HRTIMER_DEFAULT=y +CONFIG_SND_SEQ_MIDI_EVENT=m +CONFIG_SND_SEQ_MIDI=m +CONFIG_SND_SEQ_MIDI_EMUL=m +CONFIG_SND_SEQ_VIRMIDI=m +# CONFIG_SND_SEQ_UMP is not set +CONFIG_SND_MPU401_UART=m +CONFIG_SND_OPL3_LIB=m +CONFIG_SND_OPL3_LIB_SEQ=m +CONFIG_SND_VX_LIB=m +CONFIG_SND_AC97_CODEC=m +CONFIG_SND_DRIVERS=y +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +# CONFIG_SND_PCMTEST is not set +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +# CONFIG_SND_MTS64 is not set +# CONFIG_SND_SERIAL_U16550 is not set +CONFIG_SND_MPU401=m +# CONFIG_SND_PORTMAN2X4 is not set +CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_PCI=y +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m +# CONFIG_SND_AW2 is not set +CONFIG_SND_BT87X=m +CONFIG_SND_BT87X_OVERCLOCK=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN_LIB=m +CONFIG_SND_OXYGEN=m +# CONFIG_SND_CS4281 is not set +CONFIG_SND_CS46XX=m +CONFIG_SND_CS46XX_NEW_DSP=y +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +# CONFIG_SND_FM801 is not set +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +# CONFIG_SND_NM256 is not set +CONFIG_SND_PCXHR=m +# CONFIG_SND_RIPTIDE is not set +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +# CONFIG_SND_YMFPCI is not set + +# +# HD-Audio +# +CONFIG_SND_HDA=m +CONFIG_SND_HDA_GENERIC_LEDS=y +CONFIG_SND_HDA_INTEL=m +CONFIG_SND_HDA_HWDEP=y +CONFIG_SND_HDA_RECONFIG=y +CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 +CONFIG_SND_HDA_PATCH_LOADER=y +# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set +# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set +# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +# CONFIG_SND_HDA_CODEC_CS8409 is not set +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CA0132_DSP=y +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_GENERIC=m +CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0 +# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set +# CONFIG_SND_HDA_CTL_DEV_ID is not set +# end of HD-Audio + +CONFIG_SND_HDA_CORE=m +CONFIG_SND_HDA_DSP_LOADER=y +CONFIG_SND_HDA_COMPONENT=y +CONFIG_SND_HDA_PREALLOC_SIZE=512 +CONFIG_SND_INTEL_NHLT=y +CONFIG_SND_INTEL_DSP_CONFIG=m +CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m +# CONFIG_SND_SPI is not set +CONFIG_SND_USB=y +CONFIG_SND_USB_AUDIO=m +# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set +CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m 
+CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_LINE6=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_FIREWIRE=y +CONFIG_SND_FIREWIRE_LIB=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +# CONFIG_SND_SOC_ADI is not set +# CONFIG_SND_SOC_AMD_ACP is not set +# CONFIG_SND_AMD_ACP_CONFIG is not set +# CONFIG_SND_ATMEL_SOC is not set +# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set +# CONFIG_SND_DESIGNWARE_I2S is not set + +# +# SoC Audio for Freescale CPUs +# + +# +# Common SoC Audio options for Freescale CPUs: +# +# CONFIG_SND_SOC_FSL_ASRC is not set +# CONFIG_SND_SOC_FSL_SAI is not set +# CONFIG_SND_SOC_FSL_AUDMIX is not set +# CONFIG_SND_SOC_FSL_SSI is not set +# CONFIG_SND_SOC_FSL_SPDIF is not set +# CONFIG_SND_SOC_FSL_ESAI is not set +# CONFIG_SND_SOC_FSL_MICFIL is not set +# CONFIG_SND_SOC_FSL_XCVR is not set +# CONFIG_SND_SOC_IMX_AUDMUX is not set +# end of SoC Audio for Freescale CPUs + +# CONFIG_SND_SOC_CHV3_I2S is not set +# CONFIG_SND_I2S_HI6210_I2S is not set + +# +# SoC Audio for Loongson CPUs +# +# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set +# CONFIG_SND_SOC_LOONGSON_CARD is not set +# end of SoC Audio for Loongson CPUs + +# CONFIG_SND_SOC_IMG is not set +# CONFIG_SND_SOC_MTK_BTCVSD is not set +# CONFIG_SND_SOC_SOF_TOPLEVEL is not set + +# +# STMicroelectronics STM32 SOC audio support +# +# end of STMicroelectronics STM32 SOC audio support + +# CONFIG_SND_SOC_XILINX_I2S is not set +# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set +# CONFIG_SND_SOC_XILINX_SPDIF is not set +# CONFIG_SND_SOC_XTFPGA_I2S is not set +CONFIG_SND_SOC_I2C_AND_SPI=m + +# +# CODEC drivers +# +# CONFIG_SND_SOC_AC97_CODEC is not set +# CONFIG_SND_SOC_ADAU1372_I2C is not set +# CONFIG_SND_SOC_ADAU1372_SPI is not set +# CONFIG_SND_SOC_ADAU1701 is not set +# CONFIG_SND_SOC_ADAU1761_I2C is not set +# CONFIG_SND_SOC_ADAU1761_SPI is not set +# CONFIG_SND_SOC_ADAU7002 is not set +# CONFIG_SND_SOC_ADAU7118_HW is not set +# CONFIG_SND_SOC_ADAU7118_I2C is not set +# CONFIG_SND_SOC_AK4104 is not set +# CONFIG_SND_SOC_AK4118 is not set +# CONFIG_SND_SOC_AK4375 is not set +# CONFIG_SND_SOC_AK4458 is not set +# CONFIG_SND_SOC_AK4554 is not set +# CONFIG_SND_SOC_AK4613 is not set +# CONFIG_SND_SOC_AK4642 is not set +# CONFIG_SND_SOC_AK5386 is not set +# CONFIG_SND_SOC_AK5558 is not set +# CONFIG_SND_SOC_ALC5623 is not set +# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set +# CONFIG_SND_SOC_AW8738 is not set +# CONFIG_SND_SOC_AW88395 is not set +# CONFIG_SND_SOC_AW88261 is not set +# CONFIG_SND_SOC_BD28623 is not set +# CONFIG_SND_SOC_BT_SCO is not set +# CONFIG_SND_SOC_CHV3_CODEC is not set +# CONFIG_SND_SOC_CS35L32 is not set +# CONFIG_SND_SOC_CS35L33 is not set +# CONFIG_SND_SOC_CS35L34 is not set +# CONFIG_SND_SOC_CS35L35 is not set +# CONFIG_SND_SOC_CS35L36 is not set +# CONFIG_SND_SOC_CS35L41_SPI is not set +# CONFIG_SND_SOC_CS35L41_I2C is not set +# CONFIG_SND_SOC_CS35L45_SPI is not set +# CONFIG_SND_SOC_CS35L45_I2C is not set +# CONFIG_SND_SOC_CS35L56_I2C is not set +# CONFIG_SND_SOC_CS35L56_SPI is not set +# CONFIG_SND_SOC_CS42L42 is not set +# CONFIG_SND_SOC_CS42L51_I2C is not set +# CONFIG_SND_SOC_CS42L52 is not set +# CONFIG_SND_SOC_CS42L56 is not set +# CONFIG_SND_SOC_CS42L73 is not set +# CONFIG_SND_SOC_CS42L83 is not set +# CONFIG_SND_SOC_CS4234 is not set 
+# CONFIG_SND_SOC_CS4265 is not set +# CONFIG_SND_SOC_CS4270 is not set +# CONFIG_SND_SOC_CS4271_I2C is not set +# CONFIG_SND_SOC_CS4271_SPI is not set +# CONFIG_SND_SOC_CS42XX8_I2C is not set +# CONFIG_SND_SOC_CS43130 is not set +# CONFIG_SND_SOC_CS4341 is not set +# CONFIG_SND_SOC_CS4349 is not set +# CONFIG_SND_SOC_CS53L30 is not set +# CONFIG_SND_SOC_CX2072X is not set +# CONFIG_SND_SOC_DA7213 is not set +# CONFIG_SND_SOC_DMIC is not set +# CONFIG_SND_SOC_ES7134 is not set +# CONFIG_SND_SOC_ES7241 is not set +# CONFIG_SND_SOC_ES8316 is not set +# CONFIG_SND_SOC_ES8326 is not set +# CONFIG_SND_SOC_ES8328_I2C is not set +# CONFIG_SND_SOC_ES8328_SPI is not set +# CONFIG_SND_SOC_GTM601 is not set +# CONFIG_SND_SOC_HDA is not set +# CONFIG_SND_SOC_ICS43432 is not set +# CONFIG_SND_SOC_IDT821034 is not set +# CONFIG_SND_SOC_INNO_RK3036 is not set +# CONFIG_SND_SOC_MAX98088 is not set +# CONFIG_SND_SOC_MAX98090 is not set +# CONFIG_SND_SOC_MAX98357A is not set +# CONFIG_SND_SOC_MAX98504 is not set +# CONFIG_SND_SOC_MAX9867 is not set +# CONFIG_SND_SOC_MAX98927 is not set +# CONFIG_SND_SOC_MAX98520 is not set +# CONFIG_SND_SOC_MAX98373_I2C is not set +# CONFIG_SND_SOC_MAX98388 is not set +# CONFIG_SND_SOC_MAX98390 is not set +# CONFIG_SND_SOC_MAX98396 is not set +# CONFIG_SND_SOC_MAX9860 is not set +# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set +# CONFIG_SND_SOC_PCM1681 is not set +# CONFIG_SND_SOC_PCM1789_I2C is not set +# CONFIG_SND_SOC_PCM179X_I2C is not set +# CONFIG_SND_SOC_PCM179X_SPI is not set +# CONFIG_SND_SOC_PCM186X_I2C is not set +# CONFIG_SND_SOC_PCM186X_SPI is not set +# CONFIG_SND_SOC_PCM3060_I2C is not set +# CONFIG_SND_SOC_PCM3060_SPI is not set +# CONFIG_SND_SOC_PCM3168A_I2C is not set +# CONFIG_SND_SOC_PCM3168A_SPI is not set +# CONFIG_SND_SOC_PCM5102A is not set +# CONFIG_SND_SOC_PCM512x_I2C is not set +# CONFIG_SND_SOC_PCM512x_SPI is not set +# CONFIG_SND_SOC_PEB2466 is not set +# CONFIG_SND_SOC_RK3328 is not set +# CONFIG_SND_SOC_RT5616 is not set +# CONFIG_SND_SOC_RT5631 is not set +# CONFIG_SND_SOC_RT5640 is not set +# CONFIG_SND_SOC_RT5659 is not set +# CONFIG_SND_SOC_RT9120 is not set +# CONFIG_SND_SOC_SGTL5000 is not set +# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set +# CONFIG_SND_SOC_SIMPLE_MUX is not set +# CONFIG_SND_SOC_SMA1303 is not set +# CONFIG_SND_SOC_SPDIF is not set +# CONFIG_SND_SOC_SRC4XXX_I2C is not set +# CONFIG_SND_SOC_SSM2305 is not set +# CONFIG_SND_SOC_SSM2518 is not set +# CONFIG_SND_SOC_SSM2602_SPI is not set +# CONFIG_SND_SOC_SSM2602_I2C is not set +# CONFIG_SND_SOC_SSM3515 is not set +# CONFIG_SND_SOC_SSM4567 is not set +# CONFIG_SND_SOC_STA32X is not set +# CONFIG_SND_SOC_STA350 is not set +# CONFIG_SND_SOC_STI_SAS is not set +# CONFIG_SND_SOC_TAS2552 is not set +# CONFIG_SND_SOC_TAS2562 is not set +# CONFIG_SND_SOC_TAS2764 is not set +# CONFIG_SND_SOC_TAS2770 is not set +# CONFIG_SND_SOC_TAS2780 is not set +# CONFIG_SND_SOC_TAS2781_I2C is not set +# CONFIG_SND_SOC_TAS5086 is not set +# CONFIG_SND_SOC_TAS571X is not set +# CONFIG_SND_SOC_TAS5720 is not set +# CONFIG_SND_SOC_TAS5805M is not set +# CONFIG_SND_SOC_TAS6424 is not set +# CONFIG_SND_SOC_TDA7419 is not set +# CONFIG_SND_SOC_TFA9879 is not set +# CONFIG_SND_SOC_TFA989X is not set +# CONFIG_SND_SOC_TLV320ADC3XXX is not set +# CONFIG_SND_SOC_TLV320AIC23_I2C is not set +# CONFIG_SND_SOC_TLV320AIC23_SPI is not set +# CONFIG_SND_SOC_TLV320AIC31XX is not set +# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set +# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set +# CONFIG_SND_SOC_TLV320AIC3X_I2C is 
not set +# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set +# CONFIG_SND_SOC_TLV320ADCX140 is not set +# CONFIG_SND_SOC_TS3A227E is not set +# CONFIG_SND_SOC_TSCS42XX is not set +# CONFIG_SND_SOC_TSCS454 is not set +# CONFIG_SND_SOC_UDA1334 is not set +# CONFIG_SND_SOC_WM8510 is not set +# CONFIG_SND_SOC_WM8523 is not set +# CONFIG_SND_SOC_WM8524 is not set +# CONFIG_SND_SOC_WM8580 is not set +# CONFIG_SND_SOC_WM8711 is not set +# CONFIG_SND_SOC_WM8728 is not set +# CONFIG_SND_SOC_WM8731_I2C is not set +# CONFIG_SND_SOC_WM8731_SPI is not set +# CONFIG_SND_SOC_WM8737 is not set +# CONFIG_SND_SOC_WM8741 is not set +# CONFIG_SND_SOC_WM8750 is not set +# CONFIG_SND_SOC_WM8753 is not set +# CONFIG_SND_SOC_WM8770 is not set +# CONFIG_SND_SOC_WM8776 is not set +# CONFIG_SND_SOC_WM8782 is not set +# CONFIG_SND_SOC_WM8804_I2C is not set +# CONFIG_SND_SOC_WM8804_SPI is not set +# CONFIG_SND_SOC_WM8903 is not set +# CONFIG_SND_SOC_WM8904 is not set +# CONFIG_SND_SOC_WM8940 is not set +# CONFIG_SND_SOC_WM8960 is not set +# CONFIG_SND_SOC_WM8961 is not set +# CONFIG_SND_SOC_WM8962 is not set +# CONFIG_SND_SOC_WM8974 is not set +# CONFIG_SND_SOC_WM8978 is not set +# CONFIG_SND_SOC_WM8985 is not set +# CONFIG_SND_SOC_ZL38060 is not set +# CONFIG_SND_SOC_MAX9759 is not set +# CONFIG_SND_SOC_MT6351 is not set +# CONFIG_SND_SOC_MT6358 is not set +# CONFIG_SND_SOC_MT6660 is not set +# CONFIG_SND_SOC_NAU8315 is not set +# CONFIG_SND_SOC_NAU8540 is not set +# CONFIG_SND_SOC_NAU8810 is not set +# CONFIG_SND_SOC_NAU8821 is not set +# CONFIG_SND_SOC_NAU8822 is not set +# CONFIG_SND_SOC_NAU8824 is not set +# CONFIG_SND_SOC_TPA6130A2 is not set +# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set +# CONFIG_SND_SOC_LPASS_VA_MACRO is not set +# CONFIG_SND_SOC_LPASS_RX_MACRO is not set +# CONFIG_SND_SOC_LPASS_TX_MACRO is not set +# end of CODEC drivers + +# CONFIG_SND_SIMPLE_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD is not set +# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set +# CONFIG_SND_TEST_COMPONENT is not set +# CONFIG_SND_VIRTIO is not set +CONFIG_AC97_BUS=m +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m 
+CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +CONFIG_LOGITECH_FF=y +CONFIG_LOGIRUMBLEPAD2_FF=y +CONFIG_LOGIG940_FF=y +CONFIG_LOGIWHEELS_FF=y +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +# CONFIG_HID_PICOLCD_FB is not set +# CONFIG_HID_PICOLCD_BACKLIGHT is not set +# CONFIG_HID_PICOLCD_LCD is not set +# CONFIG_HID_PICOLCD_LEDS is not set +# CONFIG_HID_PICOLCD_CIR is not set +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set +# CONFIG_I2C_HID_OF_ELAN is not set +# CONFIG_I2C_HID_OF_GOODIX is not set +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +CONFIG_USB_XHCI_PLATFORM=m +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +CONFIG_USB_EHCI_HCD_PLATFORM=y +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +CONFIG_USB_OHCI_HCD_PLATFORM=y 
+CONFIG_USB_UHCI_HCD=y +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +CONFIG_USB_DWC2=y +CONFIG_USB_DWC2_HOST=y + +# +# Gadget/Dual-role mode requires USB Gadget support to be enabled +# +# CONFIG_USB_DWC2_PERIPHERAL is not set +# CONFIG_USB_DWC2_DUAL_ROLE is not set +# CONFIG_USB_DWC2_PCI is not set +# CONFIG_USB_DWC2_DEBUG is not set +# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# 
CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +# CONFIG_USB_ONBOARD_HUB is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_SNP_UDC_PLAT is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +# CONFIG_USB_MAX3420_UDC is not set +# CONFIG_USB_CDNS2_UDC is not set +# CONFIG_USB_DUMMY_HCD is not set +# end of USB Peripheral Controller + +# CONFIG_USB_CONFIGFS is not set + +# +# USB Gadget precomposed configurations +# +# CONFIG_USB_ZERO is not set +# CONFIG_USB_AUDIO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +# CONFIG_USB_GADGET_TARGET is not set +# CONFIG_USB_G_SERIAL is not set +# CONFIG_USB_MIDI_GADGET is not set +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_NOKIA is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set +# CONFIG_USB_G_WEBCAM is not set +# CONFIG_USB_RAW_GADGET is not set +# end of USB Gadget precomposed configurations + +CONFIG_TYPEC=m +CONFIG_TYPEC_TCPM=m +CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +# CONFIG_TYPEC_TCPCI_MAXIM is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=m +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=m +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_MMC=m +CONFIG_PWRSEQ_EMMC=m +# CONFIG_PWRSEQ_SD8787 is not set +CONFIG_PWRSEQ_SIMPLE=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not 
set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_OF_ARASAN is not set +# CONFIG_MMC_SDHCI_OF_AT91 is not set +# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set +# CONFIG_MMC_SDHCI_CADENCE is not set +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_SDHCI_MILBEAUT is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +CONFIG_MMC_SDHCI_XENON=m +# CONFIG_MMC_SDHCI_OMAP is not set +# CONFIG_MMC_SDHCI_AM654 is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_AN30259A is not set +# CONFIG_LEDS_AW200XX is not set +# CONFIG_LEDS_AW2013 is not set +# CONFIG_LEDS_BCM6328 is not set +# CONFIG_LEDS_BCM6358 is not set +# CONFIG_LEDS_CR0014114 is not set +# CONFIG_LEDS_EL15203000 is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_LM3692X is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_LP55XX_COMMON is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set +# CONFIG_LEDS_IS31FL32XX is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +# CONFIG_LEDS_SYSCON is not set +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_SPI_BYTE is not set +# CONFIG_LEDS_LM3697 is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=y +# CONFIG_LEDS_TRIGGER_TTY 
is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_ERDMA is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +CONFIG_RTC_DRV_RV8803=m +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is 
not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +CONFIG_RTC_DRV_LOONGSON=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_COMEDI=m +# CONFIG_COMEDI_DEBUG is not set +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 +# CONFIG_COMEDI_MISC_DRIVERS is not set +# CONFIG_COMEDI_ISA_DRIVERS is not set +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +# CONFIG_COMEDI_ADDI_APCI_1032 is not set +# CONFIG_COMEDI_ADDI_APCI_1500 is not set +# CONFIG_COMEDI_ADDI_APCI_1516 is not set +# CONFIG_COMEDI_ADDI_APCI_1564 is not set +# CONFIG_COMEDI_ADDI_APCI_16XX is not set +# CONFIG_COMEDI_ADDI_APCI_2032 is not set +# CONFIG_COMEDI_ADDI_APCI_2200 is not set +# CONFIG_COMEDI_ADDI_APCI_3120 is not set +# CONFIG_COMEDI_ADDI_APCI_3501 is not set +# CONFIG_COMEDI_ADDI_APCI_3XXX is not set +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m 
+CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set +# CONFIG_COMEDI_AMPLC_PC236_PCI is not set +# CONFIG_COMEDI_AMPLC_PC263_PCI is not set +# CONFIG_COMEDI_AMPLC_PCI224 is not set +# CONFIG_COMEDI_AMPLC_PCI230 is not set +# CONFIG_COMEDI_CONTEC_PCI_DIO is not set +# CONFIG_COMEDI_DAS08_PCI is not set +# CONFIG_COMEDI_DT3000 is not set +# CONFIG_COMEDI_DYNA_PCI10XX is not set +# CONFIG_COMEDI_GSC_HPDI is not set +# CONFIG_COMEDI_MF6X4 is not set +# CONFIG_COMEDI_ICP_MULTI is not set +# CONFIG_COMEDI_DAQBOARD2000 is not set +# CONFIG_COMEDI_JR3_PCI is not set +# CONFIG_COMEDI_KE_COUNTER is not set +# CONFIG_COMEDI_CB_PCIDAS64 is not set +# CONFIG_COMEDI_CB_PCIDAS is not set +# CONFIG_COMEDI_CB_PCIDDA is not set +# CONFIG_COMEDI_CB_PCIMDAS is not set +# CONFIG_COMEDI_CB_PCIMDDA is not set +# CONFIG_COMEDI_ME4000 is not set +# CONFIG_COMEDI_ME_DAQ is not set +# CONFIG_COMEDI_NI_6527 is not set +# CONFIG_COMEDI_NI_65XX is not set +# CONFIG_COMEDI_NI_660X is not set +# CONFIG_COMEDI_NI_670X is not set +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +# CONFIG_COMEDI_RTD520 is not set +# CONFIG_COMEDI_S626 is not set +CONFIG_COMEDI_MITE=m +CONFIG_COMEDI_NI_TIOCMD=m +# CONFIG_COMEDI_USB_DRIVERS is not set +CONFIG_COMEDI_8254=m +CONFIG_COMEDI_8255=m +# CONFIG_COMEDI_8255_SA is not set +# CONFIG_COMEDI_KCOMEDILIB is not set +CONFIG_COMEDI_NI_LABPC=m +CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m +# CONFIG_COMEDI_TESTS is not set +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +CONFIG_LOONGARCH_PLATFORM_DEVICES=y +CONFIG_LOONGSON_LAPTOP=y +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP 
is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_LOONGSON2=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +# CONFIG_IOMMUFD is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is 
not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ADI_AXI_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C 
is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set 
+# CONFIG_FXOS8700_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# 
+# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# CONFIG_XILINX_INTC is not set +CONFIG_IRQ_LOONGARCH_CPU=y +CONFIG_LOONGSON_LIOINTC=y +CONFIG_LOONGSON_EIOINTC=y +CONFIG_LOONGSON_HTVEC=y +CONFIG_LOONGSON_PCH_PIC=y +CONFIG_LOONGSON_PCH_MSI=y +CONFIG_LOONGSON_PCH_LPC=y +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_SIMPLE is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +# CONFIG_DTPM is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_DWC_PCIE_PMU is not set +# 
end of Performance monitor support + +CONFIG_RAS=y +CONFIG_USB4=m +# CONFIG_USB4_DEBUGFS_WRITE is not set +# CONFIG_USB4_DMA_TEST is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +# CONFIG_VIRT_FUSE is not set +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +# CONFIG_CACHEFILES_ONDEMAND is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y 
+CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y +CONFIG_NTFS3_LZX_XPRESS=y +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +CONFIG_ORANGEFS_FS=m +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=m +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=m +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_PSTORE_BLK is not set +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# 
CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFS_V4_2_READ_PLUS=y +CONFIG_NFSD=y +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +CONFIG_KEY_DH_OPERATIONS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y 
+CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +# CONFIG_IMA_BLACKLIST_KEYRING is not set +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is 
not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y 
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (loongarch) +# +CONFIG_CRYPTO_CRC32_LOONGARCH=m +# end of Accelerated Cryptographic Algorithms for CPU (loongarch) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# 
end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y 
+CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=4096 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# 
CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +# CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/loongarch/configs/anolis_defconfig b/arch/loongarch/configs/anolis_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..365f27c124b4bb4d25582a06687db1584b7844c7 --- /dev/null +++ b/arch/loongarch/configs/anolis_defconfig @@ -0,0 +1,8851 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/loongarch 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +# CONFIG_KERNEL_ZSTD is not set +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_GENERIC_IRQ_CHIP=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_GENERIC_IRQ_MULTI_HANDLER=y +CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +CONFIG_NO_HZ_IDLE=y +# CONFIG_NO_HZ_FULL is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y + +# +# 
BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT_ALWAYS_ON is not set +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_USERMODE_DRIVER=y +# CONFIG_BPF_PRELOAD is not set +# CONFIG_BPF_LSM is not set +# end of BPF subsystem + +CONFIG_PREEMPT_VOLUNTARY_BUILD=y +# CONFIG_PREEMPT_NONE is not set +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_PREEMPT is not set +# CONFIG_SCHED_CORE is not set + +# +# CPU/Task time and stats accounting +# +CONFIG_TICK_CPU_ACCOUNTING=y +# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +# CONFIG_PSI_DEFAULT_DISABLED is not set +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +# end of RCU Subsystem + +# CONFIG_IKCONFIG is not set +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_GENERIC_SCHED_CLOCK=y + +# +# Scheduler features +# +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN=y +CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW=y +CONFIG_EXPERT=y +CONFIG_MULTIUSER=y +# CONFIG_SGETMASK_SYSCALL is not set +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +# CONFIG_DEBUG_RSEQ is not 
set +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_PERF_USE_VMALLOC=y +# CONFIG_PC104 is not set + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +# end of Kexec and crash features +# end of General setup + +CONFIG_LOONGARCH=y +CONFIG_64BIT=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_L1_CACHE_SHIFT=6 +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MACH_LOONGSON64=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_PGTABLE_3LEVEL=y +CONFIG_PGTABLE_LEVELS=3 +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_AS_HAS_EXPLICIT_RELOCS=y +CONFIG_AS_HAS_FCSR_CLASS=y +CONFIG_AS_HAS_LSX_EXTENSION=y +CONFIG_AS_HAS_LASX_EXTENSION=y +CONFIG_AS_HAS_LBT_EXTENSION=y +CONFIG_AS_HAS_LVZ_EXTENSION=y + +# +# Kernel type and options +# +# CONFIG_HZ_100 is not set +CONFIG_HZ_250=y +# CONFIG_HZ_300 is not set +# CONFIG_HZ_1000 is not set +CONFIG_HZ=250 +CONFIG_SCHED_HRTICK=y +# CONFIG_4KB_3LEVEL is not set +# CONFIG_4KB_4LEVEL is not set +# CONFIG_16KB_2LEVEL is not set +CONFIG_16KB_3LEVEL=y +# CONFIG_64KB_2LEVEL is not set +# CONFIG_64KB_3LEVEL is not set +CONFIG_CMDLINE="" +CONFIG_CMDLINE_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set +# CONFIG_CMDLINE_FORCE is not set +CONFIG_DMI=y +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_SCHED_SMT=y +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_NR_CPUS=256 +CONFIG_NUMA=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_FORCE_MAX_ORDER=11 +CONFIG_ARCH_IOREMAP=y +CONFIG_ARCH_WRITECOMBINE=y +CONFIG_ARCH_STRICT_ALIGN=y +CONFIG_CPU_HAS_FPU=y +CONFIG_CPU_HAS_LSX=y +CONFIG_CPU_HAS_LASX=y +CONFIG_CPU_HAS_LBT=y +CONFIG_CPU_HAS_PREFETCH=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SELECTS_CRASH_DUMP=y +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_RANDOMIZE_BASE_MAX_OFFSET=0x01000000 +CONFIG_SECCOMP=y +# end of Kernel type and options + +CONFIG_ARCH_SELECT_MEMORY_MODEL=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_ARCH_MEMORY_PROBE=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=12 +CONFIG_ARCH_MMAP_RND_BITS_MAX=18 +CONFIG_ARCH_SUPPORTS_UPROBES=y + +# +# Power management options +# +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +# CONFIG_CPU_FREQ_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set +# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set +# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set + +# +# CPU frequency scaling drivers +# +# CONFIG_CPUFREQ_DT is not set +# CONFIG_CPUFREQ_DT_PLATDEV is not set +CONFIG_LOONGSON3_ACPI_CPUFREQ=y +# end of CPU Frequency scaling + +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y 
+CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_GENERIC_GSI=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +# CONFIG_ACPI_DEBUGGER is not set +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_SLEEP=y +# CONFIG_ACPI_EC_DEBUGFS is not set +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=y +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=y +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_MCFG=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_THERMAL=y +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +# CONFIG_ACPI_DEBUG is not set +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +# CONFIG_ACPI_HED is not set +# CONFIG_ACPI_CUSTOM_METHOD is not set +# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set +CONFIG_ACPI_NUMA=y +# CONFIG_ACPI_HMAT is not set +CONFIG_ACPI_WATCHDOG=y +# CONFIG_ACPI_CONFIGFS is not set +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PPTT=y +# CONFIG_ACPI_FFH is not set +# CONFIG_PMIC_OPREGION is not set +# end of Power management options + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y + +# +# General architecture-dependent options +# +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_64BIT_ALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y +CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_TIF_NOHZ=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y 
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=12 +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y +# CONFIG_COMPAT_32BIT_TIME is not set +CONFIG_ARCH_HAS_PHYS_TO_DMA=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +# CONFIG_LOCK_EVENT_COUNTS is not set +CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT=0 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +CONFIG_MODULE_SIG_ALL=y +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +# CONFIG_TRIM_UNUSED_KSYMS is not set +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_MQ=y +# CONFIG_BLK_CGROUP_IOLATENCY is not set +# CONFIG_BLK_CGROUP_FC_APPID is not set +# CONFIG_BLK_CGROUP_IOCOST is not set +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +# CONFIG_OSF_PARTITION is not set +# CONFIG_AMIGA_PARTITION is not set +# CONFIG_ATARI_PARTITION is not set +# CONFIG_MAC_PARTITION is not set +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +# CONFIG_MINIX_SUBPARTITION is not set +# CONFIG_SOLARIS_X86_PARTITION is not set +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +# CONFIG_SGI_PARTITION is not set +# CONFIG_ULTRIX_PARTITION is not set +# CONFIG_SUN_PARTITION is not set +# CONFIG_KARMA_PARTITION is not set +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +# CONFIG_BFQ_CGROUP_DEBUG is not set +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y +CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK=y 
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_READ_LOCK=y +CONFIG_ARCH_INLINE_READ_LOCK_BH=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_READ_UNLOCK=y +CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_INLINE_WRITE_LOCK=y +CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_SPIN_TRYLOCK=y +CONFIG_INLINE_SPIN_TRYLOCK_BH=y +CONFIG_INLINE_SPIN_LOCK=y +CONFIG_INLINE_SPIN_LOCK_BH=y +CONFIG_INLINE_SPIN_LOCK_IRQ=y +CONFIG_INLINE_SPIN_LOCK_IRQSAVE=y +CONFIG_INLINE_SPIN_UNLOCK_BH=y +CONFIG_INLINE_SPIN_UNLOCK_IRQ=y +CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_READ_LOCK=y +CONFIG_INLINE_READ_LOCK_BH=y +CONFIG_INLINE_READ_LOCK_IRQ=y +CONFIG_INLINE_READ_LOCK_IRQSAVE=y +CONFIG_INLINE_READ_UNLOCK=y +CONFIG_INLINE_READ_UNLOCK_BH=y +CONFIG_INLINE_READ_UNLOCK_IRQ=y +CONFIG_INLINE_READ_UNLOCK_IRQRESTORE=y +CONFIG_INLINE_WRITE_LOCK=y +CONFIG_INLINE_WRITE_LOCK_BH=y +CONFIG_INLINE_WRITE_LOCK_IRQ=y +CONFIG_INLINE_WRITE_LOCK_IRQSAVE=y +CONFIG_INLINE_WRITE_UNLOCK=y +CONFIG_INLINE_WRITE_UNLOCK_BH=y +CONFIG_INLINE_WRITE_UNLOCK_IRQ=y +CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y +CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_ARCH_BINFMT_ELF_STATE=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="zstd" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +CONFIG_Z3FOLD=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLUB_TINY is not set +CONFIG_SLAB_MERGE_DEFAULT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_SELECT_MEMORY_MODEL=y 
+CONFIG_SPARSEMEM_MANUAL=y +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_ARCH_KEEP_MEMBLOCK=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +# CONFIG_READ_ONLY_THP_FOR_FS is not set +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ZONE_DMA32=y +CONFIG_HMM_MIRROR=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MEMFD_CREATE=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +# CONFIG_LRU_GEN is not set +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +# CONFIG_DAMON is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_XFRM_ESPINTCP=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_IP_PNP_RARP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +CONFIG_NET_FOU=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m 
+CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_RENO=y +CONFIG_DEFAULT_TCP_CONG="reno" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_FOU=m +CONFIG_IPV6_FOU_TUNNEL=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +# CONFIG_NF_CONNTRACK_PROCFS is not set +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NF_DUP_NETDEV=m 
+CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m 
+CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_MATCH_SRH=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_NF_CONNTRACK_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BPFILTER=y +CONFIG_BPFILTER_UMH=m +CONFIG_IP_DCCP=m +CONFIG_INET_DCCP_DIAG=m + +# +# DCCP CCIDs 
Configuration +# +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_TFRC_LIB=y +CONFIG_IP_DCCP_TFRC_DEBUG=y +# end of DCCP CCIDs Configuration + +# +# DCCP Kernel Hacking +# +CONFIG_IP_DCCP_DEBUG=y +# end of DCCP Kernel Hacking + +CONFIG_IP_SCTP=m +CONFIG_SCTP_DBG_OBJCNT=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y +CONFIG_L2TP=m +# CONFIG_L2TP_DEBUGFS is not set +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +# CONFIG_BRIDGE_CFM is not set +CONFIG_NET_DSA=m +# CONFIG_NET_DSA_TAG_NONE is not set +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM_COMMON=m +CONFIG_NET_DSA_TAG_BRCM=m +# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +# CONFIG_NET_DSA_TAG_HELLCREEK is not set +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA_COMMON=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +# CONFIG_NET_DSA_TAG_RTL8_4 is not set +# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m +# CONFIG_NET_DSA_TAG_XRS700X is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_MQPRIO_LIB=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_FQ_PIE is not set +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m 
+CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +# CONFIG_NET_EMATCH_CANID is not set +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_BATMAN_V=y +CONFIG_BATMAN_ADV_BLA=y +CONFIG_BATMAN_ADV_DAT=y +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_MCAST=y +CONFIG_BATMAN_ADV_DEBUG=y +# CONFIG_BATMAN_ADV_TRACING is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y +# CONFIG_NCSI_OEM_CMD_KEEP_PHY is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +# CONFIG_NET_DROP_MONITOR is not set +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +CONFIG_CAN=m +CONFIG_CAN_RAW=m +CONFIG_CAN_BCM=m +CONFIG_CAN_GW=m +# CONFIG_CAN_J1939 is not set +# CONFIG_CAN_ISOTP is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +# CONFIG_BT_HCIBTUSB_BCM is not set +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# 
CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_WEXT_CORE=y +CONFIG_WEXT_PROC=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +# CONFIG_CFG80211_CERTIFICATION_ONUS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +# CONFIG_CFG80211_DEBUGFS is not set +CONFIG_CFG80211_CRDA_SUPPORT=y +CONFIG_CFG80211_WEXT=y +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +# CONFIG_MAC80211_DEBUGFS is not set +# CONFIG_MAC80211_MESSAGE_TRACING is not set +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_FD=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_NET_9P_RDMA is not set +# CONFIG_NET_9P_DEBUG is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCI_DOMAINS_GENERIC=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# CONFIG_PCIEASPM_PERFORMANCE is not set +CONFIG_PCIE_PME=y +CONFIG_PCIE_DPC=y +# CONFIG_PCIE_PTM is not set +# CONFIG_PCIE_EDR is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_MSI_ARCH_FALLBACKS=y +CONFIG_PCI_QUIRKS=y +# CONFIG_PCI_DEBUG is not set +# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m +CONFIG_PCI_ATS=y +CONFIG_PCI_ECAM=y +CONFIG_PCI_IOV=y +# CONFIG_PCI_PRI is not set +# CONFIG_PCI_PASID is not set +CONFIG_PCI_LABEL=y +# CONFIG_PCI_DYNAMIC_OF_NODES is not set +# CONFIG_PCIE_BUS_TUNE_OFF is not set +CONFIG_PCIE_BUS_DEFAULT=y +# CONFIG_PCIE_BUS_SAFE is not set +# CONFIG_PCIE_BUS_PERFORMANCE is not set +# CONFIG_PCIE_BUS_PEER2PEER is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +# CONFIG_HOTPLUG_PCI_ACPI_IBM is not set +# CONFIG_HOTPLUG_PCI_CPCI is not set +CONFIG_HOTPLUG_PCI_SHPC=y + +# +# PCI controller drivers +# +# CONFIG_PCI_FTPCI100 is not set +# CONFIG_PCI_HOST_GENERIC is not set +CONFIG_PCI_LOONGSON=y +# CONFIG_PCIE_MICROCHIP_HOST is not set +# CONFIG_PCIE_XILINX is not set + +# +# Cadence-based PCIe controllers +# +# CONFIG_PCIE_CADENCE_PLAT_HOST is not set +# CONFIG_PCI_J721E_HOST is not set +# end of Cadence-based PCIe controllers + +# +# DesignWare-based PCIe controllers +# +# CONFIG_PCI_MESON is not set +# CONFIG_PCIE_DW_PLAT_HOST is not set +# end of DesignWare-based PCIe controllers + +# +# Mobiveil-based PCIe controllers +# +# end of Mobiveil-based PCIe 
controllers +# end of PCI controller drivers + +# +# PCI Endpoint +# +# CONFIG_PCI_ENDPOINT is not set +# end of PCI Endpoint + +# +# PCI switch controller drivers +# +# CONFIG_PCI_SW_SWITCHTEC is not set +# end of PCI switch controller drivers + +# CONFIG_CXL_BUS is not set +CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set +CONFIG_CARDBUS=y + +# +# PC-card bridges +# +CONFIG_YENTA=m +CONFIG_YENTA_O2=y +CONFIG_YENTA_RICOH=y +CONFIG_YENTA_TI=y +CONFIG_YENTA_ENE_TUNE=y +CONFIG_YENTA_TOSHIBA=y +CONFIG_RAPIDIO=y +CONFIG_RAPIDIO_TSI721=y +CONFIG_RAPIDIO_DISC_TIMEOUT=30 +CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS=y +# CONFIG_RAPIDIO_DMA_ENGINE is not set +# CONFIG_RAPIDIO_DEBUG is not set +CONFIG_RAPIDIO_ENUM_BASIC=m +CONFIG_RAPIDIO_CHMAN=m +CONFIG_RAPIDIO_MPORT_CDEV=m + +# +# RapidIO Switch drivers +# +# CONFIG_RAPIDIO_CPS_XX is not set +# CONFIG_RAPIDIO_CPS_GEN2 is not set +# CONFIG_RAPIDIO_RXS_GEN3 is not set +# end of RapidIO Switch drivers + +# +# Generic Driver Options +# +CONFIG_AUXILIARY_BUS=y +CONFIG_UEVENT_HELPER=y +CONFIG_UEVENT_HELPER_PATH="" +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_DEVTMPFS_SAFE is not set +CONFIG_STANDALONE=y +CONFIG_PREVENT_FIRMWARE_BUILD=y + +# +# Firmware loader +# +CONFIG_FW_LOADER=y +CONFIG_FW_LOADER_DEBUG=y +CONFIG_FW_LOADER_PAGED_BUF=y +CONFIG_EXTRA_FIRMWARE="" +# CONFIG_FW_LOADER_USER_HELPER is not set +CONFIG_FW_LOADER_COMPRESS=y +CONFIG_FW_LOADER_COMPRESS_XZ=y +# CONFIG_FW_LOADER_COMPRESS_ZSTD is not set +CONFIG_FW_CACHE=y +# CONFIG_FW_UPLOAD is not set +# end of Firmware loader + +CONFIG_WANT_DEV_COREDUMP=y +CONFIG_ALLOW_DEV_COREDUMP=y +CONFIG_DEV_COREDUMP=y +# CONFIG_DEBUG_DRIVER is not set +# CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set +# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set +CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_SOC_BUS=y +CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=m +CONFIG_REGMAP_SPI=m +CONFIG_REGMAP_MMIO=y +CONFIG_DMA_SHARED_BUFFER=y +# CONFIG_DMA_FENCE_TRACE is not set +# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set +# end of Generic Driver Options + +# +# Bus devices +# +# CONFIG_MOXTET is not set +# CONFIG_MHI_BUS is not set +# CONFIG_MHI_BUS_EP is not set +# end of Bus devices + +# +# Cache Drivers +# +# end of Cache Drivers + +CONFIG_CONNECTOR=y +CONFIG_PROC_EVENTS=y + +# +# Firmware Drivers +# + +# +# ARM System Control and Management Interface Protocol +# +# end of ARM System Control and Management Interface Protocol + +# CONFIG_FIRMWARE_MEMMAP is not set +CONFIG_DMIID=y +CONFIG_DMI_SYSFS=y +CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y +CONFIG_ISCSI_IBFT=m +CONFIG_SYSFB=y +# CONFIG_SYSFB_SIMPLEFB is not set +# CONFIG_GOOGLE_FIRMWARE is not set + +# +# EFI (Extensible Firmware Interface) Support +# +CONFIG_EFI_ESRT=y +CONFIG_EFI_VARS_PSTORE=m +# CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE is not set +CONFIG_EFI_RUNTIME_WRAPPERS=y +CONFIG_EFI_GENERIC_STUB=y +CONFIG_EFI_ZBOOT=y +# CONFIG_EFI_BOOTLOADER_CONTROL is not set +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m +# CONFIG_RESET_ATTACK_MITIGATION is not set +# CONFIG_EFI_DISABLE_PCI_DMA is not set +CONFIG_EFI_EARLYCON=y +CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y +# CONFIG_EFI_DISABLE_RUNTIME is not set +# CONFIG_EFI_COCO_SECRET is not set +# end of EFI (Extensible Firmware Interface) Support + +# +# Tegra firmware driver +# +# end of Tegra firmware driver +# end of Firmware Drivers + +# CONFIG_GNSS is not set +CONFIG_MTD=m +# CONFIG_MTD_TESTS is not set + +# +# Partition parsers +# +# CONFIG_MTD_AR7_PARTS is not set +# CONFIG_MTD_CMDLINE_PARTS is not set 
+CONFIG_MTD_OF_PARTS=m +# CONFIG_MTD_REDBOOT_PARTS is not set +# end of Partition parsers + +# +# User Modules And Translation Layers +# +CONFIG_MTD_BLKDEVS=m +CONFIG_MTD_BLOCK=m +# CONFIG_MTD_BLOCK_RO is not set + +# +# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK. +# +# CONFIG_FTL is not set +# CONFIG_NFTL is not set +# CONFIG_INFTL is not set +# CONFIG_RFD_FTL is not set +# CONFIG_SSFDC is not set +# CONFIG_SM_FTL is not set +# CONFIG_MTD_OOPS is not set +# CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set + +# +# RAM/ROM/Flash chip drivers +# +CONFIG_MTD_CFI=m +CONFIG_MTD_JEDECPROBE=m +CONFIG_MTD_GEN_PROBE=m +# CONFIG_MTD_CFI_ADV_OPTIONS is not set +CONFIG_MTD_MAP_BANK_WIDTH_1=y +CONFIG_MTD_MAP_BANK_WIDTH_2=y +CONFIG_MTD_MAP_BANK_WIDTH_4=y +CONFIG_MTD_CFI_I1=y +CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI_INTELEXT=m +CONFIG_MTD_CFI_AMDSTD=m +CONFIG_MTD_CFI_STAA=m +CONFIG_MTD_CFI_UTIL=m +CONFIG_MTD_RAM=m +CONFIG_MTD_ROM=m +# CONFIG_MTD_ABSENT is not set +# end of RAM/ROM/Flash chip drivers + +# +# Mapping drivers for chip access +# +# CONFIG_MTD_COMPLEX_MAPPINGS is not set +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_INTEL_VR_NOR is not set +# CONFIG_MTD_PLATRAM is not set +# end of Mapping drivers for chip access + +# +# Self-contained MTD device drivers +# +# CONFIG_MTD_PMC551 is not set +# CONFIG_MTD_DATAFLASH is not set +# CONFIG_MTD_MCHP23K256 is not set +# CONFIG_MTD_MCHP48L640 is not set +# CONFIG_MTD_SST25L is not set +# CONFIG_MTD_SLRAM is not set +# CONFIG_MTD_PHRAM is not set +# CONFIG_MTD_MTDRAM is not set +CONFIG_MTD_BLOCK2MTD=m + +# +# Disk-On-Chip Device Drivers +# +# CONFIG_MTD_DOCG3 is not set +# end of Self-contained MTD device drivers + +# +# NAND +# +# CONFIG_MTD_ONENAND is not set +# CONFIG_MTD_RAW_NAND is not set +# CONFIG_MTD_SPI_NAND is not set + +# +# ECC engine support +# +# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set +# CONFIG_MTD_NAND_ECC_SW_BCH is not set +# CONFIG_MTD_NAND_ECC_MXIC is not set +# end of ECC engine support +# end of NAND + +# +# LPDDR & LPDDR2 PCM memory drivers +# +# CONFIG_MTD_LPDDR is not set +# end of LPDDR & LPDDR2 PCM memory drivers + +CONFIG_MTD_SPI_NOR=m +CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y +# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set +CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y +# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_MTD_UBI_BLOCK=y +# CONFIG_MTD_HYPERBUS is not set +CONFIG_DTC=y +CONFIG_OF=y +# CONFIG_OF_UNITTEST is not set +CONFIG_OF_FLATTREE=y +CONFIG_OF_EARLY_FLATTREE=y +CONFIG_OF_KOBJ=y +CONFIG_OF_ADDRESS=y +CONFIG_OF_IRQ=y +CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m +CONFIG_PARPORT_PC_FIFO=y +# CONFIG_PARPORT_PC_SUPERIO is not set +CONFIG_PARPORT_1284=y +CONFIG_PARPORT_NOT_PC=y +CONFIG_PNP=y +# CONFIG_PNP_DEBUG_MESSAGES is not set + +# +# Protocols +# +CONFIG_PNPACPI=y +CONFIG_BLK_DEV=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_CDROM=m +# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set +CONFIG_ZRAM=m +# CONFIG_ZRAM_DEF_COMP_LZORLE is not set +CONFIG_ZRAM_DEF_COMP_ZSTD=y +# CONFIG_ZRAM_DEF_COMP_LZ4 is not set +# CONFIG_ZRAM_DEF_COMP_LZO is not set +# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set +# CONFIG_ZRAM_DEF_COMP_842 is not set +CONFIG_ZRAM_DEF_COMP="zstd" +CONFIG_ZRAM_WRITEBACK=y +# CONFIG_ZRAM_MEMORY_TRACKING is not set +# 
CONFIG_ZRAM_MULTI_COMP is not set +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_DRBD=m +# CONFIG_DRBD_FAULT_INJECTION is not set +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_COUNT=16 +CONFIG_BLK_DEV_RAM_SIZE=8192 +CONFIG_CDROM_PKTCDVD=m +CONFIG_CDROM_PKTCDVD_BUFFERS=8 +# CONFIG_CDROM_PKTCDVD_WCACHE is not set +# CONFIG_ATA_OVER_ETH is not set +CONFIG_VIRTIO_BLK=m +CONFIG_BLK_DEV_RBD=m +# CONFIG_BLK_DEV_UBLK is not set + +# +# NVME Support +# +CONFIG_NVME_CORE=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +# CONFIG_NVME_VERBOSE_ERRORS is not set +# CONFIG_NVME_HWMON is not set +CONFIG_NVME_FABRICS=m +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +# CONFIG_NVME_AUTH is not set +CONFIG_NVME_TARGET=m +CONFIG_NVME_TARGET_PASSTHRU=y +CONFIG_NVME_TARGET_LOOP=m +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_NVME_TARGET_TCP=m +# CONFIG_NVME_TARGET_AUTH is not set +# end of NVME Support + +# +# Misc devices +# +CONFIG_SENSORS_LIS3LV02D=m +# CONFIG_AD525X_DPOT is not set +# CONFIG_DUMMY_IRQ is not set +# CONFIG_PHANTOM is not set +CONFIG_TIFM_CORE=m +CONFIG_TIFM_7XX1=m +# CONFIG_ICS932S401 is not set +CONFIG_ENCLOSURE_SERVICES=m +# CONFIG_HP_ILO is not set +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m +# CONFIG_HMC6352 is not set +# CONFIG_DS1682 is not set +# CONFIG_LATTICE_ECP3_CONFIG is not set +# CONFIG_SRAM is not set +# CONFIG_DW_XDATA_PCIE is not set +# CONFIG_PCI_ENDPOINT_TEST is not set +# CONFIG_XILINX_SDFEC is not set +CONFIG_MISC_RTSX=m +# CONFIG_HISI_HIKEY_USB is not set +# CONFIG_OPEN_DICE is not set +# CONFIG_VCPU_STALL_DETECTOR is not set +# CONFIG_C2PORT is not set + +# +# EEPROM support +# +CONFIG_EEPROM_AT24=m +# CONFIG_EEPROM_AT25 is not set +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_EEPROM_93CX6=m +# CONFIG_EEPROM_93XX46 is not set +# CONFIG_EEPROM_IDT_89HPESX is not set +# CONFIG_EEPROM_EE1004 is not set +# end of EEPROM support + +CONFIG_CB710_CORE=m +# CONFIG_CB710_DEBUG is not set +CONFIG_CB710_DEBUG_ASSUMPTIONS=y + +# +# Texas Instruments shared transport line discipline +# +# CONFIG_TI_ST is not set +# end of Texas Instruments shared transport line discipline + +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_ALTERA_STAPL=m +# CONFIG_GENWQE is not set +# CONFIG_ECHO is not set +# CONFIG_BCM_VK is not set +# CONFIG_MISC_ALCOR_PCI is not set +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +# CONFIG_PVPANIC_MMIO is not set +# CONFIG_PVPANIC_PCI is not set +# CONFIG_GP_PCI1XXXX is not set +# end of Misc devices + +# +# SCSI device support +# +CONFIG_SCSI_MOD=y +CONFIG_RAID_ATTRS=y +CONFIG_SCSI_COMMON=y +CONFIG_SCSI=y +CONFIG_SCSI_DMA=y +CONFIG_SCSI_NETLINK=y +CONFIG_SCSI_PROC_FS=y + +# +# SCSI support type (disk, tape, CD-ROM) +# +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_BLK_DEV_BSG=y +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y + +# +# SCSI Transports +# +CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_ISCSI_ATTRS=m +CONFIG_SCSI_SAS_ATTRS=y +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SAS_HOST_SMP=y +CONFIG_SCSI_SRP_ATTRS=m +# end of SCSI Transports + +CONFIG_SCSI_LOWLEVEL=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=m +# CONFIG_SCSI_CXGB3_ISCSI is not set +CONFIG_SCSI_CXGB4_ISCSI=m 
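The block-device options above build the loop driver as a module (CONFIG_BLK_DEV_LOOP=m). A minimal C sketch of the attach sequence through /dev/loop-control, essentially the same steps losetup performs; it assumes the module is loaded so the control node exists, and takes the backing file from argv::

    #include <fcntl.h>
    #include <linux/loop.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            char dev[64];
            int ctl, nr, loopfd, backing;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <backing-file>\n", argv[0]);
                    return 1;
            }
            ctl = open("/dev/loop-control", O_RDWR);
            if (ctl < 0) {
                    perror("/dev/loop-control");
                    return 1;
            }
            nr = ioctl(ctl, LOOP_CTL_GET_FREE);     /* returns a free minor */
            if (nr < 0) {
                    perror("LOOP_CTL_GET_FREE");
                    return 1;
            }
            snprintf(dev, sizeof(dev), "/dev/loop%d", nr);
            loopfd = open(dev, O_RDWR);
            backing = open(argv[1], O_RDWR);
            if (loopfd < 0 || backing < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(loopfd, LOOP_SET_FD, backing) < 0) {  /* attach */
                    perror("LOOP_SET_FD");
                    return 1;
            }
            printf("attached %s to %s\n", argv[1], dev);
            return 0;
    }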
+CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +CONFIG_SCSI_HPSA=m +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +CONFIG_SCSI_AACRAID=m +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +CONFIG_SCSI_MVSAS=y +# CONFIG_SCSI_MVSAS_DEBUG is not set +CONFIG_SCSI_MVSAS_TASKLET=y +CONFIG_SCSI_MVUMI=y +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +CONFIG_MEGARAID_NEWGEN=y +CONFIG_MEGARAID_MM=y +CONFIG_MEGARAID_MAILBOX=y +CONFIG_MEGARAID_LEGACY=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS_MAX_SGE=128 +CONFIG_SCSI_MPT3SAS_MAX_SGE=128 +CONFIG_SCSI_MPT2SAS=m +# CONFIG_SCSI_MPI3MR is not set +CONFIG_SCSI_SMARTPQI=m +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_BUSLOGIC is not set +# CONFIG_SCSI_MYRB is not set +# CONFIG_SCSI_MYRS is not set +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_FCOE=m +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FDOMAIN_PCI is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_PPA is not set +# CONFIG_SCSI_IMM is not set +# CONFIG_SCSI_STEX is not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_IPR is not set +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_FC=m +CONFIG_TCM_QLA2XXX=m +# CONFIG_TCM_QLA2XXX_DEBUG is not set +CONFIG_SCSI_QLA_ISCSI=m +# CONFIG_SCSI_LPFC is not set +# CONFIG_SCSI_EFCT is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set +# CONFIG_SCSI_BFA_FC is not set +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +# end of SCSI device support + +CONFIG_ATA=y +CONFIG_SATA_HOST=y +CONFIG_PATA_TIMINGS=y +CONFIG_ATA_VERBOSE_ERROR=y +CONFIG_ATA_FORCE=y +CONFIG_ATA_ACPI=y +# CONFIG_SATA_ZPODD is not set +CONFIG_SATA_PMP=y + +# +# Controllers with non-SFF native interface +# +CONFIG_SATA_AHCI=y +CONFIG_SATA_MOBILE_LPM_POLICY=0 +CONFIG_SATA_AHCI_PLATFORM=y +# CONFIG_AHCI_DWC is not set +# CONFIG_AHCI_CEVA is not set +# CONFIG_SATA_INIC162X is not set +# CONFIG_SATA_ACARD_AHCI is not set +# CONFIG_SATA_SIL24 is not set +CONFIG_ATA_SFF=y + +# +# SFF controllers with custom DMA interface +# +# CONFIG_PDC_ADMA is not set +# CONFIG_SATA_QSTOR is not set +# CONFIG_SATA_SX4 is not set +CONFIG_ATA_BMDMA=y + +# +# SATA SFF controllers with BMDMA +# +CONFIG_ATA_PIIX=m +# CONFIG_SATA_DWC is not set +# CONFIG_SATA_MV is not set +# CONFIG_SATA_NV is not set +# CONFIG_SATA_PROMISE is not set +# CONFIG_SATA_SIL is not set +# CONFIG_SATA_SIS is not set +# CONFIG_SATA_SVW is not set +# CONFIG_SATA_ULI is not set +# CONFIG_SATA_VIA is not set +# CONFIG_SATA_VITESSE is not set +# CONFIG_SATA_ZHAOXIN is not set + +# +# PATA SFF controllers with BMDMA +# +# CONFIG_PATA_ALI is not set +# CONFIG_PATA_AMD is not set +# CONFIG_PATA_ARTOP is not set +CONFIG_PATA_ATIIXP=y +# CONFIG_PATA_ATP867X is not set +# CONFIG_PATA_CMD64X is not set +# CONFIG_PATA_CYPRESS is not set +# CONFIG_PATA_EFAR is not set +# CONFIG_PATA_HPT366 is not set +# CONFIG_PATA_HPT37X is not set +# CONFIG_PATA_HPT3X2N is not set +# CONFIG_PATA_HPT3X3 is not set +# CONFIG_PATA_IT8213 is not set +# 
CONFIG_PATA_IT821X is not set +# CONFIG_PATA_JMICRON is not set +# CONFIG_PATA_MARVELL is not set +# CONFIG_PATA_NETCELL is not set +# CONFIG_PATA_NINJA32 is not set +# CONFIG_PATA_NS87415 is not set +# CONFIG_PATA_OLDPIIX is not set +# CONFIG_PATA_OPTIDMA is not set +# CONFIG_PATA_PDC2027X is not set +# CONFIG_PATA_PDC_OLD is not set +# CONFIG_PATA_RADISYS is not set +# CONFIG_PATA_RDC is not set +# CONFIG_PATA_SCH is not set +# CONFIG_PATA_SERVERWORKS is not set +# CONFIG_PATA_SIL680 is not set +# CONFIG_PATA_SIS is not set +# CONFIG_PATA_TOSHIBA is not set +# CONFIG_PATA_TRIFLEX is not set +# CONFIG_PATA_VIA is not set +# CONFIG_PATA_WINBOND is not set + +# +# PIO-only SFF controllers +# +# CONFIG_PATA_CMD640_PCI is not set +# CONFIG_PATA_MPIIX is not set +# CONFIG_PATA_NS87410 is not set +# CONFIG_PATA_OPTI is not set +# CONFIG_PATA_OF_PLATFORM is not set +# CONFIG_PATA_RZ1000 is not set +# CONFIG_PATA_PARPORT is not set + +# +# Generic fallback / legacy drivers +# +# CONFIG_PATA_ACPI is not set +CONFIG_ATA_GENERIC=m +# CONFIG_PATA_LEGACY is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_AUTODETECT=y +CONFIG_MD_BITMAP_FILE=y +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +# CONFIG_MD_CLUSTER is not set +CONFIG_BCACHE=m +# CONFIG_BCACHE_DEBUG is not set +# CONFIG_BCACHE_CLOSURES_DEBUG is not set +# CONFIG_BCACHE_ASYNC_REGISTRATION is not set +CONFIG_BLK_DEV_DM_BUILTIN=y +CONFIG_BLK_DEV_DM=m +# CONFIG_DM_DEBUG is not set +CONFIG_DM_BUFIO=m +# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set +CONFIG_DM_BIO_PRISON=m +CONFIG_DM_PERSISTENT_DATA=m +# CONFIG_DM_UNSTRIPED is not set +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_CACHE_SMQ=m +CONFIG_DM_WRITECACHE=m +# CONFIG_DM_EBS is not set +CONFIG_DM_ERA=m +# CONFIG_DM_CLONE is not set +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +# CONFIG_DM_MULTIPATH_HST is not set +# CONFIG_DM_MULTIPATH_IOA is not set +CONFIG_DM_DELAY=m +# CONFIG_DM_DUST is not set +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set +# CONFIG_DM_VERITY_FEC is not set +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +# CONFIG_DM_ZONED is not set +CONFIG_DM_AUDIT=y +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +# CONFIG_TCM_FC is not set +CONFIG_ISCSI_TARGET=m +CONFIG_ISCSI_TARGET_CXGB4=m +# CONFIG_SBP_TARGET is not set +# CONFIG_REMOTE_TARGET is not set +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +# CONFIG_FUSION_FC is not set +CONFIG_FUSION_SAS=m +CONFIG_FUSION_MAX_SGE=128 +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y + +# +# IEEE 1394 (FireWire) support +# +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m +# CONFIG_FIREWIRE_NOSY is not set +# end of IEEE 1394 (FireWire) support + +CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_NET_CORE=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +# CONFIG_WIREGUARD_DEBUG is not set +# CONFIG_EQUALIZER is not set +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m 
+CONFIG_IPVLAN_L3S=y +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +# CONFIG_BAREUDP is not set +# CONFIG_GTP is not set +# CONFIG_AMT is not set +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +# CONFIG_NETCONSOLE_EXTENDED_LOG is not set +CONFIG_NETPOLL=y +CONFIG_NET_POLL_CONTROLLER=y +CONFIG_NTB_NETDEV=m +CONFIG_RIONET=m +CONFIG_RIONET_TX_SIZE=128 +CONFIG_RIONET_RX_SIZE=128 +CONFIG_TUN=m +CONFIG_TAP=m +# CONFIG_TUN_VNET_CROSS_LE is not set +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ARCNET is not set +# CONFIG_ATM_DRIVERS is not set + +# +# Distributed Switch Architecture drivers +# +# CONFIG_B53 is not set +# CONFIG_NET_DSA_BCM_SF2 is not set +# CONFIG_NET_DSA_LOOP is not set +# CONFIG_NET_DSA_HIRSCHMANN_HELLCREEK is not set +# CONFIG_NET_DSA_LANTIQ_GSWIP is not set +# CONFIG_NET_DSA_MT7530 is not set +# CONFIG_NET_DSA_MV88E6060 is not set +# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set +# CONFIG_NET_DSA_MV88E6XXX is not set +# CONFIG_NET_DSA_AR9331 is not set +# CONFIG_NET_DSA_QCA8K is not set +# CONFIG_NET_DSA_SJA1105 is not set +# CONFIG_NET_DSA_XRS700X_I2C is not set +# CONFIG_NET_DSA_XRS700X_MDIO is not set +# CONFIG_NET_DSA_REALTEK is not set +# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set +# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set +# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set +# end of Distributed Switch Architecture drivers + +CONFIG_ETHERNET=y +CONFIG_MDIO=m +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set +# CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_NET_VENDOR_ASIX=y +# CONFIG_SPI_AX88796C is not set +# CONFIG_NET_VENDOR_ATHEROS is not set +CONFIG_NET_VENDOR_BROADCOM=y +# CONFIG_B44 is not set +# CONFIG_BCMGENET is not set +CONFIG_BNX2=y +CONFIG_CNIC=m +CONFIG_TIGON3=m +CONFIG_TIGON3_HWMON=y +CONFIG_BNX2X=m +CONFIG_BNX2X_SRIOV=y +# CONFIG_SYSTEMPORT is not set +CONFIG_BNXT=m +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +CONFIG_BNXT_DCB=y +CONFIG_BNXT_HWMON=y +CONFIG_NET_VENDOR_CADENCE=y +# CONFIG_MACB is not set +# CONFIG_NET_VENDOR_CAVIUM is not set +CONFIG_NET_VENDOR_CHELSIO=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +CONFIG_CHELSIO_T3=m +CONFIG_CHELSIO_T4=m +# CONFIG_CHELSIO_T4_DCB is not set +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_LIB=m +CONFIG_CHELSIO_INLINE_CRYPTO=y +# CONFIG_CRYPTO_DEV_CHELSIO_TLS is not set +CONFIG_CHELSIO_IPSEC_INLINE=m +# CONFIG_CHELSIO_TLS_DEVICE is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_NET_VENDOR_DAVICOM=y +# CONFIG_DM9051 is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +CONFIG_NET_VENDOR_ENGLEDER=y +# CONFIG_TSNEP is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_NET_VENDOR_FUNGIBLE=y +# CONFIG_FUN_ETH is not set +CONFIG_NET_VENDOR_GOOGLE=y +CONFIG_NET_VENDOR_HUAWEI=y +# CONFIG_NET_VENDOR_I825XX is not set +CONFIG_NET_VENDOR_INTEL=y +# CONFIG_E100 is not set +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGB_HWMON=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_HWMON=y +CONFIG_IXGBE_DCB=y +CONFIG_IXGBE_IPSEC=y +CONFIG_IXGBEVF=m 
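CONFIG_TUN=m in the netdevice options above provides the /dev/net/tun clone device used by VPNs and userspace network stacks. A minimal C sketch creating an L3 tun interface; the name tun-demo is arbitrary, and the TUNSETIFF ioctl needs CAP_NET_ADMIN::

    #include <fcntl.h>
    #include <linux/if.h>
    #include <linux/if_tun.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>

    int main(void)
    {
            struct ifreq ifr;
            char buf[2048];
            ssize_t n;
            int fd = open("/dev/net/tun", O_RDWR);

            if (fd < 0) {
                    perror("/dev/net/tun");
                    return 1;
            }
            memset(&ifr, 0, sizeof(ifr));
            ifr.ifr_flags = IFF_TUN | IFF_NO_PI;    /* L3, no metadata header */
            strncpy(ifr.ifr_name, "tun-demo", IFNAMSIZ - 1);
            if (ioctl(fd, TUNSETIFF, &ifr) < 0) {   /* needs CAP_NET_ADMIN */
                    perror("TUNSETIFF");
                    return 1;
            }
            printf("created %s, waiting for one packet\n", ifr.ifr_name);
            n = read(fd, buf, sizeof(buf));         /* blocks until traffic */
            printf("read %zd bytes\n", n);
            close(fd);
            return 0;
    }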
+CONFIG_IXGBEVF_IPSEC=y +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_IAVF=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_ICE_SWITCHDEV=y +CONFIG_FM10K=m +# CONFIG_IGC is not set +# CONFIG_JME is not set +CONFIG_NET_VENDOR_ADI=y +# CONFIG_ADIN1110 is not set +CONFIG_NET_VENDOR_LITEX=y +# CONFIG_LITEX_LITEETH is not set +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_NET_VENDOR_MELLANOX=y +CONFIG_MLX4_EN=m +CONFIG_MLX4_EN_DCB=y +CONFIG_MLX4_CORE=m +CONFIG_MLX4_DEBUG=y +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_EN_ARFS=y +CONFIG_MLX5_EN_RXNFC=y +CONFIG_MLX5_MPFS=y +CONFIG_MLX5_ESWITCH=y +CONFIG_MLX5_BRIDGE=y +CONFIG_MLX5_CLS_ACT=y +CONFIG_MLX5_TC_CT=y +CONFIG_MLX5_TC_SAMPLE=y +CONFIG_MLX5_CORE_EN_DCB=y +CONFIG_MLX5_CORE_IPOIB=y +# CONFIG_MLX5_MACSEC is not set +# CONFIG_MLX5_EN_IPSEC is not set +# CONFIG_MLX5_EN_TLS is not set +CONFIG_MLX5_SW_STEERING=y +# CONFIG_MLX5_SF is not set +CONFIG_MLXSW_CORE=m +CONFIG_MLXSW_CORE_HWMON=y +CONFIG_MLXSW_CORE_THERMAL=y +CONFIG_MLXSW_PCI=m +CONFIG_MLXSW_I2C=m +CONFIG_MLXSW_SPECTRUM=m +CONFIG_MLXSW_SPECTRUM_DCB=y +CONFIG_MLXSW_MINIMAL=m +CONFIG_MLXFW=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set +CONFIG_NET_VENDOR_MICROSOFT=y +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_FEALNX is not set +# CONFIG_NET_VENDOR_NI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_NET_VENDOR_NETERION=y +# CONFIG_S2IO is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m +CONFIG_NET_VENDOR_PACKET_ENGINES=y +# CONFIG_HAMACHI is not set +# CONFIG_YELLOWFIN is not set +CONFIG_NET_VENDOR_PENSANDO=y +# CONFIG_IONIC is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RDC is not set +CONFIG_NET_VENDOR_REALTEK=y +CONFIG_8139CP=m +CONFIG_8139TOO=m +# CONFIG_8139TOO_PIO is not set +# CONFIG_8139TOO_TUNE_TWISTER is not set +CONFIG_8139TOO_8129=y +# CONFIG_8139_OLD_RX_RESET is not set +CONFIG_R8169=m +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set +CONFIG_NET_VENDOR_STMICRO=y +CONFIG_STMMAC_ETH=y +# CONFIG_STMMAC_SELFTESTS is not set +CONFIG_STMMAC_PLATFORM=y +# CONFIG_DWMAC_DWC_QOS_ETH is not set +CONFIG_DWMAC_GENERIC=y +# CONFIG_DWMAC_INTEL_PLAT is not set +CONFIG_DWMAC_LOONGSON=m +# CONFIG_STMMAC_PCI is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_NET_VENDOR_XILINX is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=y +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_PHYLIB_LEDS=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=y + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m 
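With the Ethernet drivers above built, link state can be queried through the classic SIOCETHTOOL ioctl. A minimal C sketch; eth0 is only a placeholder for whatever name the NIC driver registers::

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(int argc, char **argv)
    {
            const char *ifname = argc > 1 ? argv[1] : "eth0";
            struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
            struct ifreq ifr;
            int s = socket(AF_INET, SOCK_DGRAM, 0);

            if (s < 0) {
                    perror("socket");
                    return 1;
            }
            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (void *)&ev;
            if (ioctl(s, SIOCETHTOOL, &ifr) < 0) {
                    perror("SIOCETHTOOL");
                    return 1;
            }
            printf("%s: link %s\n", ifname, ev.data ? "up" : "down");
            return 0;
    }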
+# CONFIG_AX88796B_PHY is not set +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +# CONFIG_PSE_CONTROLLER is not set +CONFIG_CAN_DEV=m +CONFIG_CAN_VCAN=m +# CONFIG_CAN_VXCAN is not set +CONFIG_CAN_NETLINK=y +CONFIG_CAN_CALC_BITTIMING=y +# CONFIG_CAN_CAN327 is not set +# CONFIG_CAN_FLEXCAN is not set +# CONFIG_CAN_GRCAN is not set +# CONFIG_CAN_KVASER_PCIEFD is not set +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +# CONFIG_CAN_CC770_ISA is not set +CONFIG_CAN_CC770_PLATFORM=m +# CONFIG_CAN_CTUCANFD_PCI is not set +# CONFIG_CAN_CTUCANFD_PLATFORM is not set +# CONFIG_CAN_IFI_CANFD is not set +# CONFIG_CAN_M_CAN is not set +# CONFIG_CAN_PEAK_PCIEFD is not set +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +# CONFIG_CAN_F81601 is not set +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PEAK_PCIEC=y +CONFIG_CAN_PLX_PCI=m +# CONFIG_CAN_SJA1000_ISA is not set +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m + +# +# CAN SPI interfaces +# +# CONFIG_CAN_HI311X is not set +# CONFIG_CAN_MCP251X is not set +# CONFIG_CAN_MCP251XFD is not set +# end of CAN SPI interfaces + +# +# CAN USB interfaces +# +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +# CONFIG_CAN_ESD_USB is not set +# CONFIG_CAN_ETAS_ES58X is not set +# CONFIG_CAN_F81604 is not set +# CONFIG_CAN_GS_USB is not set +CONFIG_CAN_KVASER_USB=m +# CONFIG_CAN_MCBA_USB is not set +CONFIG_CAN_PEAK_USB=m +# CONFIG_CAN_UCAN is not set +# end of CAN USB interfaces + +# CONFIG_CAN_DEBUG_DEVICES is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_OF_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +# CONFIG_MDIO_BCM_UNIMAC is not set +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +# CONFIG_MDIO_HISI_FEMAC is not set +CONFIG_MDIO_I2C=y +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_MSCC_MIIM=m +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_IPQ4019 is not set +# CONFIG_MDIO_IPQ8064 is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# +# CONFIG_MDIO_BUS_MUX_GPIO is not set +# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=y +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m 
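The SocketCAN controller drivers above (SJA1000, C_CAN, the USB adapters) are driven through PF_CAN sockets. A minimal C sketch sending one raw frame, assuming the CAN core and raw protocol (configured outside this hunk) are enabled and an interface named can0 is up::

    #include <linux/can.h>
    #include <linux/can/raw.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            struct sockaddr_can addr;
            struct can_frame frame;
            int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

            if (s < 0) {
                    perror("socket(PF_CAN)");
                    return 1;
            }
            memset(&addr, 0, sizeof(addr));
            addr.can_family = AF_CAN;
            addr.can_ifindex = if_nametoindex("can0");  /* assumed iface */
            if (!addr.can_ifindex) {
                    perror("if_nametoindex");
                    return 1;
            }
            if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    perror("bind");
                    return 1;
            }
            memset(&frame, 0, sizeof(frame));
            frame.can_id = 0x123;
            frame.can_dlc = 2;
            frame.data[0] = 0xde;
            frame.data[1] = 0xad;
            if (write(s, &frame, sizeof(frame)) != sizeof(frame)) {
                    perror("write");
                    return 1;
            }
            close(s);
            return 0;
    }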
+CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +# CONFIG_USB_NET_AX8817X is not set +# CONFIG_USB_NET_AX88179_178A is not set +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +# CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_NET_RNDIS_HOST=m +CONFIG_USB_NET_CDC_SUBSET_ENABLE=m +CONFIG_USB_NET_CDC_SUBSET=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y +# CONFIG_USB_BELKIN is not set +# CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y +# CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +# CONFIG_USB_CDC_PHONET is not set +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_USB_NET_AQC111 is not set +CONFIG_USB_RTL8153_ECM=m +CONFIG_WLAN=y +# CONFIG_WLAN_VENDOR_ADMTEK is not set +CONFIG_ATH_COMMON=m +CONFIG_WLAN_VENDOR_ATH=y +# CONFIG_ATH_DEBUG is not set +# CONFIG_ATH5K is not set +# CONFIG_ATH5K_PCI is not set +CONFIG_ATH9K_HW=m +CONFIG_ATH9K_COMMON=m +CONFIG_ATH9K_BTCOEX_SUPPORT=y +CONFIG_ATH9K=m +CONFIG_ATH9K_PCI=y +CONFIG_ATH9K_AHB=y +# CONFIG_ATH9K_DEBUGFS is not set +# CONFIG_ATH9K_DYNACK is not set +CONFIG_ATH9K_WOW=y +CONFIG_ATH9K_RFKILL=y +# CONFIG_ATH9K_CHANNEL_CONTEXT is not set +CONFIG_ATH9K_PCOEM=y +# CONFIG_ATH9K_PCI_NO_EEPROM is not set +CONFIG_ATH9K_HTC=m +# CONFIG_ATH9K_HTC_DEBUGFS is not set +# CONFIG_ATH9K_HWRNG is not set +# CONFIG_CARL9170 is not set +# CONFIG_ATH6KL is not set +# CONFIG_AR5523 is not set +# CONFIG_WIL6210 is not set +CONFIG_ATH10K=m +CONFIG_ATH10K_CE=y +CONFIG_ATH10K_PCI=m +# CONFIG_ATH10K_AHB is not set +# CONFIG_ATH10K_SDIO is not set +# CONFIG_ATH10K_USB is not set +# CONFIG_ATH10K_DEBUG is not set +# CONFIG_ATH10K_DEBUGFS is not set +# CONFIG_ATH10K_TRACING is not set +# CONFIG_WCN36XX is not set +# CONFIG_ATH11K is not set +# CONFIG_ATH12K is not set +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_WLAN_VENDOR_BROADCOM=y +# CONFIG_B43 is not set +# CONFIG_B43LEGACY is not set +CONFIG_BRCMUTIL=m +CONFIG_BRCMSMAC=m +CONFIG_BRCMSMAC_LEDS=y +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_PROTO_BCDC=y +CONFIG_BRCMFMAC_PROTO_MSGBUF=y +CONFIG_BRCMFMAC_SDIO=y +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_BRCM_TRACING is not set +# CONFIG_BRCMDBG is not set +# CONFIG_WLAN_VENDOR_CISCO is not set +CONFIG_WLAN_VENDOR_INTEL=y +# CONFIG_IPW2100 is not set +# CONFIG_IPW2200 is not set +# CONFIG_IWL4965 is not set +# CONFIG_IWL3945 is not set +CONFIG_IWLWIFI=m +CONFIG_IWLWIFI_LEDS=y +CONFIG_IWLDVM=m +CONFIG_IWLMVM=m +CONFIG_IWLWIFI_OPMODE_MODULAR=y + +# +# Debugging Options +# +# CONFIG_IWLWIFI_DEBUG 
is not set +CONFIG_IWLWIFI_DEVICE_TRACING=y +# end of Debugging Options + +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_WLAN_VENDOR_MARVELL=y +# CONFIG_LIBERTAS is not set +# CONFIG_LIBERTAS_THINFIRM is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m +# CONFIG_MWL8K is not set +CONFIG_WLAN_VENDOR_MEDIATEK=y +CONFIG_MT7601U=m +CONFIG_MT76_CORE=m +CONFIG_MT76_LEDS=y +CONFIG_MT76_USB=m +CONFIG_MT76x02_LIB=m +CONFIG_MT76x02_USB=m +CONFIG_MT76x0_COMMON=m +CONFIG_MT76x0U=m +# CONFIG_MT76x0E is not set +CONFIG_MT76x2_COMMON=m +# CONFIG_MT76x2E is not set +CONFIG_MT76x2U=m +# CONFIG_MT7603E is not set +# CONFIG_MT7615E is not set +# CONFIG_MT7663U is not set +# CONFIG_MT7663S is not set +# CONFIG_MT7915E is not set +# CONFIG_MT7921E is not set +# CONFIG_MT7921S is not set +# CONFIG_MT7921U is not set +# CONFIG_MT7996E is not set +CONFIG_WLAN_VENDOR_MICROCHIP=y +# CONFIG_WILC1000_SDIO is not set +# CONFIG_WILC1000_SPI is not set +CONFIG_WLAN_VENDOR_PURELIFI=y +# CONFIG_PLFXLC is not set +CONFIG_WLAN_VENDOR_RALINK=y +CONFIG_RT2X00=m +# CONFIG_RT2400PCI is not set +# CONFIG_RT2500PCI is not set +# CONFIG_RT61PCI is not set +CONFIG_RT2800PCI=m +CONFIG_RT2800PCI_RT33XX=y +CONFIG_RT2800PCI_RT35XX=y +CONFIG_RT2800PCI_RT53XX=y +CONFIG_RT2800PCI_RT3290=y +# CONFIG_RT2500USB is not set +# CONFIG_RT73USB is not set +CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT33XX=y +CONFIG_RT2800USB_RT35XX=y +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y +CONFIG_RT2800_LIB=m +CONFIG_RT2800_LIB_MMIO=m +CONFIG_RT2X00_LIB_MMIO=m +CONFIG_RT2X00_LIB_PCI=m +CONFIG_RT2X00_LIB_USB=m +CONFIG_RT2X00_LIB=m +CONFIG_RT2X00_LIB_FIRMWARE=y +CONFIG_RT2X00_LIB_CRYPTO=y +CONFIG_RT2X00_LIB_LEDS=y +# CONFIG_RT2X00_DEBUG is not set +CONFIG_WLAN_VENDOR_REALTEK=y +# CONFIG_RTL8180 is not set +# CONFIG_RTL8187 is not set +CONFIG_RTL_CARDS=m +CONFIG_RTL8192CE=m +CONFIG_RTL8192SE=m +CONFIG_RTL8192DE=m +CONFIG_RTL8723AE=m +CONFIG_RTL8723BE=m +CONFIG_RTL8188EE=m +CONFIG_RTL8192EE=m +CONFIG_RTL8821AE=m +CONFIG_RTL8192CU=m +CONFIG_RTLWIFI=m +CONFIG_RTLWIFI_PCI=m +CONFIG_RTLWIFI_USB=m +# CONFIG_RTLWIFI_DEBUG is not set +CONFIG_RTL8192C_COMMON=m +CONFIG_RTL8723_COMMON=m +CONFIG_RTLBTCOEXIST=m +CONFIG_RTL8XXXU=m +# CONFIG_RTL8XXXU_UNTESTED is not set +# CONFIG_RTW88 is not set +# CONFIG_RTW89 is not set +# CONFIG_WLAN_VENDOR_RSI is not set +CONFIG_WLAN_VENDOR_SILABS=y +# CONFIG_WFX is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set +CONFIG_WLAN_VENDOR_ZYDAS=y +# CONFIG_USB_ZD1201 is not set +CONFIG_ZD1211RW=m +# CONFIG_ZD1211RW_DEBUG is not set +CONFIG_WLAN_VENDOR_QUANTENNA=y +# CONFIG_QTNFMAC_PCIE is not set +CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +# CONFIG_VIRT_WIFI is not set +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +# CONFIG_HDLC_RAW_ETH is not set +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +# CONFIG_HDLC_X25 is not set +# CONFIG_PCI200SYN is not set +# CONFIG_WANXL is not set +# CONFIG_PC300TOO is not set +# CONFIG_FARSYNC is not set +# CONFIG_LAPBETHER is not set +CONFIG_IEEE802154_DRIVERS=m +CONFIG_IEEE802154_FAKELB=m +# CONFIG_IEEE802154_AT86RF230 is not set +# CONFIG_IEEE802154_MRF24J40 is not set +# CONFIG_IEEE802154_CC2520 is not set +# CONFIG_IEEE802154_ATUSB is not set +# CONFIG_IEEE802154_ADF7242 is not set +# CONFIG_IEEE802154_CA8210 is not set +# CONFIG_IEEE802154_MCR20A is not set +# CONFIG_IEEE802154_HWSIM is not set + +# +# Wireless WAN +# +# CONFIG_WWAN is not set 
+# end of Wireless WAN + +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_NET_FAILOVER=m +CONFIG_ISDN=y +CONFIG_ISDN_CAPI=y +CONFIG_CAPI_TRACE=y +CONFIG_ISDN_CAPI_MIDDLEWARE=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m + +# +# mISDN hardware drivers +# +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m +CONFIG_MISDN_HDLC=m +CONFIG_MISDN_IPAC=m +CONFIG_MISDN_ISAR=m + +# +# Input device support +# +CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y +CONFIG_INPUT_FF_MEMLESS=m +CONFIG_INPUT_SPARSEKMAP=y +# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_VIVALDIFMAP=y + +# +# Userland interfaces +# +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 +CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 +CONFIG_INPUT_JOYDEV=m +CONFIG_INPUT_EVDEV=y +# CONFIG_INPUT_EVBUG is not set + +# +# Input Device Drivers +# +CONFIG_INPUT_KEYBOARD=y +# CONFIG_KEYBOARD_ADC is not set +# CONFIG_KEYBOARD_ADP5588 is not set +# CONFIG_KEYBOARD_ADP5589 is not set +CONFIG_KEYBOARD_ATKBD=y +# CONFIG_KEYBOARD_QT1050 is not set +# CONFIG_KEYBOARD_QT1070 is not set +# CONFIG_KEYBOARD_QT2160 is not set +# CONFIG_KEYBOARD_DLINK_DIR685 is not set +# CONFIG_KEYBOARD_LKKBD is not set +# CONFIG_KEYBOARD_GPIO is not set +# CONFIG_KEYBOARD_GPIO_POLLED is not set +# CONFIG_KEYBOARD_TCA6416 is not set +# CONFIG_KEYBOARD_TCA8418 is not set +# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set +# CONFIG_KEYBOARD_LM8333 is not set +# CONFIG_KEYBOARD_MAX7359 is not set +# CONFIG_KEYBOARD_MCS is not set +# CONFIG_KEYBOARD_MPR121 is not set +# CONFIG_KEYBOARD_NEWTON is not set +# CONFIG_KEYBOARD_OPENCORES is not set +# CONFIG_KEYBOARD_SAMSUNG is not set +# CONFIG_KEYBOARD_STOWAWAY is not set +# CONFIG_KEYBOARD_SUNKBD is not set +# CONFIG_KEYBOARD_OMAP4 is not set +# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set +CONFIG_KEYBOARD_XTKBD=m +# CONFIG_KEYBOARD_CAP11XX is not set +# CONFIG_KEYBOARD_BCM is not set +# CONFIG_KEYBOARD_CYPRESS_SF is not set +CONFIG_INPUT_MOUSE=y +CONFIG_MOUSE_PS2=y +CONFIG_MOUSE_PS2_ALPS=y +CONFIG_MOUSE_PS2_BYD=y +CONFIG_MOUSE_PS2_LOGIPS2PP=y +CONFIG_MOUSE_PS2_SYNAPTICS=y +CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y +CONFIG_MOUSE_PS2_CYPRESS=y +CONFIG_MOUSE_PS2_TRACKPOINT=y +CONFIG_MOUSE_PS2_ELANTECH=y +CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y +CONFIG_MOUSE_PS2_SENTELIC=y +# CONFIG_MOUSE_PS2_TOUCHKIT is not set +CONFIG_MOUSE_PS2_FOCALTECH=y +CONFIG_MOUSE_PS2_SMBUS=y +CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_I2C=y +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +# CONFIG_MOUSE_GPIO is not set +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +# CONFIG_INPUT_JOYSTICK is not set +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +# CONFIG_TABLET_USB_HANWANG is not set +CONFIG_TABLET_USB_KBTAB=m +# CONFIG_TABLET_USB_PEGASUS is not set +CONFIG_TABLET_SERIAL_WACOM4=m +CONFIG_INPUT_TOUCHSCREEN=y +# CONFIG_TOUCHSCREEN_ADS7846 is not set +# CONFIG_TOUCHSCREEN_AD7877 is not set +# CONFIG_TOUCHSCREEN_AD7879 is not set +# CONFIG_TOUCHSCREEN_ADC is not set +# CONFIG_TOUCHSCREEN_AR1021_I2C is not set +# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set +# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set +# CONFIG_TOUCHSCREEN_BU21013 is not set +# CONFIG_TOUCHSCREEN_BU21029 is not set +# 
CONFIG_TOUCHSCREEN_CHIPONE_ICN8318 is not set +# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set +# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set +# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set +# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set +# CONFIG_TOUCHSCREEN_CYTTSP5 is not set +# CONFIG_TOUCHSCREEN_DYNAPRO is not set +# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set +# CONFIG_TOUCHSCREEN_EETI is not set +# CONFIG_TOUCHSCREEN_EGALAX is not set +# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set +# CONFIG_TOUCHSCREEN_EXC3000 is not set +# CONFIG_TOUCHSCREEN_FUJITSU is not set +# CONFIG_TOUCHSCREEN_GOODIX is not set +# CONFIG_TOUCHSCREEN_HIDEEP is not set +# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set +# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set +# CONFIG_TOUCHSCREEN_ILI210X is not set +# CONFIG_TOUCHSCREEN_ILITEK is not set +# CONFIG_TOUCHSCREEN_S6SY761 is not set +# CONFIG_TOUCHSCREEN_GUNZE is not set +# CONFIG_TOUCHSCREEN_EKTF2127 is not set +# CONFIG_TOUCHSCREEN_ELAN is not set +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m +# CONFIG_TOUCHSCREEN_MAX11801 is not set +# CONFIG_TOUCHSCREEN_MCS5000 is not set +# CONFIG_TOUCHSCREEN_MMS114 is not set +# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set +# CONFIG_TOUCHSCREEN_MSG2638 is not set +# CONFIG_TOUCHSCREEN_MTOUCH is not set +# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set +# CONFIG_TOUCHSCREEN_IMAGIS is not set +# CONFIG_TOUCHSCREEN_IMX6UL_TSC is not set +# CONFIG_TOUCHSCREEN_INEXIO is not set +# CONFIG_TOUCHSCREEN_PENMOUNT is not set +# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set +# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set +# CONFIG_TOUCHSCREEN_TOUCHWIN is not set +# CONFIG_TOUCHSCREEN_PIXCIR is not set +# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set +# CONFIG_TOUCHSCREEN_WM97XX is not set +# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set +# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set +# CONFIG_TOUCHSCREEN_TSC_SERIO is not set +# CONFIG_TOUCHSCREEN_TSC2004 is not set +# CONFIG_TOUCHSCREEN_TSC2005 is not set +# CONFIG_TOUCHSCREEN_TSC2007 is not set +# CONFIG_TOUCHSCREEN_RM_TS is not set +# CONFIG_TOUCHSCREEN_SILEAD is not set +# CONFIG_TOUCHSCREEN_SIS_I2C is not set +# CONFIG_TOUCHSCREEN_ST1232 is not set +# CONFIG_TOUCHSCREEN_STMFTS is not set +# CONFIG_TOUCHSCREEN_SUR40 is not set +# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set +# CONFIG_TOUCHSCREEN_SX8654 is not set +# CONFIG_TOUCHSCREEN_TPS6507X is not set +# CONFIG_TOUCHSCREEN_ZET6223 is not set +# CONFIG_TOUCHSCREEN_ZFORCE is not set +# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set +# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set +# CONFIG_TOUCHSCREEN_IQS5XX is not set +# CONFIG_TOUCHSCREEN_IQS7211 is not set +# CONFIG_TOUCHSCREEN_ZINITIX is not set +# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set +CONFIG_INPUT_MISC=y +# CONFIG_INPUT_AD714X is not set +# CONFIG_INPUT_ATMEL_CAPTOUCH is not set +# CONFIG_INPUT_BMA150 is not set +# CONFIG_INPUT_E3X0_BUTTON is not set +# CONFIG_INPUT_MMA8450 is not set +# CONFIG_INPUT_GPIO_BEEPER is not set +# CONFIG_INPUT_GPIO_DECODER is not set +# CONFIG_INPUT_GPIO_VIBRA is not set +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +# CONFIG_INPUT_KXTJ9 is not set +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m +CONFIG_INPUT_UINPUT=m +# CONFIG_INPUT_PCF8574 is not set +# CONFIG_INPUT_PWM_BEEPER is not set +# CONFIG_INPUT_PWM_VIBRA is not set +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +# CONFIG_INPUT_DA7280_HAPTICS is not set +# CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU 
is not set +# CONFIG_INPUT_IQS269A is not set +# CONFIG_INPUT_IQS626A is not set +# CONFIG_INPUT_IQS7222 is not set +# CONFIG_INPUT_CMA3000 is not set +# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set +# CONFIG_INPUT_DRV260X_HAPTICS is not set +# CONFIG_INPUT_DRV2665_HAPTICS is not set +# CONFIG_INPUT_DRV2667_HAPTICS is not set +CONFIG_RMI4_CORE=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F03=y +CONFIG_RMI4_F03_SERIO=m +CONFIG_RMI4_2D_SENSOR=y +CONFIG_RMI4_F11=y +CONFIG_RMI4_F12=y +CONFIG_RMI4_F30=y +CONFIG_RMI4_F34=y +# CONFIG_RMI4_F3A is not set +# CONFIG_RMI4_F54 is not set +CONFIG_RMI4_F55=y + +# +# Hardware I/O ports +# +CONFIG_SERIO=y +CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y +CONFIG_SERIO_I8042=y +CONFIG_SERIO_SERPORT=m +# CONFIG_SERIO_PARKBD is not set +# CONFIG_SERIO_PCIPS2 is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +# CONFIG_SERIO_PS2MULT is not set +CONFIG_SERIO_ARC_PS2=m +# CONFIG_SERIO_APBPS2 is not set +# CONFIG_SERIO_GPIO_PS2 is not set +# CONFIG_USERIO is not set +# CONFIG_GAMEPORT is not set +# end of Hardware I/O ports +# end of Input device support + +# +# Character devices +# +CONFIG_TTY=y +CONFIG_VT=y +CONFIG_CONSOLE_TRANSLATIONS=y +CONFIG_VT_CONSOLE=y +CONFIG_VT_CONSOLE_SLEEP=y +CONFIG_HW_CONSOLE=y +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_UNIX98_PTYS=y +CONFIG_LEGACY_PTYS=y +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_LEGACY_TIOCSTI=y +CONFIG_LDISC_AUTOLOAD=y + +# +# Serial drivers +# +CONFIG_SERIAL_EARLYCON=y +CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set +CONFIG_SERIAL_8250_PNP=y +CONFIG_SERIAL_8250_16550A_VARIANTS=y +# CONFIG_SERIAL_8250_FINTEK is not set +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y +CONFIG_SERIAL_8250_PCILIB=y +CONFIG_SERIAL_8250_PCI=y +CONFIG_SERIAL_8250_EXAR=y +CONFIG_SERIAL_8250_NR_UARTS=16 +CONFIG_SERIAL_8250_RUNTIME_UARTS=16 +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_MANY_PORTS=y +# CONFIG_SERIAL_8250_PCI1XXXX is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +# CONFIG_SERIAL_8250_DETECT_IRQ is not set +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_DWLIB=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_RT288X is not set +CONFIG_SERIAL_8250_PERICOM=y +# CONFIG_SERIAL_OF_PLATFORM is not set + +# +# Non-8250 serial port support +# +# CONFIG_SERIAL_MAX3100 is not set +# CONFIG_SERIAL_MAX310X is not set +# CONFIG_SERIAL_UARTLITE is not set +CONFIG_SERIAL_CORE=y +CONFIG_SERIAL_CORE_CONSOLE=y +CONFIG_SERIAL_JSM=m +# CONFIG_SERIAL_SIFIVE is not set +# CONFIG_SERIAL_SCCNXP is not set +# CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_ALTERA_JTAGUART is not set +# CONFIG_SERIAL_ALTERA_UART is not set +# CONFIG_SERIAL_XILINX_PS_UART is not set +CONFIG_SERIAL_ARC=m +CONFIG_SERIAL_ARC_NR_PORTS=1 +# CONFIG_SERIAL_RP2 is not set +# CONFIG_SERIAL_FSL_LPUART is not set +# CONFIG_SERIAL_FSL_LINFLEXUART is not set +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_SPRD is not set +# end of Serial drivers + +CONFIG_SERIAL_MCTRL_GPIO=y +CONFIG_SERIAL_NONSTANDARD=y +# CONFIG_MOXA_INTELLIO is not set +# CONFIG_MOXA_SMARTIO is not set +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m +# CONFIG_NULL_TTY is not set +CONFIG_HVC_DRIVER=y +# CONFIG_SERIAL_DEV_BUS is not set +# CONFIG_TTY_PRINTK is not set +CONFIG_PRINTER=m +# CONFIG_LP_CONSOLE is not set +CONFIG_PPDEV=m +CONFIG_VIRTIO_CONSOLE=y +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_DMI_DECODE=y +CONFIG_IPMI_PLAT_DATA=y +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m 
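CONFIG_INPUT_EVDEV=y in the input section above exposes every input device as /dev/input/eventN. A minimal C sketch dumping key events from one node; the path is illustrative, and opening it normally requires root::

    #include <fcntl.h>
    #include <linux/input.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            const char *path = argc > 1 ? argv[1] : "/dev/input/event0";
            struct input_event ev;
            int fd = open(path, O_RDONLY);

            if (fd < 0) {
                    perror(path);
                    return 1;
            }
            while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
                    if (ev.type == EV_KEY)
                            printf("key %u %s\n", ev.code,
                                   ev.value ? "down" : "up");
            }
            close(fd);
            return 0;
    }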
+CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m +# CONFIG_HW_RANDOM_BA431 is not set +CONFIG_HW_RANDOM_VIRTIO=m +# CONFIG_HW_RANDOM_CCTRNG is not set +# CONFIG_HW_RANDOM_XIPHERA is not set +# CONFIG_APPLICOM is not set +CONFIG_DEVMEM=y +CONFIG_DEVPORT=y +CONFIG_TCG_TPM=y +CONFIG_HW_RANDOM_TPM=y +CONFIG_TCG_TIS_CORE=m +# CONFIG_TCG_TIS is not set +CONFIG_TCG_TIS_SPI=m +# CONFIG_TCG_TIS_SPI_CR50 is not set +# CONFIG_TCG_TIS_I2C is not set +# CONFIG_TCG_TIS_I2C_CR50 is not set +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_CRB=y +# CONFIG_TCG_VTPM_PROXY is not set +CONFIG_TCG_TIS_ST33ZP24=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m +# CONFIG_XILLYBUS is not set +# CONFIG_XILLYUSB is not set +# end of Character devices + +# +# I2C support +# +CONFIG_I2C=y +CONFIG_ACPI_I2C_OPREGION=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_COMPAT=y +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_MUX is not set +CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=m +CONFIG_I2C_ALGOBIT=y +CONFIG_I2C_ALGOPCA=m + +# +# I2C Hardware Bus support +# + +# +# PC SMBus host controller drivers +# +# CONFIG_I2C_ALI1535 is not set +# CONFIG_I2C_ALI1563 is not set +# CONFIG_I2C_ALI15X3 is not set +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +# CONFIG_I2C_AMD_MP2 is not set +# CONFIG_I2C_I801 is not set +CONFIG_I2C_ISCH=m +CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +# CONFIG_I2C_NVIDIA_GPU is not set +# CONFIG_I2C_SIS5595 is not set +# CONFIG_I2C_SIS630 is not set +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +# CONFIG_I2C_ZHAOXIN is not set + +# +# ACPI drivers +# +CONFIG_I2C_SCMI=m +# CONFIG_I2C_ZHAOXIN_SMBUS is not set + +# +# I2C system bus drivers (mostly embedded / system-on-chip) +# +# CONFIG_I2C_CBUS_GPIO is not set +CONFIG_I2C_DESIGNWARE_CORE=y +# CONFIG_I2C_DESIGNWARE_SLAVE is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +# CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set +CONFIG_I2C_GPIO=y +# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set +CONFIG_I2C_LS2X=m +# CONFIG_I2C_OCORES is not set +CONFIG_I2C_PCA_PLATFORM=m +# CONFIG_I2C_RK3X is not set +CONFIG_I2C_SIMTEC=m +# CONFIG_I2C_XILINX is not set + +# +# External I2C/SMBus adapter drivers +# +CONFIG_I2C_DIOLAN_U2C=m +# CONFIG_I2C_CP2615 is not set +CONFIG_I2C_PARPORT=m +# CONFIG_I2C_PCI1XXXX is not set +# CONFIG_I2C_ROBOTFUZZ_OSIF is not set +# CONFIG_I2C_TAOS_EVM is not set +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m + +# +# Other I2C/SMBus bus drivers +# +# CONFIG_I2C_VIRTIO is not set +# end of I2C Hardware Bus support + +CONFIG_I2C_STUB=m +# CONFIG_I2C_SLAVE is not set +# CONFIG_I2C_DEBUG_CORE is not set +# CONFIG_I2C_DEBUG_ALGO is not set +# CONFIG_I2C_DEBUG_BUS is not set +# end of I2C support + +# CONFIG_I3C is not set +CONFIG_SPI=y +# CONFIG_SPI_DEBUG is not set +CONFIG_SPI_MASTER=y +CONFIG_SPI_MEM=y + +# +# SPI Master Controller Drivers +# +# CONFIG_SPI_ALTERA is not set +# CONFIG_SPI_AXI_SPI_ENGINE is not set +# CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_BUTTERFLY is not set +# CONFIG_SPI_CADENCE is not set +# CONFIG_SPI_CADENCE_XSPI is not set +# CONFIG_SPI_DESIGNWARE is not set +# CONFIG_SPI_GPIO is not set +# CONFIG_SPI_LM70_LLP is not set +CONFIG_SPI_LOONGSON_CORE=y +CONFIG_SPI_LOONGSON_PCI=y +CONFIG_SPI_LOONGSON_PLATFORM=m +# CONFIG_SPI_FSL_SPI is not set +# CONFIG_SPI_MICROCHIP_CORE is not set +# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set +# CONFIG_SPI_OC_TINY is 
not set +# CONFIG_SPI_PCI1XXXX is not set +# CONFIG_SPI_PXA2XX is not set +# CONFIG_SPI_SC18IS602 is not set +# CONFIG_SPI_SIFIVE is not set +# CONFIG_SPI_SN_F_OSPI is not set +# CONFIG_SPI_MXIC is not set +# CONFIG_SPI_XCOMM is not set +# CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +# CONFIG_SPI_AMD is not set + +# +# SPI Multiplexer support +# +# CONFIG_SPI_MUX is not set + +# +# SPI Protocol Masters +# +# CONFIG_SPI_SPIDEV is not set +# CONFIG_SPI_LOOPBACK_TEST is not set +# CONFIG_SPI_TLE62X0 is not set +# CONFIG_SPI_SLAVE is not set +CONFIG_SPI_DYNAMIC=y +# CONFIG_SPMI is not set +# CONFIG_HSI is not set +CONFIG_PPS=y +# CONFIG_PPS_DEBUG is not set + +# +# PPS clients support +# +# CONFIG_PPS_CLIENT_KTIMER is not set +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m + +# +# PPS generators support +# + +# +# PTP clock support +# +CONFIG_PTP_1588_CLOCK=y +CONFIG_PTP_1588_CLOCK_OPTIONAL=y +CONFIG_DP83640_PHY=m +# CONFIG_PTP_1588_CLOCK_INES is not set +# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set +# CONFIG_PTP_1588_CLOCK_IDTCM is not set +# CONFIG_PTP_1588_CLOCK_MOCK is not set +# CONFIG_PTP_1588_CLOCK_OCP is not set +# end of PTP clock support + +CONFIG_PINCTRL=y +CONFIG_PINMUX=y +CONFIG_PINCONF=y +CONFIG_GENERIC_PINCONF=y +# CONFIG_DEBUG_PINCTRL is not set +# CONFIG_PINCTRL_AMD is not set +# CONFIG_PINCTRL_CY8C95X0 is not set +CONFIG_PINCTRL_LOONGSON2=y +# CONFIG_PINCTRL_MCP23S08 is not set +# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set +# CONFIG_PINCTRL_OCELOT is not set +# CONFIG_PINCTRL_SINGLE is not set +# CONFIG_PINCTRL_STMFX is not set +# CONFIG_PINCTRL_SX150X is not set + +# +# Renesas pinctrl drivers +# +# end of Renesas pinctrl drivers + +CONFIG_GPIOLIB=y +CONFIG_GPIOLIB_FASTPATH_LIMIT=512 +CONFIG_OF_GPIO=y +CONFIG_GPIO_ACPI=y +CONFIG_GPIOLIB_IRQCHIP=y +# CONFIG_DEBUG_GPIO is not set +CONFIG_GPIO_SYSFS=y +CONFIG_GPIO_CDEV=y +CONFIG_GPIO_CDEV_V1=y +CONFIG_GPIO_GENERIC=y + +# +# Memory mapped GPIO drivers +# +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_AMDPT=m +# CONFIG_GPIO_CADENCE is not set +# CONFIG_GPIO_DWAPB is not set +# CONFIG_GPIO_EXAR is not set +# CONFIG_GPIO_FTGPIO010 is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set +# CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_HLWD is not set +# CONFIG_GPIO_LOGICVC is not set +CONFIG_GPIO_LOONGSON_64BIT=y +# CONFIG_GPIO_MB86S7X is not set +# CONFIG_GPIO_SIFIVE is not set +# CONFIG_GPIO_SYSCON is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_AMD_FCH is not set +# end of Memory mapped GPIO drivers + +# +# I2C GPIO expanders +# +# CONFIG_GPIO_ADNP is not set +# CONFIG_GPIO_FXL6408 is not set +# CONFIG_GPIO_DS4520 is not set +# CONFIG_GPIO_GW_PLD is not set +# CONFIG_GPIO_MAX7300 is not set +# CONFIG_GPIO_MAX732X is not set +# CONFIG_GPIO_PCA953X is not set +# CONFIG_GPIO_PCA9570 is not set +# CONFIG_GPIO_PCF857X is not set +# CONFIG_GPIO_TPIC2810 is not set +# end of I2C GPIO expanders + +# +# MFD GPIO expanders +# +# end of MFD GPIO expanders + +# +# PCI GPIO expanders +# +# CONFIG_GPIO_PCI_IDIO_16 is not set +# CONFIG_GPIO_PCIE_IDIO_24 is not set +# CONFIG_GPIO_RDC321X is not set +# end of PCI GPIO expanders + +# +# SPI GPIO expanders +# +# CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX3191X is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set +# CONFIG_GPIO_PISOSR is not set +# CONFIG_GPIO_XRA1403 is not set +# end of SPI GPIO expanders + +# +# USB GPIO expanders +# +CONFIG_GPIO_VIPERBOARD=m +# end of 
USB GPIO expanders + +# +# Virtual GPIO drivers +# +# CONFIG_GPIO_AGGREGATOR is not set +# CONFIG_GPIO_LATCH is not set +# CONFIG_GPIO_MOCKUP is not set +# CONFIG_GPIO_VIRTIO is not set +# CONFIG_GPIO_SIM is not set +# end of Virtual GPIO drivers + +# CONFIG_W1 is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_GPIO is not set +# CONFIG_POWER_RESET_GPIO_RESTART is not set +# CONFIG_POWER_RESET_LTC2952 is not set +# CONFIG_POWER_RESET_RESTART is not set +# CONFIG_POWER_RESET_SYSCON is not set +# CONFIG_POWER_RESET_SYSCON_POWEROFF is not set +# CONFIG_SYSCON_REBOOT_MODE is not set +# CONFIG_NVMEM_REBOOT_MODE is not set +CONFIG_POWER_SUPPLY=y +# CONFIG_POWER_SUPPLY_DEBUG is not set +CONFIG_POWER_SUPPLY_HWMON=y +# CONFIG_GENERIC_ADC_BATTERY is not set +# CONFIG_IP5XXX_POWER is not set +# CONFIG_TEST_POWER is not set +# CONFIG_CHARGER_ADP5061 is not set +# CONFIG_BATTERY_CW2015 is not set +# CONFIG_BATTERY_DS2780 is not set +# CONFIG_BATTERY_DS2781 is not set +# CONFIG_BATTERY_DS2782 is not set +# CONFIG_BATTERY_SAMSUNG_SDI is not set +# CONFIG_BATTERY_SBS is not set +# CONFIG_CHARGER_SBS is not set +# CONFIG_BATTERY_BQ27XXX is not set +# CONFIG_BATTERY_MAX17040 is not set +# CONFIG_BATTERY_MAX17042 is not set +# CONFIG_CHARGER_MAX8903 is not set +# CONFIG_CHARGER_LP8727 is not set +# CONFIG_CHARGER_GPIO is not set +# CONFIG_CHARGER_LT3651 is not set +# CONFIG_CHARGER_LTC4162L is not set +# CONFIG_CHARGER_DETECTOR_MAX14656 is not set +# CONFIG_CHARGER_MAX77976 is not set +# CONFIG_CHARGER_BQ2415X is not set +# CONFIG_CHARGER_BQ24257 is not set +# CONFIG_CHARGER_BQ24735 is not set +# CONFIG_CHARGER_BQ2515X is not set +# CONFIG_CHARGER_BQ25890 is not set +# CONFIG_CHARGER_BQ25980 is not set +# CONFIG_CHARGER_BQ256XX is not set +# CONFIG_BATTERY_GAUGE_LTC2941 is not set +# CONFIG_BATTERY_GOLDFISH is not set +# CONFIG_BATTERY_RT5033 is not set +# CONFIG_CHARGER_RT9455 is not set +# CONFIG_CHARGER_BD99954 is not set +# CONFIG_BATTERY_UG3105 is not set +CONFIG_HWMON=y +CONFIG_HWMON_VID=m +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +# CONFIG_SENSORS_ADM1177 is not set +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7X10=m +# CONFIG_SENSORS_ADT7310 is not set +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +# CONFIG_SENSORS_AHT10 is not set +# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set +# CONFIG_SENSORS_AS370 is not set +CONFIG_SENSORS_ASC7621=m +# CONFIG_SENSORS_AXI_FAN_CONTROL is not set +CONFIG_SENSORS_ATXP1=m +# CONFIG_SENSORS_CORSAIR_CPRO is not set +# CONFIG_SENSORS_CORSAIR_PSU is not set +# CONFIG_SENSORS_DRIVETEMP is not set +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +# CONFIG_SENSORS_FTSTEUTATES is not set +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_HS3001 is not set +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +# CONFIG_SENSORS_IIO_HWMON is not set +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +# CONFIG_SENSORS_POWR1220 is not set +CONFIG_SENSORS_LINEAGE=m +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC2947_I2C is not set 
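CONFIG_HWMON=y above roots the long list of sensor drivers that follows under /sys/class/hwmon. A minimal C sketch reading one temperature channel; hwmon0/temp1_input is illustrative, since the numbering depends on probe order::

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative node; real code should enumerate /sys/class/hwmon. */
            const char *path = "/sys/class/hwmon/hwmon0/temp1_input";
            FILE *f = fopen(path, "r");
            long mdeg;

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%ld", &mdeg) == 1)       /* value is millidegrees */
                    printf("%s: %.1f C\n", path, mdeg / 1000.0);
            fclose(f);
            return 0;
    }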
+# CONFIG_SENSORS_LTC2947_SPI is not set +# CONFIG_SENSORS_LTC2990 is not set +# CONFIG_SENSORS_LTC2992 is not set +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +# CONFIG_SENSORS_LTC4222 is not set +CONFIG_SENSORS_LTC4245=m +# CONFIG_SENSORS_LTC4260 is not set +CONFIG_SENSORS_LTC4261=m +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX127 is not set +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +# CONFIG_SENSORS_MAX31722 is not set +# CONFIG_SENSORS_MAX31730 is not set +# CONFIG_SENSORS_MAX31760 is not set +# CONFIG_MAX31827 is not set +# CONFIG_SENSORS_MAX6620 is not set +# CONFIG_SENSORS_MAX6621 is not set +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_MC34VR500 is not set +CONFIG_SENSORS_MCP3021=m +# CONFIG_SENSORS_TC654 is not set +# CONFIG_SENSORS_TPS23861 is not set +# CONFIG_SENSORS_MR75203 is not set +# CONFIG_SENSORS_ADCXX is not set +CONFIG_SENSORS_LM63=m +# CONFIG_SENSORS_LM70 is not set +CONFIG_SENSORS_LM73=m +CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m +CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +# CONFIG_SENSORS_NCT6683 is not set +CONFIG_SENSORS_NCT6775_CORE=m +CONFIG_SENSORS_NCT6775=m +# CONFIG_SENSORS_NCT6775_I2C is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_NPCM7XX is not set +# CONFIG_SENSORS_NZXT_KRAKEN2 is not set +# CONFIG_SENSORS_NZXT_SMART2 is not set +# CONFIG_SENSORS_OCC_P8_I2C is not set +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_PMBUS=m +# CONFIG_SENSORS_ACBEL_FSG032 is not set +# CONFIG_SENSORS_ADM1266 is not set +CONFIG_SENSORS_ADM1275=m +# CONFIG_SENSORS_BEL_PFE is not set +# CONFIG_SENSORS_BPA_RS600 is not set +# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set +# CONFIG_SENSORS_FSP_3Y is not set +# CONFIG_SENSORS_IBM_CFFPS is not set +# CONFIG_SENSORS_DPS920AB is not set +# CONFIG_SENSORS_INSPUR_IPSPS is not set +# CONFIG_SENSORS_IR35221 is not set +# CONFIG_SENSORS_IR36021 is not set +# CONFIG_SENSORS_IR38064 is not set +# CONFIG_SENSORS_IRPS5401 is not set +# CONFIG_SENSORS_ISL68137 is not set +CONFIG_SENSORS_LM25066=m +# CONFIG_SENSORS_LT7182S is not set +CONFIG_SENSORS_LTC2978=m +# CONFIG_SENSORS_LTC3815 is not set +# CONFIG_SENSORS_MAX15301 is not set +CONFIG_SENSORS_MAX16064=m +# CONFIG_SENSORS_MAX16601 is not set +# CONFIG_SENSORS_MAX20730 is not set +# CONFIG_SENSORS_MAX20751 is not set +# CONFIG_SENSORS_MAX31785 is not set +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +# CONFIG_SENSORS_MP2888 is not set +# CONFIG_SENSORS_MP2975 is not set +# CONFIG_SENSORS_MP5023 is not set +# CONFIG_SENSORS_MPQ7932 is not set +# CONFIG_SENSORS_PIM4328 is not set +# CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set 
+CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_PWM_FAN is not set +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y +# CONFIG_THERMAL_WRITABLE_TRIPS is not set +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +# CONFIG_THERMAL_GOV_BANG_BANG is not set +# CONFIG_THERMAL_GOV_USER_SPACE is not set +# CONFIG_CPU_THERMAL is not set +# CONFIG_DEVFREQ_THERMAL is not set +CONFIG_THERMAL_EMULATION=y +# CONFIG_THERMAL_MMIO is not set +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# 
CONFIG_MFD_ACT8945A is not set +# CONFIG_MFD_AS3711 is not set +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_AS3722 is not set +# CONFIG_PMIC_ADP5520 is not set +# CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_MAX5970 is not set +# CONFIG_PMIC_DA903X is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9052_I2C is not set +# CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_GATEWORKS_GSC is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_LPC_ICH is not set +CONFIG_LPC_SCH=m +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_88PM860X is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77541 is not set +# CONFIG_MFD_MAX77620 is not set +# CONFIG_MFD_MAX77650 is not set +# CONFIG_MFD_MAX77686 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77714 is not set +# CONFIG_MFD_MAX77843 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MAX8925 is not set +# CONFIG_MFD_MAX8997 is not set +# CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +# CONFIG_MFD_CPCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_NTXEC is not set +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# CONFIG_MFD_RC5T583 is not set +# CONFIG_MFD_RK8XX_I2C is not set +# CONFIG_MFD_RK8XX_SPI is not set +# CONFIG_MFD_RN5T618 is not set +# CONFIG_MFD_SEC_CORE is not set +# CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_STMPE is not set +CONFIG_MFD_SYSCON=y +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_LP8788 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_MFD_PALMAS is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TPS65090 is not set +# CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TI_LP87565 is not set +# CONFIG_MFD_TPS65218 is not set +# CONFIG_MFD_TPS65219 is not set +# CONFIG_MFD_TPS6586X is not set +# CONFIG_MFD_TPS65910 is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_TWL4030_CORE is not set +# CONFIG_TWL6040_CORE is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TC3589X is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_LOCHNAGAR is not set +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM8400 is not set +# CONFIG_MFD_WM831X_I2C is not set +# 
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_ROHM_BD957XMUF is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+CONFIG_RC_CORE=m
+CONFIG_LIRC=y
+CONFIG_RC_MAP=m
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
+CONFIG_IR_SANYO_DECODER=m
+CONFIG_IR_SHARP_DECODER=m
+CONFIG_IR_SONY_DECODER=m
+CONFIG_IR_XMP_DECODER=m
+CONFIG_RC_DEVICES=y
+CONFIG_IR_ENE=m
+CONFIG_IR_FINTEK=m
+# CONFIG_IR_GPIO_CIR is not set
+# CONFIG_IR_GPIO_TX is not set
+# CONFIG_IR_HIX5HD2 is not set
+# CONFIG_IR_IGORPLUGUSB is not set
+CONFIG_IR_IGUANA=m
+CONFIG_IR_IMON=m
+CONFIG_IR_IMON_RAW=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_NUVOTON=m
+# CONFIG_IR_PWM_TX is not set
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SERIAL=m
+CONFIG_IR_SERIAL_TRANSMITTER=y
+# CONFIG_IR_SPI is not set
+CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_TOY is not set
+CONFIG_IR_TTUSBIR=m
+CONFIG_RC_ATI_REMOTE=m
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_RC_XBOX_DVD is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_RC is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
+# CONFIG_CEC_CH7322 is not set
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=m
+# CONFIG_MEDIA_SUPPORT_FILTER is not set
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Media device types
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_TEST_SUPPORT=y
+# end of Media device types
+
+#
+# Media core support
+#
+CONFIG_VIDEO_DEV=m
+CONFIG_MEDIA_CONTROLLER=y
+CONFIG_DVB_CORE=m
+# end of Media core support
+
+#
+# Video4Linux options
+#
+CONFIG_VIDEO_V4L2_I2C=y
+CONFIG_VIDEO_V4L2_SUBDEV_API=y
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+CONFIG_VIDEO_TUNER=m
+CONFIG_V4L2_FWNODE=m
+CONFIG_V4L2_ASYNC=m
+# end of Video4Linux options
+
+#
+# Media controller options
+#
+CONFIG_MEDIA_CONTROLLER_DVB=y
+# end of Media controller options
+
+#
+# Digital TV options
+#
+# CONFIG_DVB_MMAP is not set
+CONFIG_DVB_NET=y
+CONFIG_DVB_MAX_ADAPTERS=8
+CONFIG_DVB_DYNAMIC_MINORS=y
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+# end of Digital TV options
+
+#
+# Media drivers
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+CONFIG_USB_GSPCA=m
+CONFIG_USB_GSPCA_BENQ=m
+CONFIG_USB_GSPCA_CONEX=m
+CONFIG_USB_GSPCA_CPIA1=m
+# CONFIG_USB_GSPCA_DTCS033 is not set
+CONFIG_USB_GSPCA_ETOMS=m
+CONFIG_USB_GSPCA_FINEPIX=m
+CONFIG_USB_GSPCA_JEILINJ=m
+CONFIG_USB_GSPCA_JL2005BCD=m
+# CONFIG_USB_GSPCA_KINECT is not set
+CONFIG_USB_GSPCA_KONICA=m
+CONFIG_USB_GSPCA_MARS=m
+CONFIG_USB_GSPCA_MR97310A=m
+CONFIG_USB_GSPCA_NW80X=m
+CONFIG_USB_GSPCA_OV519=m
+CONFIG_USB_GSPCA_OV534=m
+CONFIG_USB_GSPCA_OV534_9=m
+CONFIG_USB_GSPCA_PAC207=m
+CONFIG_USB_GSPCA_PAC7302=m
+CONFIG_USB_GSPCA_PAC7311=m
+CONFIG_USB_GSPCA_SE401=m
+CONFIG_USB_GSPCA_SN9C2028=m
+CONFIG_USB_GSPCA_SN9C20X=m
+CONFIG_USB_GSPCA_SONIXB=m
+CONFIG_USB_GSPCA_SONIXJ=m
+CONFIG_USB_GSPCA_SPCA1528=m
+CONFIG_USB_GSPCA_SPCA500=m
+CONFIG_USB_GSPCA_SPCA501=m
+CONFIG_USB_GSPCA_SPCA505=m
+CONFIG_USB_GSPCA_SPCA506=m
+CONFIG_USB_GSPCA_SPCA508=m
+CONFIG_USB_GSPCA_SPCA561=m
+CONFIG_USB_GSPCA_SQ905=m
+CONFIG_USB_GSPCA_SQ905C=m
+CONFIG_USB_GSPCA_SQ930X=m
+CONFIG_USB_GSPCA_STK014=m
+# CONFIG_USB_GSPCA_STK1135 is not set
+CONFIG_USB_GSPCA_STV0680=m
+CONFIG_USB_GSPCA_SUNPLUS=m
+CONFIG_USB_GSPCA_T613=m
+CONFIG_USB_GSPCA_TOPRO=m
+# CONFIG_USB_GSPCA_TOUPTEK is not set
+CONFIG_USB_GSPCA_TV8532=m
+CONFIG_USB_GSPCA_VC032X=m
+CONFIG_USB_GSPCA_VICAM=m
+CONFIG_USB_GSPCA_XIRLINK_CIT=m
+CONFIG_USB_GSPCA_ZC3XX=m
+CONFIG_USB_GL860=m
+CONFIG_USB_M5602=m
+CONFIG_USB_STV06XX=m
+CONFIG_USB_PWC=m
+# CONFIG_USB_PWC_DEBUG is not set
+CONFIG_USB_PWC_INPUT_EVDEV=y
+CONFIG_USB_S2255=m
+# CONFIG_VIDEO_USBTV is not set
+CONFIG_USB_VIDEO_CLASS=m
+CONFIG_USB_VIDEO_CLASS_INPUT_EVDEV=y
+
+#
+# Analog TV USB devices
+#
+# CONFIG_VIDEO_GO7007 is not set
+CONFIG_VIDEO_HDPVR=m
+CONFIG_VIDEO_PVRUSB2=m
+CONFIG_VIDEO_PVRUSB2_SYSFS=y
+CONFIG_VIDEO_PVRUSB2_DVB=y
+# CONFIG_VIDEO_PVRUSB2_DEBUGIFC is not set
+# CONFIG_VIDEO_STK1160 is not set
+
+#
+# Analog/digital TV USB devices
+#
+CONFIG_VIDEO_AU0828=m
+CONFIG_VIDEO_AU0828_V4L2=y
+# CONFIG_VIDEO_AU0828_RC is not set
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_AS102 is not set
+CONFIG_DVB_B2C2_FLEXCOP_USB=m
+# CONFIG_DVB_B2C2_FLEXCOP_USB_DEBUG is not set
+CONFIG_DVB_USB_V2=m
+CONFIG_DVB_USB_AF9035=m
+CONFIG_DVB_USB_ANYSEE=m
+CONFIG_DVB_USB_AU6610=m
+CONFIG_DVB_USB_AZ6007=m
+CONFIG_DVB_USB_CE6230=m
+# CONFIG_DVB_USB_DVBSKY is not set
+CONFIG_DVB_USB_EC168=m
+CONFIG_DVB_USB_GL861=m
+CONFIG_DVB_USB_LME2510=m
+CONFIG_DVB_USB_MXL111SF=m
+# CONFIG_DVB_USB_ZD1301 is not set
+CONFIG_DVB_USB=m
+# CONFIG_DVB_USB_DEBUG is not set
+CONFIG_DVB_USB_A800=m
+CONFIG_DVB_USB_AF9005=m
+CONFIG_DVB_USB_AF9005_REMOTE=m
+CONFIG_DVB_USB_AZ6027=m
+CONFIG_DVB_USB_CINERGY_T2=m
+CONFIG_DVB_USB_CXUSB=m
+# CONFIG_DVB_USB_CXUSB_ANALOG is not set
+CONFIG_DVB_USB_DIB0700=m
+CONFIG_DVB_USB_DIB3000MC=m
+CONFIG_DVB_USB_DIBUSB_MB=m
+# CONFIG_DVB_USB_DIBUSB_MB_FAULTY is not set
+CONFIG_DVB_USB_DIBUSB_MC=m
+CONFIG_DVB_USB_DIGITV=m
+CONFIG_DVB_USB_DTT200U=m
+CONFIG_DVB_USB_DTV5100=m
+CONFIG_DVB_USB_DW2102=m
+CONFIG_DVB_USB_GP8PSK=m
+CONFIG_DVB_USB_M920X=m
+CONFIG_DVB_USB_NOVA_T_USB2=m
+CONFIG_DVB_USB_OPERA1=m
+CONFIG_DVB_USB_PCTV452E=m
+CONFIG_DVB_USB_TECHNISAT_USB2=m
+CONFIG_DVB_USB_TTUSB2=m
+CONFIG_DVB_USB_UMT_010=m
+CONFIG_DVB_USB_VP702X=m
+CONFIG_DVB_USB_VP7045=m
+CONFIG_SMS_USB_DRV=m
+CONFIG_DVB_TTUSB_BUDGET=m
+CONFIG_DVB_TTUSB_DEC=m
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+CONFIG_VIDEO_EM28XX=m
+# CONFIG_VIDEO_EM28XX_V4L2 is not set
+CONFIG_VIDEO_EM28XX_ALSA=m
+CONFIG_VIDEO_EM28XX_DVB=m
+CONFIG_VIDEO_EM28XX_RC=m
+
+#
+# Software defined radio USB devices
+#
+# CONFIG_USB_AIRSPY is not set
+# CONFIG_USB_HACKRF is not set
+# CONFIG_USB_MSI2500 is not set
+CONFIG_MEDIA_PCI_SUPPORT=y
+
+#
+# Media capture support
+#
+# CONFIG_VIDEO_SOLO6X10 is not set
+# CONFIG_VIDEO_TW5864 is not set
+# CONFIG_VIDEO_TW68 is not set
+# CONFIG_VIDEO_TW686X is not set
+# CONFIG_VIDEO_ZORAN is not set
+
+#
+# Media capture/analog TV support
+#
+# CONFIG_VIDEO_DT3155 is not set
+CONFIG_VIDEO_IVTV=m
+# CONFIG_VIDEO_IVTV_ALSA is not set
+CONFIG_VIDEO_FB_IVTV=m
+# CONFIG_VIDEO_HEXIUM_GEMINI is not set
+# CONFIG_VIDEO_HEXIUM_ORION is not set
+# CONFIG_VIDEO_MXB is not set
+
+#
+# Media capture/analog/hybrid TV support
+#
+CONFIG_VIDEO_BT848=m
+CONFIG_DVB_BT8XX=m
+CONFIG_VIDEO_CX18=m
+# CONFIG_VIDEO_CX18_ALSA is not set
+CONFIG_VIDEO_CX23885=m
+CONFIG_MEDIA_ALTERA_CI=m
+# CONFIG_VIDEO_CX25821 is not set
+CONFIG_VIDEO_CX88=m
+CONFIG_VIDEO_CX88_ALSA=m
+CONFIG_VIDEO_CX88_BLACKBIRD=m
+CONFIG_VIDEO_CX88_DVB=m
+# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set
+CONFIG_VIDEO_CX88_MPEG=m
+CONFIG_VIDEO_SAA7134=m
+CONFIG_VIDEO_SAA7134_ALSA=m
+CONFIG_VIDEO_SAA7134_RC=y
+CONFIG_VIDEO_SAA7134_DVB=m
+CONFIG_VIDEO_SAA7164=m
+
+#
+# Media digital TV PCI Adapters
+#
+CONFIG_DVB_B2C2_FLEXCOP_PCI=m
+# CONFIG_DVB_B2C2_FLEXCOP_PCI_DEBUG is not set
+CONFIG_DVB_DDBRIDGE=m
+# CONFIG_DVB_DDBRIDGE_MSIENABLE is not set
+CONFIG_DVB_DM1105=m
+CONFIG_MANTIS_CORE=m
+CONFIG_DVB_MANTIS=m
+CONFIG_DVB_HOPPER=m
+# CONFIG_DVB_NETUP_UNIDVB is not set
+CONFIG_DVB_NGENE=m
+CONFIG_DVB_PLUTO2=m
+CONFIG_DVB_PT1=m
+# CONFIG_DVB_PT3 is not set
+# CONFIG_DVB_SMIPCIE is not set
+CONFIG_DVB_BUDGET_CORE=m
+CONFIG_DVB_BUDGET=m
+CONFIG_DVB_BUDGET_CI=m
+CONFIG_DVB_BUDGET_AV=m
+# CONFIG_IPU_BRIDGE is not set
+CONFIG_RADIO_ADAPTERS=m
+# CONFIG_RADIO_MAXIRADIO is not set
+# CONFIG_RADIO_SAA7706H is not set
+# CONFIG_RADIO_SHARK is not set
+# CONFIG_RADIO_SHARK2 is not set
+# CONFIG_RADIO_SI4713 is not set
+CONFIG_RADIO_TEA575X=m
+# CONFIG_RADIO_TEA5764 is not set
+# CONFIG_RADIO_TEF6862 is not set
+# CONFIG_RADIO_WL1273 is not set
+# CONFIG_USB_DSBR is not set
+# CONFIG_USB_KEENE is not set
+# CONFIG_USB_MA901 is not set
+# CONFIG_USB_MR800 is not set
+# CONFIG_USB_RAREMONO is not set
+# CONFIG_RADIO_SI470X is not set
+CONFIG_MEDIA_PLATFORM_DRIVERS=y
+# CONFIG_V4L_PLATFORM_DRIVERS is not set
+# CONFIG_SDR_PLATFORM_DRIVERS is not set
+# CONFIG_DVB_PLATFORM_DRIVERS is not set
+# CONFIG_V4L_MEM2MEM_DRIVERS is not set
+
+#
+# Allegro DVT media platform drivers
+#
+
+#
+# Amlogic media platform drivers
+#
+
+#
+# Amphion drivers
+#
+
+#
+# Aspeed media platform drivers
+#
+
+#
+# Atmel media platform drivers
+#
+
+#
+# Cadence media platform drivers
+#
+# CONFIG_VIDEO_CADENCE_CSI2RX is not set
+# CONFIG_VIDEO_CADENCE_CSI2TX is not set
+
+#
+# Chips&Media media platform drivers
+#
+
+#
+# Intel media platform drivers
+#
+
+#
+# Marvell media platform drivers
+#
+
+#
+# Mediatek media platform drivers
+#
+
+#
+# Microchip Technology, Inc. media platform drivers
+#
+
+#
+# NVidia media platform drivers
+#
+
+#
+# NXP media platform drivers
+#
+
+#
+# Qualcomm media platform drivers
+#
+
+#
+# Renesas media platform drivers
+#
+
+#
+# Rockchip media platform drivers
+#
+
+#
+# Samsung media platform drivers
+#
+
+#
+# STMicroelectronics media platform drivers
+#
+
+#
+# Sunxi media platform drivers
+#
+
+#
+# Texas Instruments drivers
+#
+
+#
+# Verisilicon media platform drivers
+#
+
+#
+# VIA media platform drivers
+#
+
+#
+# Xilinx media platform drivers
+#
+
+#
+# MMC/SDIO DVB adapters
+#
+CONFIG_SMS_SDIO_DRV=m
+# CONFIG_V4L_TEST_DRIVERS is not set
+# CONFIG_DVB_TEST_DRIVERS is not set
+
+#
+# FireWire (IEEE 1394) Adapters
+#
+CONFIG_DVB_FIREDTV=m
+CONFIG_DVB_FIREDTV_INPUT=y
+CONFIG_MEDIA_COMMON_OPTIONS=y
+
+#
+# common driver options
+#
+CONFIG_CYPRESS_FIRMWARE=m
+CONFIG_TTPCI_EEPROM=m
+CONFIG_UVC_COMMON=m
+CONFIG_VIDEO_CX2341X=m
+CONFIG_VIDEO_TVEEPROM=m
+CONFIG_DVB_B2C2_FLEXCOP=m
+CONFIG_VIDEO_SAA7146=m
+CONFIG_VIDEO_SAA7146_VV=m
+CONFIG_SMS_SIANO_MDTV=m
+CONFIG_SMS_SIANO_RC=y
+# CONFIG_SMS_SIANO_DEBUGFS is not set
+CONFIG_VIDEOBUF2_CORE=m
+CONFIG_VIDEOBUF2_V4L2=m
+CONFIG_VIDEOBUF2_MEMOPS=m
+CONFIG_VIDEOBUF2_VMALLOC=m
+CONFIG_VIDEOBUF2_DMA_SG=m
+CONFIG_VIDEOBUF2_DVB=m
+# end of Media drivers
+
+#
+# Media ancillary drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_VIDEO_IR_I2C=m
+CONFIG_VIDEO_CAMERA_SENSOR=y
+# CONFIG_VIDEO_AR0521 is not set
+# CONFIG_VIDEO_HI556 is not set
+# CONFIG_VIDEO_HI846 is not set
+# CONFIG_VIDEO_HI847 is not set
+# CONFIG_VIDEO_IMX208 is not set
+# CONFIG_VIDEO_IMX214 is not set
+# CONFIG_VIDEO_IMX219 is not set
+# CONFIG_VIDEO_IMX258 is not set
+# CONFIG_VIDEO_IMX274 is not set
+# CONFIG_VIDEO_IMX290 is not set
+# CONFIG_VIDEO_IMX296 is not set
+# CONFIG_VIDEO_IMX319 is not set
+# CONFIG_VIDEO_IMX334 is not set
+# CONFIG_VIDEO_IMX335 is not set
+# CONFIG_VIDEO_IMX355 is not set
+# CONFIG_VIDEO_IMX412 is not set
+# CONFIG_VIDEO_IMX415 is not set
+# CONFIG_VIDEO_MT9M001 is not set
+# CONFIG_VIDEO_MT9M111 is not set
+# CONFIG_VIDEO_MT9P031 is not set
+# CONFIG_VIDEO_MT9T112 is not set
+# CONFIG_VIDEO_MT9V011 is not set
+# CONFIG_VIDEO_MT9V032 is not set
+# CONFIG_VIDEO_MT9V111 is not set
+# CONFIG_VIDEO_OG01A1B is not set
+# CONFIG_VIDEO_OV01A10 is not set
+# CONFIG_VIDEO_OV02A10 is not set
+# CONFIG_VIDEO_OV08D10 is not set
+# CONFIG_VIDEO_OV08X40 is not set
+# CONFIG_VIDEO_OV13858 is not set
+# CONFIG_VIDEO_OV13B10 is not set
+# CONFIG_VIDEO_OV2640 is not set
+# CONFIG_VIDEO_OV2659 is not set
+# CONFIG_VIDEO_OV2680 is not set
+# CONFIG_VIDEO_OV2685 is not set
+# CONFIG_VIDEO_OV2740 is not set
+# CONFIG_VIDEO_OV4689 is not set
+# CONFIG_VIDEO_OV5640 is not set
+# CONFIG_VIDEO_OV5645 is not set
+# CONFIG_VIDEO_OV5647 is not set
+# CONFIG_VIDEO_OV5648 is not set
+# CONFIG_VIDEO_OV5670 is not set
+# CONFIG_VIDEO_OV5675 is not set
+# CONFIG_VIDEO_OV5693 is not set
+# CONFIG_VIDEO_OV5695 is not set
+# CONFIG_VIDEO_OV6650 is not set
+# CONFIG_VIDEO_OV7251 is not set
+# CONFIG_VIDEO_OV7640 is not set
+# CONFIG_VIDEO_OV7670 is not set
+# CONFIG_VIDEO_OV772X is not set
+# CONFIG_VIDEO_OV7740 is not set
+# CONFIG_VIDEO_OV8856 is not set
+# CONFIG_VIDEO_OV8858 is not set
+# CONFIG_VIDEO_OV8865 is not set
+# CONFIG_VIDEO_OV9282 is not set
+# CONFIG_VIDEO_OV9640 is not set
+# CONFIG_VIDEO_OV9650 is not set
+# CONFIG_VIDEO_OV9734 is not set
+# CONFIG_VIDEO_RDACM20 is not set
+# CONFIG_VIDEO_RDACM21 is not set
+# CONFIG_VIDEO_RJ54N1 is not set
+# CONFIG_VIDEO_S5C73M3 is not set
+# CONFIG_VIDEO_S5K5BAF is not set
+# CONFIG_VIDEO_S5K6A3 is not set
+# CONFIG_VIDEO_ST_VGXY61 is not set
+# CONFIG_VIDEO_CCS is not set
+# CONFIG_VIDEO_ET8EK8 is not set
+
+#
+# Lens drivers
+#
+# CONFIG_VIDEO_AD5820 is not set
+# CONFIG_VIDEO_AK7375 is not set
+# CONFIG_VIDEO_DW9714 is not set
+# CONFIG_VIDEO_DW9719 is not set
+# CONFIG_VIDEO_DW9768 is not set
+# CONFIG_VIDEO_DW9807_VCM is not set
+# end of Lens drivers
+
+#
+# Flash devices
+#
+# CONFIG_VIDEO_ADP1653 is not set
+# CONFIG_VIDEO_LM3560 is not set
+# CONFIG_VIDEO_LM3646 is not set
+# end of Flash devices
+
+#
+# Audio decoders, processors and mixers
+#
+CONFIG_VIDEO_CS3308=m
+CONFIG_VIDEO_CS5345=m
+CONFIG_VIDEO_CS53L32A=m
+CONFIG_VIDEO_MSP3400=m
+# CONFIG_VIDEO_SONY_BTF_MPX is not set
+# CONFIG_VIDEO_TDA1997X is not set
+# CONFIG_VIDEO_TDA7432 is not set
+# CONFIG_VIDEO_TDA9840 is not set
+# CONFIG_VIDEO_TEA6415C is not set
+# CONFIG_VIDEO_TEA6420 is not set
+# CONFIG_VIDEO_TLV320AIC23B is not set
+# CONFIG_VIDEO_TVAUDIO is not set
+# CONFIG_VIDEO_UDA1342 is not set
+CONFIG_VIDEO_VP27SMPX=m
+CONFIG_VIDEO_WM8739=m
+CONFIG_VIDEO_WM8775=m
+# end of Audio decoders, processors and mixers
+
+#
+# RDS decoders
+#
+# CONFIG_VIDEO_SAA6588 is not set
+# end of RDS decoders
+
+#
+# Video decoders
+#
+# CONFIG_VIDEO_ADV7180 is not set
+# CONFIG_VIDEO_ADV7183 is not set
+# CONFIG_VIDEO_ADV748X is not set
+# CONFIG_VIDEO_ADV7604 is not set
+# CONFIG_VIDEO_ADV7842 is not set
+# CONFIG_VIDEO_BT819 is not set
+# CONFIG_VIDEO_BT856 is not set
+# CONFIG_VIDEO_BT866 is not set
+# CONFIG_VIDEO_ISL7998X is not set
+# CONFIG_VIDEO_KS0127 is not set
+# CONFIG_VIDEO_ML86V7667 is not set
+# CONFIG_VIDEO_SAA7110 is not set
+CONFIG_VIDEO_SAA711X=m
+# CONFIG_VIDEO_TC358743 is not set
+# CONFIG_VIDEO_TC358746 is not set
+# CONFIG_VIDEO_TVP514X is not set
+# CONFIG_VIDEO_TVP5150 is not set
+# CONFIG_VIDEO_TVP7002 is not set
+# CONFIG_VIDEO_TW2804 is not set
+# CONFIG_VIDEO_TW9903 is not set
+# CONFIG_VIDEO_TW9906 is not set
+# CONFIG_VIDEO_TW9910 is not set
+# CONFIG_VIDEO_VPX3220 is not set
+
+#
+# Video and audio decoders
+#
+CONFIG_VIDEO_SAA717X=m
+CONFIG_VIDEO_CX25840=m
+# end of Video decoders
+
+#
+# Video encoders
+#
+# CONFIG_VIDEO_ADV7170 is not set
+# CONFIG_VIDEO_ADV7175 is not set
+# CONFIG_VIDEO_ADV7343 is not set
+# CONFIG_VIDEO_ADV7393 is not set
+# CONFIG_VIDEO_ADV7511 is not set
+# CONFIG_VIDEO_AK881X is not set
+CONFIG_VIDEO_SAA7127=m
+# CONFIG_VIDEO_SAA7185 is not set
+# CONFIG_VIDEO_THS8200 is not set
+# end of Video encoders
+
+#
+# Video improvement chips
+#
+CONFIG_VIDEO_UPD64031A=m
+CONFIG_VIDEO_UPD64083=m
+# end of Video improvement chips
+
+#
+# Audio/Video compression chips
+#
+# CONFIG_VIDEO_SAA6752HS is not set
+# end of Audio/Video compression chips
+
+#
+# SDR tuner chips
+#
+# CONFIG_SDR_MAX2175 is not set
+# end of SDR tuner chips
+
+#
+# Miscellaneous helper chips
+#
+# CONFIG_VIDEO_I2C is not set
+CONFIG_VIDEO_M52790=m
+# CONFIG_VIDEO_ST_MIPID02 is not set
+# CONFIG_VIDEO_THS7303 is not set
+# end of Miscellaneous helper chips
+
+#
+# Video serializers and deserializers
+#
+# CONFIG_VIDEO_DS90UB913 is not set
+# CONFIG_VIDEO_DS90UB953 is not set
+# CONFIG_VIDEO_DS90UB960 is not set
+# end of Video serializers and deserializers
+
+#
+# Media SPI Adapters
+#
+CONFIG_CXD2880_SPI_DRV=m
+# CONFIG_VIDEO_GS1662 is not set
+# end of Media SPI Adapters
+
+CONFIG_MEDIA_TUNER=m
+
+#
+# Customize TV tuners
+#
+CONFIG_MEDIA_TUNER_E4000=m
+CONFIG_MEDIA_TUNER_FC0011=m
+CONFIG_MEDIA_TUNER_FC0012=m
+CONFIG_MEDIA_TUNER_FC0013=m
+CONFIG_MEDIA_TUNER_FC2580=m
+CONFIG_MEDIA_TUNER_IT913X=m
+CONFIG_MEDIA_TUNER_M88RS6000T=m
+CONFIG_MEDIA_TUNER_MAX2165=m
+CONFIG_MEDIA_TUNER_MC44S803=m
+CONFIG_MEDIA_TUNER_MSI001=m
+CONFIG_MEDIA_TUNER_MT2060=m
+CONFIG_MEDIA_TUNER_MT2063=m
+CONFIG_MEDIA_TUNER_MT20XX=m
+CONFIG_MEDIA_TUNER_MT2131=m
+CONFIG_MEDIA_TUNER_MT2266=m
+CONFIG_MEDIA_TUNER_MXL301RF=m
+CONFIG_MEDIA_TUNER_MXL5005S=m
+CONFIG_MEDIA_TUNER_MXL5007T=m
+CONFIG_MEDIA_TUNER_QM1D1B0004=m
+CONFIG_MEDIA_TUNER_QM1D1C0042=m
+CONFIG_MEDIA_TUNER_QT1010=m
+CONFIG_MEDIA_TUNER_R820T=m
+CONFIG_MEDIA_TUNER_SI2157=m
+CONFIG_MEDIA_TUNER_SIMPLE=m
+CONFIG_MEDIA_TUNER_TDA18212=m
+CONFIG_MEDIA_TUNER_TDA18218=m
+CONFIG_MEDIA_TUNER_TDA18250=m
+CONFIG_MEDIA_TUNER_TDA18271=m
+CONFIG_MEDIA_TUNER_TDA827X=m
+CONFIG_MEDIA_TUNER_TDA8290=m
+CONFIG_MEDIA_TUNER_TDA9887=m
+CONFIG_MEDIA_TUNER_TEA5761=m
+CONFIG_MEDIA_TUNER_TEA5767=m
+CONFIG_MEDIA_TUNER_TUA9001=m
+CONFIG_MEDIA_TUNER_XC2028=m
+CONFIG_MEDIA_TUNER_XC4000=m
+CONFIG_MEDIA_TUNER_XC5000=m
+# end of Customize TV tuners
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_MXL5XX=m
+CONFIG_DVB_STB0899=m
+CONFIG_DVB_STB6100=m
+CONFIG_DVB_STV090x=m
+CONFIG_DVB_STV0910=m
+CONFIG_DVB_STV6110x=m
+CONFIG_DVB_STV6111=m
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+CONFIG_DVB_DRXK=m
+CONFIG_DVB_MN88472=m
+CONFIG_DVB_MN88473=m
+CONFIG_DVB_SI2165=m
+CONFIG_DVB_TDA18271C2DD=m
+
+#
+# DVB-S (satellite) frontends
+#
+CONFIG_DVB_CX24110=m
+CONFIG_DVB_CX24116=m
+CONFIG_DVB_CX24117=m
+CONFIG_DVB_CX24120=m
+CONFIG_DVB_CX24123=m
+CONFIG_DVB_DS3000=m
+CONFIG_DVB_MB86A16=m
+CONFIG_DVB_MT312=m
+CONFIG_DVB_S5H1420=m
+CONFIG_DVB_SI21XX=m
+CONFIG_DVB_STB6000=m
+CONFIG_DVB_STV0288=m
+CONFIG_DVB_STV0299=m
+CONFIG_DVB_STV0900=m
+CONFIG_DVB_STV6110=m
+CONFIG_DVB_TDA10071=m
+CONFIG_DVB_TDA10086=m
+CONFIG_DVB_TDA8083=m
+CONFIG_DVB_TDA8261=m
+CONFIG_DVB_TDA826X=m
+CONFIG_DVB_TS2020=m
+CONFIG_DVB_TUA6100=m
+CONFIG_DVB_TUNER_CX24113=m
+CONFIG_DVB_TUNER_ITD1000=m
+CONFIG_DVB_VES1X93=m
+CONFIG_DVB_ZL10036=m
+CONFIG_DVB_ZL10039=m
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_CX22700=m
+CONFIG_DVB_CX22702=m
+CONFIG_DVB_CXD2820R=m
+CONFIG_DVB_CXD2841ER=m
+CONFIG_DVB_DIB3000MB=m
+CONFIG_DVB_DIB3000MC=m
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+CONFIG_DVB_DIB9000=m
+CONFIG_DVB_DRXD=m
+CONFIG_DVB_EC100=m
+CONFIG_DVB_GP8PSK_FE=m
+CONFIG_DVB_L64781=m
+CONFIG_DVB_MT352=m
+CONFIG_DVB_NXT6000=m
+CONFIG_DVB_S5H1432=m
+CONFIG_DVB_SP887X=m
+CONFIG_DVB_STV0367=m
+CONFIG_DVB_TDA10048=m
+CONFIG_DVB_TDA1004X=m
+CONFIG_DVB_ZD1301_DEMOD=m
+CONFIG_DVB_ZL10353=m
+CONFIG_DVB_CXD2880=m
+
+#
+# DVB-C (cable) frontends
+#
+CONFIG_DVB_STV0297=m
+CONFIG_DVB_TDA10021=m
+CONFIG_DVB_TDA10023=m
+CONFIG_DVB_VES1820=m
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+CONFIG_DVB_AU8522=m
+CONFIG_DVB_AU8522_DTV=m
+CONFIG_DVB_AU8522_V4L=m
+CONFIG_DVB_BCM3510=m
+CONFIG_DVB_LG2160=m
+CONFIG_DVB_LGDT3305=m
+CONFIG_DVB_LGDT330X=m
+CONFIG_DVB_MXL692=m
+CONFIG_DVB_NXT200X=m
+CONFIG_DVB_OR51132=m
+CONFIG_DVB_OR51211=m
+CONFIG_DVB_S5H1409=m
+CONFIG_DVB_S5H1411=m
+
+#
+# ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_DIB8000=m
+CONFIG_DVB_MB86A20S=m
+CONFIG_DVB_S921=m
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+CONFIG_DVB_MN88443X=m
+CONFIG_DVB_TC90522=m
+
+#
+# Digital terrestrial only tuners/PLL
+#
+CONFIG_DVB_PLL=m
+CONFIG_DVB_TUNER_DIB0070=m
+CONFIG_DVB_TUNER_DIB0090=m
+
+#
+# SEC control devices for DVB-S
+#
+CONFIG_DVB_A8293=m
+CONFIG_DVB_AF9033=m
+CONFIG_DVB_ASCOT2E=m
+CONFIG_DVB_ATBM8830=m
+CONFIG_DVB_HELENE=m
+CONFIG_DVB_HORUS3A=m
+CONFIG_DVB_ISL6405=m
+CONFIG_DVB_ISL6421=m
+CONFIG_DVB_ISL6423=m
+CONFIG_DVB_IX2505V=m
+CONFIG_DVB_LGS8GL5=m
+CONFIG_DVB_LGS8GXX=m
+CONFIG_DVB_LNBH25=m
+CONFIG_DVB_LNBH29=m
+CONFIG_DVB_LNBP21=m
+CONFIG_DVB_LNBP22=m
+CONFIG_DVB_M88RS2000=m
+CONFIG_DVB_TDA665x=m
+CONFIG_DVB_DRX39XYJ=m
+
+#
+# Common Interface (EN50221) controller drivers
+#
+CONFIG_DVB_CXD2099=m
+CONFIG_DVB_SP2=m
+# end of Customise DVB Frontends
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_APERTURE_HELPERS=y
+CONFIG_VIDEO_CMDLINE=y
+CONFIG_VIDEO_NOMODESET=y
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_PANEL is not set
+CONFIG_DRM=y
+# CONFIG_DRM_DEBUG_MM is not set
+CONFIG_DRM_KMS_HELPER=y
+# CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS is not set
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+# CONFIG_DRM_FBDEV_LEAK_PHYS_SMEM is not set
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+CONFIG_DRM_DP_AUX_CHARDEV=y
+CONFIG_DRM_DP_CEC=y
+CONFIG_DRM_TTM=y
+CONFIG_DRM_EXEC=m
+CONFIG_DRM_BUDDY=m
+CONFIG_DRM_VRAM_HELPER=m
+CONFIG_DRM_TTM_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=y
+CONFIG_DRM_SUBALLOC_HELPER=m
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+# CONFIG_DRM_I2C_CH7006 is not set
+# CONFIG_DRM_I2C_SIL164 is not set
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# CONFIG_DRM_KOMEDA is not set
+# end of ARM devices
+
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+CONFIG_DRM_AMDGPU_SI=y
+CONFIG_DRM_AMDGPU_CIK=y
+CONFIG_DRM_AMDGPU_USERPTR=y
+# CONFIG_DRM_AMDGPU_WERROR is not set
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+# CONFIG_DRM_AMD_ACP is not set
+# end of ACP (Audio CoProcessor) Configuration
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_FP=y
+# CONFIG_DRM_AMD_DC_SI is not set
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
+# end of Display Engine Configuration
+
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+# CONFIG_DRM_VGEM is not set
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=y
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_ABT_Y030XX067A is not set
+# CONFIG_DRM_PANEL_ARM_VERSATILE is not set
+# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set
+# CONFIG_DRM_PANEL_LVDS is not set
+# CONFIG_DRM_PANEL_SIMPLE is not set
+# CONFIG_DRM_PANEL_EDP is not set
+# CONFIG_DRM_PANEL_ILITEK_IL9322 is not set
+# CONFIG_DRM_PANEL_ILITEK_ILI9341 is not set
+# CONFIG_DRM_PANEL_INNOLUX_EJ030NA is not set
+# CONFIG_DRM_PANEL_SAMSUNG_LD9040 is not set
+# CONFIG_DRM_PANEL_LG_LB035Q02 is not set
+# CONFIG_DRM_PANEL_LG_LG4573 is not set
+# CONFIG_DRM_PANEL_NEC_NL8048HL11 is not set
+# CONFIG_DRM_PANEL_NEWVISION_NV3052C is not set
+# CONFIG_DRM_PANEL_NOVATEK_NT39016 is not set
+# CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO is not set
+# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set
+# CONFIG_DRM_PANEL_SAMSUNG_ATNA33XC20 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_DB7430 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D27A1 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6D7AA0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E63M0 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01 is not set
+# CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0 is not set
+# CONFIG_DRM_PANEL_SEIKO_43WVF1G is not set
+# CONFIG_DRM_PANEL_SITRONIX_ST7789V is not set
+# CONFIG_DRM_PANEL_SONY_ACX565AKM is not set
+# CONFIG_DRM_PANEL_TPO_TD028TTEC1 is not set
+# CONFIG_DRM_PANEL_TPO_TPG110 is not set
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_CHIPONE_ICN6211 is not set
+# CONFIG_DRM_CHRONTEL_CH7033 is not set
+# CONFIG_DRM_DISPLAY_CONNECTOR is not set
+# CONFIG_DRM_ITE_IT6505 is not set
+# CONFIG_DRM_LONTIUM_LT8912B is not set
+# CONFIG_DRM_LONTIUM_LT9211 is not set
+# CONFIG_DRM_LONTIUM_LT9611 is not set
+# CONFIG_DRM_LONTIUM_LT9611UXC is not set
+# CONFIG_DRM_ITE_IT66121 is not set
+# CONFIG_DRM_LVDS_CODEC is not set
+# CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW is not set
+# CONFIG_DRM_NWL_MIPI_DSI is not set
+# CONFIG_DRM_NXP_PTN3460 is not set
+# CONFIG_DRM_PARADE_PS8622 is not set
+# CONFIG_DRM_PARADE_PS8640 is not set
+# CONFIG_DRM_SAMSUNG_DSIM is not set
+# CONFIG_DRM_SIL_SII8620 is not set
+# CONFIG_DRM_SII902X is not set
+# CONFIG_DRM_SII9234 is not set
+# CONFIG_DRM_SIMPLE_BRIDGE is not set
+# CONFIG_DRM_THINE_THC63LVD1024 is not set
+# CONFIG_DRM_TOSHIBA_TC358762 is not set
+# CONFIG_DRM_TOSHIBA_TC358764 is not set
+# CONFIG_DRM_TOSHIBA_TC358767 is not set
+# CONFIG_DRM_TOSHIBA_TC358768 is not set
+# CONFIG_DRM_TOSHIBA_TC358775 is not set
+# CONFIG_DRM_TI_DLPC3433 is not set
+# CONFIG_DRM_TI_TFP410 is not set
+# CONFIG_DRM_TI_SN65DSI83 is not set
+# CONFIG_DRM_TI_SN65DSI86 is not set
+# CONFIG_DRM_TI_TPD12S015 is not set
+# CONFIG_DRM_ANALOGIX_ANX6345 is not set
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# CONFIG_DRM_ANALOGIX_ANX7625 is not set
+# CONFIG_DRM_I2C_ADV7511 is not set
+# CONFIG_DRM_CDNS_DSI is not set
+# CONFIG_DRM_CDNS_MHDP8546 is not set
+# end of Display Interface Bridges
+
+CONFIG_DRM_LOONGSON=y
+# CONFIG_DRM_ETNAVIV is not set
+# CONFIG_DRM_LOGICVC is not set
+# CONFIG_DRM_ARCPGU is not set
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
+# CONFIG_TINYDRM_ILI9225 is not set
+# CONFIG_TINYDRM_ILI9341 is not set
+# CONFIG_TINYDRM_ILI9486 is not set
+# CONFIG_TINYDRM_MI0283QT is not set
+# CONFIG_TINYDRM_REPAPER is not set
+# CONFIG_TINYDRM_ST7586 is not set
+# CONFIG_TINYDRM_ST7735R is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+# CONFIG_HYDCU_FIXUP_HEADER is not set
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_EFI=y
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+CONFIG_FB_RADEON=y
+CONFIG_FB_RADEON_I2C=y
+CONFIG_FB_RADEON_BACKLIGHT=y
+# CONFIG_FB_RADEON_DEBUG is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_FB_LS2K500=m
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DEVICE=y
+CONFIG_FB_DDC=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+CONFIG_FB_BACKLIGHT=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI922X is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=m
+# CONFIG_LCD_AMS369FG06 is not set
+# CONFIG_LCD_LMS501KF03 is not set
+# CONFIG_LCD_HX8357 is not set
+# CONFIG_LCD_OTM3225A is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_PWM is not set
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
+CONFIG_SOUND=y
+CONFIG_SOUND_OSS_CORE=y
+CONFIG_SOUND_OSS_CORE_PRECLAIM=y
+CONFIG_SND=y
+CONFIG_SND_TIMER=m
+CONFIG_SND_PCM=m
+CONFIG_SND_HWDEP=m
+CONFIG_SND_SEQ_DEVICE=m
+CONFIG_SND_RAWMIDI=m
+CONFIG_SND_JACK=y
+CONFIG_SND_JACK_INPUT_DEV=y
+CONFIG_SND_OSSEMUL=y
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+CONFIG_SND_PCM_TIMER=y
+CONFIG_SND_HRTIMER=m
+CONFIG_SND_DYNAMIC_MINORS=y
+CONFIG_SND_MAX_CARDS=32
+# CONFIG_SND_SUPPORT_OLD_API is not set
+CONFIG_SND_PROC_FS=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+CONFIG_SND_CTL_FAST_LOOKUP=y
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_CTL_INPUT_VALIDATION is not set
+CONFIG_SND_VMASTER=y
+CONFIG_SND_CTL_LED=m
+CONFIG_SND_SEQUENCER=m
+CONFIG_SND_SEQ_DUMMY=m
+CONFIG_SND_SEQUENCER_OSS=m
+CONFIG_SND_SEQ_HRTIMER_DEFAULT=y
+CONFIG_SND_SEQ_MIDI_EVENT=m
+CONFIG_SND_SEQ_MIDI=m
+CONFIG_SND_SEQ_MIDI_EMUL=m
+CONFIG_SND_SEQ_VIRMIDI=m
+# CONFIG_SND_SEQ_UMP is not set
+CONFIG_SND_MPU401_UART=m
+CONFIG_SND_OPL3_LIB=m
+CONFIG_SND_OPL3_LIB_SEQ=m
+CONFIG_SND_VX_LIB=m
+CONFIG_SND_AC97_CODEC=m
+CONFIG_SND_DRIVERS=y
+CONFIG_SND_DUMMY=m
+CONFIG_SND_ALOOP=m
+# CONFIG_SND_PCMTEST is not set
+CONFIG_SND_VIRMIDI=m
+CONFIG_SND_MTPAV=m
+# CONFIG_SND_MTS64 is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+CONFIG_SND_MPU401=m
+# CONFIG_SND_PORTMAN2X4 is not set
+CONFIG_SND_AC97_POWER_SAVE=y
+CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5
+CONFIG_SND_PCI=y
+CONFIG_SND_AD1889=m
+CONFIG_SND_ATIIXP=m
+CONFIG_SND_ATIIXP_MODEM=m
+CONFIG_SND_AU8810=m
+CONFIG_SND_AU8820=m
+CONFIG_SND_AU8830=m
+# CONFIG_SND_AW2 is not set
+CONFIG_SND_BT87X=m
+CONFIG_SND_BT87X_OVERCLOCK=y
+CONFIG_SND_CA0106=m
+CONFIG_SND_CMIPCI=m
+CONFIG_SND_OXYGEN_LIB=m
+CONFIG_SND_OXYGEN=m
+# CONFIG_SND_CS4281 is not set
+CONFIG_SND_CS46XX=m
+CONFIG_SND_CS46XX_NEW_DSP=y
+CONFIG_SND_CTXFI=m
+CONFIG_SND_DARLA20=m
+CONFIG_SND_GINA20=m
+CONFIG_SND_LAYLA20=m
+CONFIG_SND_DARLA24=m
+CONFIG_SND_GINA24=m
+CONFIG_SND_LAYLA24=m
+CONFIG_SND_MONA=m
+CONFIG_SND_MIA=m
+CONFIG_SND_ECHO3G=m
+CONFIG_SND_INDIGO=m
+CONFIG_SND_INDIGOIO=m
+CONFIG_SND_INDIGODJ=m
+CONFIG_SND_INDIGOIOX=m
+CONFIG_SND_INDIGODJX=m
+CONFIG_SND_ENS1370=m
+CONFIG_SND_ENS1371=m
+# CONFIG_SND_FM801 is not set
+CONFIG_SND_HDSP=m
+CONFIG_SND_HDSPM=m
+CONFIG_SND_ICE1724=m
+CONFIG_SND_INTEL8X0=m
+CONFIG_SND_INTEL8X0M=m
+CONFIG_SND_KORG1212=m
+CONFIG_SND_LOLA=m
+CONFIG_SND_LX6464ES=m
+CONFIG_SND_MIXART=m
+# CONFIG_SND_NM256 is not set
+CONFIG_SND_PCXHR=m
+# CONFIG_SND_RIPTIDE is not set
+CONFIG_SND_RME32=m
+CONFIG_SND_RME96=m
+CONFIG_SND_RME9652=m
+CONFIG_SND_VIA82XX=m
+CONFIG_SND_VIA82XX_MODEM=m
+CONFIG_SND_VIRTUOSO=m
+CONFIG_SND_VX222=m
+# CONFIG_SND_YMFPCI is not set
+
+#
+# HD-Audio
+#
+CONFIG_SND_HDA=m
+CONFIG_SND_HDA_GENERIC_LEDS=y
+CONFIG_SND_HDA_INTEL=m
+CONFIG_SND_HDA_HWDEP=y
+CONFIG_SND_HDA_RECONFIG=y
+CONFIG_SND_HDA_INPUT_BEEP=y
+CONFIG_SND_HDA_INPUT_BEEP_MODE=0
+CONFIG_SND_HDA_PATCH_LOADER=y
+# CONFIG_SND_HDA_SCODEC_CS35L41_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L41_SPI is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_I2C is not set
+# CONFIG_SND_HDA_SCODEC_CS35L56_SPI is not set
+# CONFIG_SND_HDA_SCODEC_TAS2781_I2C is not set
+CONFIG_SND_HDA_CODEC_REALTEK=m
+CONFIG_SND_HDA_CODEC_ANALOG=m
+CONFIG_SND_HDA_CODEC_SIGMATEL=m
+CONFIG_SND_HDA_CODEC_VIA=m
+CONFIG_SND_HDA_CODEC_HDMI=m
+CONFIG_SND_HDA_CODEC_CIRRUS=m
+# CONFIG_SND_HDA_CODEC_CS8409 is not set
+CONFIG_SND_HDA_CODEC_CONEXANT=m
+CONFIG_SND_HDA_CODEC_CA0110=m
+CONFIG_SND_HDA_CODEC_CA0132=m
+CONFIG_SND_HDA_CODEC_CA0132_DSP=y
+CONFIG_SND_HDA_CODEC_CMEDIA=m
+CONFIG_SND_HDA_CODEC_SI3054=m
+CONFIG_SND_HDA_GENERIC=m
+CONFIG_SND_HDA_POWER_SAVE_DEFAULT=0
+# CONFIG_SND_HDA_INTEL_HDMI_SILENT_STREAM is not set
+# CONFIG_SND_HDA_CTL_DEV_ID is not set
+# end of HD-Audio
+
+CONFIG_SND_HDA_CORE=m
+CONFIG_SND_HDA_DSP_LOADER=y
+CONFIG_SND_HDA_COMPONENT=y
+CONFIG_SND_HDA_PREALLOC_SIZE=512
+CONFIG_SND_INTEL_NHLT=y
+CONFIG_SND_INTEL_DSP_CONFIG=m
+CONFIG_SND_INTEL_SOUNDWIRE_ACPI=m
+# CONFIG_SND_SPI is not set
+CONFIG_SND_USB=y
+CONFIG_SND_USB_AUDIO=m
+# CONFIG_SND_USB_AUDIO_MIDI_V2 is not set
+CONFIG_SND_USB_AUDIO_USE_MEDIA_CONTROLLER=y
+CONFIG_SND_USB_UA101=m
+CONFIG_SND_USB_CAIAQ=m
+CONFIG_SND_USB_CAIAQ_INPUT=y
+CONFIG_SND_USB_6FIRE=m
+CONFIG_SND_USB_HIFACE=m
+CONFIG_SND_BCD2000=m
+CONFIG_SND_USB_LINE6=m
+CONFIG_SND_USB_POD=m
+CONFIG_SND_USB_PODHD=m
+CONFIG_SND_USB_TONEPORT=m
+CONFIG_SND_USB_VARIAX=m
+CONFIG_SND_FIREWIRE=y
+CONFIG_SND_FIREWIRE_LIB=m
+CONFIG_SND_DICE=m
+CONFIG_SND_OXFW=m
+CONFIG_SND_ISIGHT=m
+CONFIG_SND_FIREWORKS=m
+CONFIG_SND_BEBOB=m
+CONFIG_SND_FIREWIRE_DIGI00X=m
+CONFIG_SND_FIREWIRE_TASCAM=m
+CONFIG_SND_FIREWIRE_MOTU=m
+CONFIG_SND_FIREFACE=m
+CONFIG_SND_SOC=m
+# CONFIG_SND_SOC_ADI is not set
+# CONFIG_SND_SOC_AMD_ACP is not set
+# CONFIG_SND_AMD_ACP_CONFIG is not set
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_BCM63XX_I2S_WHISTLER is not set
+# CONFIG_SND_DESIGNWARE_I2S is not set
+
+#
+# SoC Audio for Freescale CPUs
+#
+
+#
+# Common SoC Audio options for Freescale CPUs:
+#
+# CONFIG_SND_SOC_FSL_ASRC is not set
+# CONFIG_SND_SOC_FSL_SAI is not set
+# CONFIG_SND_SOC_FSL_AUDMIX is not set
+# CONFIG_SND_SOC_FSL_SSI is not set
+# CONFIG_SND_SOC_FSL_SPDIF is not set
+# CONFIG_SND_SOC_FSL_ESAI is not set
+# CONFIG_SND_SOC_FSL_MICFIL is not set
+# CONFIG_SND_SOC_FSL_XCVR is not set
+# CONFIG_SND_SOC_IMX_AUDMUX is not set
+# end of SoC Audio for Freescale CPUs
+
+# CONFIG_SND_SOC_CHV3_I2S is not set
+# CONFIG_SND_I2S_HI6210_I2S is not set
+
+#
+# SoC Audio for Loongson CPUs
+#
+# CONFIG_SND_SOC_LOONGSON_I2S_PCI is not set
+# CONFIG_SND_SOC_LOONGSON_CARD is not set
+# end of SoC Audio for Loongson CPUs
+
+# CONFIG_SND_SOC_IMG is not set
+# CONFIG_SND_SOC_MTK_BTCVSD is not set
+# CONFIG_SND_SOC_SOF_TOPLEVEL is not set
+
+#
+# STMicroelectronics STM32 SOC audio support
+#
+# end of STMicroelectronics STM32 SOC audio support
+
+# CONFIG_SND_SOC_XILINX_I2S is not set
+# CONFIG_SND_SOC_XILINX_AUDIO_FORMATTER is not set
+# CONFIG_SND_SOC_XILINX_SPDIF is not set
+# CONFIG_SND_SOC_XTFPGA_I2S is not set
+CONFIG_SND_SOC_I2C_AND_SPI=m
+
+#
+# CODEC drivers
+#
+# CONFIG_SND_SOC_AC97_CODEC is not set
+# CONFIG_SND_SOC_ADAU1372_I2C is not set
+# CONFIG_SND_SOC_ADAU1372_SPI is not set
+# CONFIG_SND_SOC_ADAU1701 is not set
+# CONFIG_SND_SOC_ADAU1761_I2C is not set
+# CONFIG_SND_SOC_ADAU1761_SPI is not set
+# CONFIG_SND_SOC_ADAU7002 is not set
+# CONFIG_SND_SOC_ADAU7118_HW is not set
+# CONFIG_SND_SOC_ADAU7118_I2C is not set
+# CONFIG_SND_SOC_AK4104 is not set
+# CONFIG_SND_SOC_AK4118 is not set
+# CONFIG_SND_SOC_AK4375 is not set
+# CONFIG_SND_SOC_AK4458 is not set
+# CONFIG_SND_SOC_AK4554 is not set
+# CONFIG_SND_SOC_AK4613 is not set
+# CONFIG_SND_SOC_AK4642 is not set
+# CONFIG_SND_SOC_AK5386 is not set
+# CONFIG_SND_SOC_AK5558 is not set
+# CONFIG_SND_SOC_ALC5623 is not set
+# CONFIG_SND_SOC_AUDIO_IIO_AUX is not set
+# CONFIG_SND_SOC_AW8738 is not set
+# CONFIG_SND_SOC_AW88395 is not set
+# CONFIG_SND_SOC_AW88261 is not set
+# CONFIG_SND_SOC_BD28623 is not set
+# CONFIG_SND_SOC_BT_SCO is not set
+# CONFIG_SND_SOC_CHV3_CODEC is not set
+# CONFIG_SND_SOC_CS35L32 is not set
+# CONFIG_SND_SOC_CS35L33 is not set
+# CONFIG_SND_SOC_CS35L34 is not set
+# CONFIG_SND_SOC_CS35L35 is not set
+# CONFIG_SND_SOC_CS35L36 is not set
+# CONFIG_SND_SOC_CS35L41_SPI is not set
+# CONFIG_SND_SOC_CS35L41_I2C is not set
+# CONFIG_SND_SOC_CS35L45_SPI is not set
+# CONFIG_SND_SOC_CS35L45_I2C is not set
+# CONFIG_SND_SOC_CS35L56_I2C is not set
+# CONFIG_SND_SOC_CS35L56_SPI is not set
+# CONFIG_SND_SOC_CS42L42 is not set
+# CONFIG_SND_SOC_CS42L51_I2C is not set
+# CONFIG_SND_SOC_CS42L52 is not set
+# CONFIG_SND_SOC_CS42L56 is not set
+# CONFIG_SND_SOC_CS42L73 is not set
+# CONFIG_SND_SOC_CS42L83 is not set
+# CONFIG_SND_SOC_CS4234 is not set
+# CONFIG_SND_SOC_CS4265 is not set
+# CONFIG_SND_SOC_CS4270 is not set
+# CONFIG_SND_SOC_CS4271_I2C is not set
+# CONFIG_SND_SOC_CS4271_SPI is not set
+# CONFIG_SND_SOC_CS42XX8_I2C is not set
+# CONFIG_SND_SOC_CS43130 is not set
+# CONFIG_SND_SOC_CS4341 is not set
+# CONFIG_SND_SOC_CS4349 is not set
+# CONFIG_SND_SOC_CS53L30 is not set
+# CONFIG_SND_SOC_CX2072X is not set
+# CONFIG_SND_SOC_DA7213 is not set
+# CONFIG_SND_SOC_DMIC is not set
+# CONFIG_SND_SOC_ES7134 is not set
+# CONFIG_SND_SOC_ES7241 is not set
+# CONFIG_SND_SOC_ES8316 is not set
+# CONFIG_SND_SOC_ES8326 is not set
+# CONFIG_SND_SOC_ES8328_I2C is not set
+# CONFIG_SND_SOC_ES8328_SPI is not set
+# CONFIG_SND_SOC_GTM601 is not set
+# CONFIG_SND_SOC_HDA is not set
+# CONFIG_SND_SOC_ICS43432 is not set
+# CONFIG_SND_SOC_IDT821034 is not set
+# CONFIG_SND_SOC_INNO_RK3036 is not set
+# CONFIG_SND_SOC_MAX98088 is not set
+# CONFIG_SND_SOC_MAX98090 is not set
+# CONFIG_SND_SOC_MAX98357A is not set
+# CONFIG_SND_SOC_MAX98504 is not set
+# CONFIG_SND_SOC_MAX9867 is not set
+# CONFIG_SND_SOC_MAX98927 is not set
+# CONFIG_SND_SOC_MAX98520 is not set
+# CONFIG_SND_SOC_MAX98373_I2C is not set
+# CONFIG_SND_SOC_MAX98388 is not set
+# CONFIG_SND_SOC_MAX98390 is not set
+# CONFIG_SND_SOC_MAX98396 is not set
+# CONFIG_SND_SOC_MAX9860 is not set
+# CONFIG_SND_SOC_MSM8916_WCD_DIGITAL is not set
+# CONFIG_SND_SOC_PCM1681 is not set
+# CONFIG_SND_SOC_PCM1789_I2C is not set
+# CONFIG_SND_SOC_PCM179X_I2C is not set
+# CONFIG_SND_SOC_PCM179X_SPI is not set
+# CONFIG_SND_SOC_PCM186X_I2C is not set
+# CONFIG_SND_SOC_PCM186X_SPI is not set
+# CONFIG_SND_SOC_PCM3060_I2C is not set
+# CONFIG_SND_SOC_PCM3060_SPI is not set
+# CONFIG_SND_SOC_PCM3168A_I2C is not set
+# CONFIG_SND_SOC_PCM3168A_SPI is not set
+# CONFIG_SND_SOC_PCM5102A is not set
+# CONFIG_SND_SOC_PCM512x_I2C is not set
+# CONFIG_SND_SOC_PCM512x_SPI is not set
+# CONFIG_SND_SOC_PEB2466 is not set
+# CONFIG_SND_SOC_RK3328 is not set
+# CONFIG_SND_SOC_RT5616 is not set
+# CONFIG_SND_SOC_RT5631 is not set
+# CONFIG_SND_SOC_RT5640 is not set
+# CONFIG_SND_SOC_RT5659 is not set
+# CONFIG_SND_SOC_RT9120 is not set
+# CONFIG_SND_SOC_SGTL5000 is not set
+# CONFIG_SND_SOC_SIMPLE_AMPLIFIER is not set
+# CONFIG_SND_SOC_SIMPLE_MUX is not set
+# CONFIG_SND_SOC_SMA1303 is not set
+# CONFIG_SND_SOC_SPDIF is not set
+# CONFIG_SND_SOC_SRC4XXX_I2C is not set
+# CONFIG_SND_SOC_SSM2305 is not set
+# CONFIG_SND_SOC_SSM2518 is not set
+# CONFIG_SND_SOC_SSM2602_SPI is not set
+# CONFIG_SND_SOC_SSM2602_I2C is not set
+# CONFIG_SND_SOC_SSM3515 is not set
+# CONFIG_SND_SOC_SSM4567 is not set
+# CONFIG_SND_SOC_STA32X is not set
+# CONFIG_SND_SOC_STA350 is not set
+# CONFIG_SND_SOC_STI_SAS is not set
+# CONFIG_SND_SOC_TAS2552 is not set
+# CONFIG_SND_SOC_TAS2562 is not set
+# CONFIG_SND_SOC_TAS2764 is not set
+# CONFIG_SND_SOC_TAS2770 is not set
+# CONFIG_SND_SOC_TAS2780 is not set
+# CONFIG_SND_SOC_TAS2781_I2C is not set
+# CONFIG_SND_SOC_TAS5086 is not set
+# CONFIG_SND_SOC_TAS571X is not set
+# CONFIG_SND_SOC_TAS5720 is not set
+# CONFIG_SND_SOC_TAS5805M is not set
+# CONFIG_SND_SOC_TAS6424 is not set
+# CONFIG_SND_SOC_TDA7419 is not set
+# CONFIG_SND_SOC_TFA9879 is not set
+# CONFIG_SND_SOC_TFA989X is not set
+# CONFIG_SND_SOC_TLV320ADC3XXX is not set
+# CONFIG_SND_SOC_TLV320AIC23_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC23_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC31XX is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC32X4_SPI is not set
+# CONFIG_SND_SOC_TLV320AIC3X_I2C is not set
+# CONFIG_SND_SOC_TLV320AIC3X_SPI is not set
+# CONFIG_SND_SOC_TLV320ADCX140 is not set
+# CONFIG_SND_SOC_TS3A227E is not set
+# CONFIG_SND_SOC_TSCS42XX is not set
+# CONFIG_SND_SOC_TSCS454 is not set
+# CONFIG_SND_SOC_UDA1334 is not set
+# CONFIG_SND_SOC_WM8510 is not set
+# CONFIG_SND_SOC_WM8523 is not set
+# CONFIG_SND_SOC_WM8524 is not set
+# CONFIG_SND_SOC_WM8580 is not set
+# CONFIG_SND_SOC_WM8711 is not set
+# CONFIG_SND_SOC_WM8728 is not set
+# CONFIG_SND_SOC_WM8731_I2C is not set
+# CONFIG_SND_SOC_WM8731_SPI is not set
+# CONFIG_SND_SOC_WM8737 is not set
+# CONFIG_SND_SOC_WM8741 is not set
+# CONFIG_SND_SOC_WM8750 is not set
+# CONFIG_SND_SOC_WM8753 is not set
+# CONFIG_SND_SOC_WM8770 is not set
+# CONFIG_SND_SOC_WM8776 is not set
+# CONFIG_SND_SOC_WM8782 is not set
+# CONFIG_SND_SOC_WM8804_I2C is not set
+# CONFIG_SND_SOC_WM8804_SPI is not set
+# CONFIG_SND_SOC_WM8903 is not set
+# CONFIG_SND_SOC_WM8904 is not set
+# CONFIG_SND_SOC_WM8940 is not set
+# CONFIG_SND_SOC_WM8960 is not set
+# CONFIG_SND_SOC_WM8961 is not set
+# CONFIG_SND_SOC_WM8962 is not set
+# CONFIG_SND_SOC_WM8974 is not set
+# CONFIG_SND_SOC_WM8978 is not set
+# CONFIG_SND_SOC_WM8985 is not set
+# CONFIG_SND_SOC_ZL38060 is not set
+# CONFIG_SND_SOC_MAX9759 is not set
+# CONFIG_SND_SOC_MT6351 is not set
+# CONFIG_SND_SOC_MT6358 is not set
+# CONFIG_SND_SOC_MT6660 is not set
+# CONFIG_SND_SOC_NAU8315 is not set
+# CONFIG_SND_SOC_NAU8540 is not set
+# CONFIG_SND_SOC_NAU8810 is not set
+# CONFIG_SND_SOC_NAU8821 is not set
+# CONFIG_SND_SOC_NAU8822 is not set
+# CONFIG_SND_SOC_NAU8824 is not set
+# CONFIG_SND_SOC_TPA6130A2 is not set
+# CONFIG_SND_SOC_LPASS_WSA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_VA_MACRO is not set
+# CONFIG_SND_SOC_LPASS_RX_MACRO is not set
+# CONFIG_SND_SOC_LPASS_TX_MACRO is not set
+# end of CODEC drivers
+
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD is not set
+# CONFIG_SND_AUDIO_GRAPH_CARD2 is not set
+# CONFIG_SND_TEST_COMPONENT is not set
+# CONFIG_SND_VIRTIO is not set
+CONFIG_AC97_BUS=m
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
+CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+CONFIG_HID_PRODIKEYS=m
+CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
+CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
+CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_BACKLIGHT is not set
+# CONFIG_HID_PICOLCD_LCD is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+# CONFIG_HID_PICOLCD_CIR is not set
+CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
+CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+# CONFIG_I2C_HID_OF_ELAN is not set
+# CONFIG_I2C_HID_OF_GOODIX is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+CONFIG_USB_XHCI_PLATFORM=m
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_UHCI_HCD=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+CONFIG_USB_DWC2=y
+CONFIG_USB_DWC2_HOST=y
+
+#
+# Gadget/Dual-role mode requires USB Gadget support to be enabled
+#
+# CONFIG_USB_DWC2_PERIPHERAL is not set
+# CONFIG_USB_DWC2_DUAL_ROLE is not set
+# CONFIG_USB_DWC2_PCI is not set
+# CONFIG_USB_DWC2_DEBUG is not set
+# CONFIG_USB_DWC2_TRACK_MISSED_SOFS is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
+CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_HUB is not set
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+# CONFIG_USB_GADGET_DEBUG_FS is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_GR_UDC is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_SNP_UDC_PLAT is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_BDC_UDC is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_GADGET_XILINX is not set
+# CONFIG_USB_MAX3420_UDC is not set
+# CONFIG_USB_CDNS2_UDC is not set
+# CONFIG_USB_DUMMY_HCD is not set
+# end of USB Peripheral Controller
+
+# CONFIG_USB_CONFIGFS is not set
+
+#
+# USB Gadget precomposed configurations
+#
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_GADGET_TARGET is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_USB_G_WEBCAM is not set
+# CONFIG_USB_RAW_GADGET is not set
+# end of USB Gadget precomposed configurations
+
+CONFIG_TYPEC=m
+CONFIG_TYPEC_TCPM=m
+CONFIG_TYPEC_TCPCI=m
+CONFIG_TYPEC_RT1711H=m
+# CONFIG_TYPEC_TCPCI_MAXIM is not set
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_UCSI=m
+# CONFIG_UCSI_CCG is not set
+CONFIG_UCSI_ACPI=m
+# CONFIG_UCSI_STM32G0 is not set
+CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
+CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
+CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_MMC=m
+CONFIG_PWRSEQ_EMMC=m
+# CONFIG_PWRSEQ_SD8787 is not set
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
+CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+CONFIG_MMC_SDHCI_XENON=m
+# CONFIG_MMC_SDHCI_OMAP is not set
+# CONFIG_MMC_SDHCI_AM654 is not set
+# CONFIG_SCSI_UFSHCD is not set
+CONFIG_MEMSTICK=m
+# CONFIG_MEMSTICK_DEBUG is not set
+
+#
+# MemoryStick drivers
+#
+# CONFIG_MEMSTICK_UNSAFE_RESUME is not set
+CONFIG_MSPRO_BLOCK=m
+# CONFIG_MS_BLOCK is not set
+
+#
+# MemoryStick Host Controller Drivers
+#
+CONFIG_MEMSTICK_TIFM_MS=m
+CONFIG_MEMSTICK_JMICRON_38X=m
+CONFIG_MEMSTICK_R592=m
+CONFIG_MEMSTICK_REALTEK_PCI=m
+CONFIG_MEMSTICK_REALTEK_USB=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
+CONFIG_LEDS_LM3530=m
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+# CONFIG_LEDS_GPIO is not set
+CONFIG_LEDS_LP3944=m
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP50XX is not set
+# CONFIG_LEDS_LP55XX_COMMON is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_PWM is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+CONFIG_LEDS_BLINKM=m
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=m
+CONFIG_LEDS_TRIGGER_ONESHOT=m
+CONFIG_LEDS_TRIGGER_DISK=y
+# CONFIG_LEDS_TRIGGER_MTD is not set
+CONFIG_LEDS_TRIGGER_HEARTBEAT=m
+CONFIG_LEDS_TRIGGER_BACKLIGHT=m
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+CONFIG_LEDS_TRIGGER_TRANSIENT=m
+CONFIG_LEDS_TRIGGER_CAMERA=m
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+CONFIG_LEDS_TRIGGER_AUDIO=y
+# CONFIG_LEDS_TRIGGER_TTY is not set
is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +# CONFIG_INFINIBAND_ERDMA is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +# CONFIG_RDMA_SIW is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +# CONFIG_RTC_DRV_HYM8563 is not set +CONFIG_RTC_DRV_MAX6900=m +# CONFIG_RTC_DRV_NCT3018Y is not set +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +# CONFIG_RTC_DRV_ISL12026 is not set +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +CONFIG_RTC_DRV_RV8803=m +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +CONFIG_RTC_DRV_RX4581=m +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=y + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is 
not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m +# CONFIG_RTC_DRV_ZYNQMP is not set + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_CADENCE is not set +# CONFIG_RTC_DRV_FTRTC010 is not set +CONFIG_RTC_DRV_LOONGSON=y +# CONFIG_RTC_DRV_R7301 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +CONFIG_DMA_OF=y +# CONFIG_ALTERA_MSGDMA is not set +# CONFIG_DW_AXI_DMAC is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +# CONFIG_XILINX_ZYNQMP_DPDMA is not set +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=m +CONFIG_DW_DMAC=m +# CONFIG_DW_DMAC_PCI is not set +# CONFIG_DW_EDMA is not set +# CONFIG_SF_PDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +# CONFIG_DMABUF_DEBUG is not set +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_IRQ_BYPASS_MANAGER=m +# CONFIG_VIRT_DRIVERS is not set +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=m +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +# end of Microsoft Hyper-V guest support + +# CONFIG_GREYBUS is not set +CONFIG_COMEDI=m +# CONFIG_COMEDI_DEBUG is not set +CONFIG_COMEDI_DEFAULT_BUF_SIZE_KB=2048 +CONFIG_COMEDI_DEFAULT_BUF_MAXSIZE_KB=20480 +# CONFIG_COMEDI_MISC_DRIVERS is not set +# CONFIG_COMEDI_ISA_DRIVERS is not set +CONFIG_COMEDI_PCI_DRIVERS=m +CONFIG_COMEDI_8255_PCI=m +# CONFIG_COMEDI_ADDI_APCI_1032 is not set +# CONFIG_COMEDI_ADDI_APCI_1500 is not set +# CONFIG_COMEDI_ADDI_APCI_1516 is not set +# CONFIG_COMEDI_ADDI_APCI_1564 is not set +# CONFIG_COMEDI_ADDI_APCI_16XX is not set +# CONFIG_COMEDI_ADDI_APCI_2032 is not set +# CONFIG_COMEDI_ADDI_APCI_2200 is not set +# CONFIG_COMEDI_ADDI_APCI_3120 is not set +# CONFIG_COMEDI_ADDI_APCI_3501 is not set +# CONFIG_COMEDI_ADDI_APCI_3XXX is not set +CONFIG_COMEDI_ADL_PCI6208=m +CONFIG_COMEDI_ADL_PCI7X3X=m +CONFIG_COMEDI_ADL_PCI8164=m +CONFIG_COMEDI_ADL_PCI9111=m +CONFIG_COMEDI_ADL_PCI9118=m 
+CONFIG_COMEDI_ADV_PCI1710=m +CONFIG_COMEDI_ADV_PCI1720=m +CONFIG_COMEDI_ADV_PCI1723=m +CONFIG_COMEDI_ADV_PCI1724=m +CONFIG_COMEDI_ADV_PCI1760=m +CONFIG_COMEDI_ADV_PCI_DIO=m +# CONFIG_COMEDI_AMPLC_DIO200_PCI is not set +# CONFIG_COMEDI_AMPLC_PC236_PCI is not set +# CONFIG_COMEDI_AMPLC_PC263_PCI is not set +# CONFIG_COMEDI_AMPLC_PCI224 is not set +# CONFIG_COMEDI_AMPLC_PCI230 is not set +# CONFIG_COMEDI_CONTEC_PCI_DIO is not set +# CONFIG_COMEDI_DAS08_PCI is not set +# CONFIG_COMEDI_DT3000 is not set +# CONFIG_COMEDI_DYNA_PCI10XX is not set +# CONFIG_COMEDI_GSC_HPDI is not set +# CONFIG_COMEDI_MF6X4 is not set +# CONFIG_COMEDI_ICP_MULTI is not set +# CONFIG_COMEDI_DAQBOARD2000 is not set +# CONFIG_COMEDI_JR3_PCI is not set +# CONFIG_COMEDI_KE_COUNTER is not set +# CONFIG_COMEDI_CB_PCIDAS64 is not set +# CONFIG_COMEDI_CB_PCIDAS is not set +# CONFIG_COMEDI_CB_PCIDDA is not set +# CONFIG_COMEDI_CB_PCIMDAS is not set +# CONFIG_COMEDI_CB_PCIMDDA is not set +# CONFIG_COMEDI_ME4000 is not set +# CONFIG_COMEDI_ME_DAQ is not set +# CONFIG_COMEDI_NI_6527 is not set +# CONFIG_COMEDI_NI_65XX is not set +# CONFIG_COMEDI_NI_660X is not set +# CONFIG_COMEDI_NI_670X is not set +CONFIG_COMEDI_NI_LABPC_PCI=m +CONFIG_COMEDI_NI_PCIDIO=m +CONFIG_COMEDI_NI_PCIMIO=m +# CONFIG_COMEDI_RTD520 is not set +# CONFIG_COMEDI_S626 is not set +CONFIG_COMEDI_MITE=m +CONFIG_COMEDI_NI_TIOCMD=m +# CONFIG_COMEDI_USB_DRIVERS is not set +CONFIG_COMEDI_8254=m +CONFIG_COMEDI_8255=m +# CONFIG_COMEDI_8255_SA is not set +# CONFIG_COMEDI_KCOMEDILIB is not set +CONFIG_COMEDI_NI_LABPC=m +CONFIG_COMEDI_NI_TIO=m +CONFIG_COMEDI_NI_ROUTING=m +# CONFIG_COMEDI_TESTS is not set +CONFIG_STAGING=y +# CONFIG_PRISM2_USB is not set +# CONFIG_RTL8192U is not set +# CONFIG_RTLLIB is not set +# CONFIG_RTL8723BS is not set +# CONFIG_R8712U is not set +# CONFIG_RTS5208 is not set +# CONFIG_VT6655 is not set +# CONFIG_VT6656 is not set + +# +# IIO staging drivers +# + +# +# Accelerometers +# +# CONFIG_ADIS16203 is not set +# CONFIG_ADIS16240 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD7816 is not set +# end of Analog to digital converters + +# +# Analog digital bi-direction converters +# +# CONFIG_ADT7316 is not set +# end of Analog digital bi-direction converters + +# +# Direct Digital Synthesis +# +# CONFIG_AD9832 is not set +# CONFIG_AD9834 is not set +# end of Direct Digital Synthesis + +# +# Network Analyzer, Impedance Converters +# +# CONFIG_AD5933 is not set +# end of Network Analyzer, Impedance Converters + +# +# Resolver to digital converters +# +# CONFIG_AD2S1210 is not set +# end of Resolver to digital converters +# end of IIO staging drivers + +# CONFIG_FB_SM750 is not set +# CONFIG_STAGING_MEDIA is not set +# CONFIG_STAGING_BOARD is not set +# CONFIG_LTE_GDM724X is not set +# CONFIG_FB_TFT is not set +# CONFIG_KS7010 is not set +# CONFIG_PI433 is not set +# CONFIG_XIL_AXIS_FIFO is not set +# CONFIG_FIELDBUS_DEV is not set +# CONFIG_QLGE is not set +# CONFIG_VME_BUS is not set +CONFIG_LOONGARCH_PLATFORM_DEVICES=y +CONFIG_LOONGSON_LAPTOP=y +# CONFIG_GOLDFISH is not set +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set +# CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_SI570 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_COMMON_CLK_CS2000_CP 
is not set +# CONFIG_COMMON_CLK_AXI_CLKGEN is not set +CONFIG_COMMON_CLK_LOONGSON2=y +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_COMMON_CLK_RS9_PCIE is not set +# CONFIG_COMMON_CLK_SI521XX is not set +# CONFIG_COMMON_CLK_VC3 is not set +# CONFIG_COMMON_CLK_VC5 is not set +# CONFIG_COMMON_CLK_VC7 is not set +# CONFIG_COMMON_CLK_FIXED_MMIO is not set +# CONFIG_XILINX_VCU is not set +# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set +# CONFIG_HWSPINLOCK is not set + +# +# Clock Source drivers +# +# end of Clock Source drivers + +# CONFIG_MAILBOX is not set +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +# end of Generic IOMMU Pagetable Support + +# CONFIG_IOMMU_DEBUGFS is not set +CONFIG_IOMMU_DEFAULT_DMA_STRICT=y +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +# CONFIG_IOMMU_DEFAULT_PASSTHROUGH is not set +CONFIG_OF_IOMMU=y +# CONFIG_IOMMUFD is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# CONFIG_LITEX_SOC_CONTROLLER is not set +# end of Enable LiteX SoC Builder specific drivers + +CONFIG_LOONGSON2_GUTS=y +CONFIG_LOONGSON2_PM=y +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +CONFIG_PM_DEVFREQ=y + +# +# DEVFREQ Governors +# +CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y +CONFIG_DEVFREQ_GOV_PERFORMANCE=y +CONFIG_DEVFREQ_GOV_POWERSAVE=y +CONFIG_DEVFREQ_GOV_USERSPACE=y +# CONFIG_DEVFREQ_GOV_PASSIVE is not set + +# +# DEVFREQ Drivers +# +# CONFIG_PM_DEVFREQ_EVENT is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is 
not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ADI_AXI_ADC is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C 
is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set 
+# CONFIG_FXOS8700_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# 
+# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_ATMEL_TCB is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +# CONFIG_PWM_FSL_FTM is not set +# CONFIG_PWM_PCA9685 is not set +# CONFIG_PWM_XILINX is not set + +# +# IRQ chip support +# +CONFIG_IRQCHIP=y +# CONFIG_AL_FIC is not set +# CONFIG_XILINX_INTC is not set +CONFIG_IRQ_LOONGARCH_CPU=y +CONFIG_LOONGSON_LIOINTC=y +CONFIG_LOONGSON_EIOINTC=y +CONFIG_LOONGSON_HTVEC=y +CONFIG_LOONGSON_PCH_PIC=y +CONFIG_LOONGSON_PCH_MSI=y +CONFIG_LOONGSON_PCH_LPC=y +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_SIMPLE is not set +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_CADENCE_TORRENT is not set +# CONFIG_PHY_CADENCE_DPHY is not set +# CONFIG_PHY_CADENCE_DPHY_RX is not set +# CONFIG_PHY_CADENCE_SIERRA is not set +# CONFIG_PHY_CADENCE_SALVO is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_LAN966X_SERDES is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_MAPPHONE_MDM6600 is not set +# CONFIG_PHY_OCELOT_SERDES is not set +# CONFIG_PHY_SAMSUNG_USB2 is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +# CONFIG_DTPM is not set +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_DWC_PCIE_PMU is not set +# 
end of Performance monitor support + +CONFIG_RAS=y +CONFIG_USB4=m +# CONFIG_USB4_DEBUGFS_WRITE is not set +# CONFIG_USB4_DMA_TEST is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +# CONFIG_LIBNVDIMM is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=m +CONFIG_DEV_DAX_KMEM=m +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set +# CONFIG_NVMEM_U_BOOT_ENV is not set + +# +# HW tracing support +# +# CONFIG_STM is not set +# CONFIG_INTEL_TH is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +# CONFIG_FSI is not set +CONFIG_PM_OPP=y +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_EXT2_FS_POSIX_ACL=y +CONFIG_EXT2_FS_SECURITY=y +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +# CONFIG_JFS_DEBUG is not set +# CONFIG_JFS_STATISTICS is not set +CONFIG_XFS_FS=y +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +CONFIG_OCFS2_FS=m +CONFIG_OCFS2_FS_O2CB=m +CONFIG_OCFS2_FS_USERSPACE_CLUSTER=m +CONFIG_OCFS2_FS_STATS=y +CONFIG_OCFS2_DEBUG_MASKLOG=y +# CONFIG_OCFS2_DEBUG_FS is not set +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +CONFIG_QFMT_V1=m +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +# CONFIG_VIRT_FUSE is not set +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=y +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +# CONFIG_CACHEFILES_ONDEMAND is not set +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y 
+CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=936 +CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" +# CONFIG_FAT_DEFAULT_UTF8 is not set +CONFIG_EXFAT_FS=m +CONFIG_EXFAT_DEFAULT_IOCHARSET="utf8" +CONFIG_NTFS_FS=m +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set +CONFIG_NTFS3_FS=m +CONFIG_NTFS3_64BIT_CLUSTER=y +CONFIG_NTFS3_LZX_XPRESS=y +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_ARCH_SUPPORTS_HUGETLBFS=y +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +# CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON is not set +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +CONFIG_ORANGEFS_FS=m +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +CONFIG_ECRYPT_FS=m +CONFIG_ECRYPT_FS_MESSAGING=y +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +CONFIG_UBIFS_FS=m +CONFIG_UBIFS_FS_ADVANCED_COMPR=y +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +CONFIG_UBIFS_FS_ZSTD=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set +CONFIG_UBIFS_FS_XATTR=y +CONFIG_UBIFS_FS_SECURITY=y +# CONFIG_UBIFS_FS_AUTHENTICATION is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +CONFIG_MINIX_FS=m +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +CONFIG_ROMFS_FS=m +CONFIG_ROMFS_BACKED_BY_BLOCK=y +# CONFIG_ROMFS_BACKED_BY_MTD is not set +# CONFIG_ROMFS_BACKED_BY_BOTH is not set +CONFIG_ROMFS_ON_BLOCK=y +CONFIG_PSTORE=m +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +# CONFIG_PSTORE_CONSOLE is not set +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +# CONFIG_PSTORE_RAM is not set +# CONFIG_PSTORE_BLK is not set +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +# CONFIG_UFS_FS_WRITE is not set +# CONFIG_UFS_DEBUG is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_PCPU_KTHREAD=y +CONFIG_EROFS_FS_PCPU_KTHREAD_HIPRI=y +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# 
CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y +# CONFIG_ROOT_NFS is not set +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set +CONFIG_NFS_V4_2_READ_PLUS=y +CONFIG_NFSD=y +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=y +CONFIG_LOCKD=y +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=y +CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=y +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +CONFIG_CEPH_FSCACHE=y +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CEPH_FS_SECURITY_LABEL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_9P_FS=y +# CONFIG_9P_FS_POSIX_ACL is not set +# CONFIG_9P_FS_SECURITY is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +CONFIG_KEY_DH_OPERATIONS=y +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y 
+CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +# CONFIG_FORTIFY_SOURCE is not set +# CONFIG_STATIC_USERMODEHELPER is not set +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +CONFIG_SECURITY_APPARMOR=y +# CONFIG_SECURITY_APPARMOR_DEBUG is not set +CONFIG_SECURITY_APPARMOR_INTROSPECT_POLICY=y +CONFIG_SECURITY_APPARMOR_HASH=y +CONFIG_SECURITY_APPARMOR_HASH_DEFAULT=y +CONFIG_SECURITY_APPARMOR_EXPORT_BINARY=y +CONFIG_SECURITY_APPARMOR_PARANOID_LOAD=y +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_LOCK_DOWN_KERNEL_FORCE_NONE=y +# CONFIG_LOCK_DOWN_KERNEL_FORCE_INTEGRITY is not set +# CONFIG_LOCK_DOWN_KERNEL_FORCE_CONFIDENTIALITY is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +CONFIG_IMA_NG_TEMPLATE=y +# CONFIG_IMA_SIG_TEMPLATE is not set +CONFIG_IMA_DEFAULT_TEMPLATE="ima-ng" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +# CONFIG_IMA_WRITE_POLICY is not set +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +# CONFIG_IMA_APPRAISE_BUILD_POLICY is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +# CONFIG_IMA_BLACKLIST_KEYRING is not set +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +# CONFIG_DEFAULT_SECURITY_SELINUX is not set +# CONFIG_DEFAULT_SECURITY_APPARMOR is not set +CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +# CONFIG_INIT_STACK_NONE is not set +# CONFIG_INIT_STACK_ALL_PATTERN is not set +CONFIG_INIT_STACK_ALL_ZERO=y +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +# CONFIG_BUG_ON_DATA_CORRUPTION is not set +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is 
not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=y +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=y +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ENGINE=m +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=y +CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +# CONFIG_CRYPTO_OFB is not set +CONFIG_CRYPTO_PCBC=m +# CONFIG_CRYPTO_XTS is not set +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=y +CONFIG_CRYPTO_CMAC=m +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=y +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y 
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=m +CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_842=m +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=y +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +CONFIG_CRYPTO_KDF800108_CTR=y +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (loongarch) +# +CONFIG_CRYPTO_CRC32_LOONGARCH=m +# end of Accelerated Cryptographic Algorithms for CPU (loongarch) + +CONFIG_CRYPTO_HW=y +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXX is not set +# CONFIG_CRYPTO_DEV_QAT_C62X is not set +# CONFIG_CRYPTO_DEV_QAT_4XXX is not set +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set +# CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set +# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +CONFIG_CRYPTO_DEV_VIRTIO=m +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_CCREE is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# +CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_SYSTEM_REVOCATION_KEYS="" +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=y +CONFIG_RAID6_PQ_BENCHMARK=y +CONFIG_PACKING=y +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=1 +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# 
end of Crypto library routines + +CONFIG_CRC_CCITT=m +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=y +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=y +# CONFIG_CRC8 is not set +CONFIG_XXHASH=y +CONFIG_AUDIT_GENERIC=y +# CONFIG_RANDOM32_SELFTEST is not set +CONFIG_842_COMPRESS=m +CONFIG_842_DECOMPRESS=m +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=y +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_DMA_DECLARE_COHERENT=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +# CONFIG_DMA_RESTRICTED_POOL is not set +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# +CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +# CONFIG_DMA_API_DEBUG is not set +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +# CONFIG_CPUMASK_OFFSTACK is not set +# CONFIG_FORCE_NR_CPUS is not set +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_LRU_CACHE=m +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_LIBFDT=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_GENERIC_LIB_ASHLDI3=y +CONFIG_GENERIC_LIB_ASHRDI3=y +CONFIG_GENERIC_LIB_LSHRDI3=y +CONFIG_GENERIC_LIB_CMPDI2=y +CONFIG_GENERIC_LIB_UCMPDI2=y +CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y 
+CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=4096 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +# CONFIG_VMLINUX_MAP is not set +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +# CONFIG_KGDB is not set +# CONFIG_UBSAN is not set +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +# CONFIG_PAGE_EXTENSION is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_POISONING is not set +# CONFIG_DEBUG_PAGE_REF is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +# CONFIG_DEBUG_KMEMLEAK is not set +# CONFIG_DEBUG_OBJECTS is not set +# CONFIG_SHRINKER_DEBUG is not set +# CONFIG_DEBUG_STACK_USAGE is not set +# CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_VM is not set +# CONFIG_DEBUG_MEMORY_INIT is not set +# CONFIG_DEBUG_PER_CPU_MAPS is not set +CONFIG_HAVE_DEBUG_STACKOVERFLOW=y +# CONFIG_DEBUG_STACKOVERFLOW is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_ARCH_DISABLE_KASAN_INLINE=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +# CONFIG_KASAN is not set +CONFIG_HAVE_ARCH_KFENCE=y +# CONFIG_KFENCE is not set +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=0 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PERF is not set +CONFIG_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +# CONFIG_SCHED_DEBUG is not set +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set + +# +# Lock Debugging (spinlocks, mutexes, etc...) 
+# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +# CONFIG_PROVE_LOCKING is not set +# CONFIG_LOCK_STAT is not set +# CONFIG_DEBUG_RT_MUTEXES is not set +# CONFIG_DEBUG_SPINLOCK is not set +# CONFIG_DEBUG_MUTEXES is not set +# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set +# CONFIG_DEBUG_RWSEMS is not set +# CONFIG_DEBUG_LOCK_ALLOC is not set +# CONFIG_DEBUG_ATOMIC_SLEEP is not set +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +# CONFIG_LOCK_TORTURE_TEST is not set +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +# CONFIG_FUNCTION_PROFILER is not set +# CONFIG_STACK_TRACER is not set +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_SCHED_TRACER is not set +# CONFIG_HWLAT_TRACER is not set +# CONFIG_OSNOISE_TRACER is not set +# CONFIG_TIMERLAT_TRACER is not set +CONFIG_FTRACE_SYSCALLS=y +# CONFIG_TRACER_SNAPSHOT is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +# CONFIG_PROFILE_ALL_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +# CONFIG_SYNTH_EVENTS is not set +# CONFIG_USER_EVENTS is not set +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +# CONFIG_RING_BUFFER_BENCHMARK is not set +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# 
CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_RV is not set +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +# CONFIG_STRICT_DEVMEM is not set + +# +# loongarch Debugging +# +# CONFIG_UNWINDER_GUESS is not set +CONFIG_UNWINDER_PROLOGUE=y +# end of loongarch Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_FUNCTION_ERROR_INJECTION is not set +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +# CONFIG_RUNTIME_TESTING_MENU is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/loongarch/configs/loongson3_defconfig b/arch/loongarch/configs/loongson3_defconfig index a3b52aaa83b33634c5be146bc02d50589bf5f762..3748aa27e99389fd52ffb49f8bbe2b73510cc2ef 100644 --- a/arch/loongarch/configs/loongson3_defconfig +++ b/arch/loongarch/configs/loongson3_defconfig @@ -4,8 +4,9 @@ CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BPF_SYSCALL=y -CONFIG_BPF_JIT=y -CONFIG_PREEMPT=y +# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set +CONFIG_PREEMPT_VOLUNTARY=y +CONFIG_IRQ_TIME_ACCOUNTING=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y CONFIG_TASKSTATS=y @@ -19,6 +20,7 @@ CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y CONFIG_CPUSETS=y @@ -34,75 +36,78 @@ CONFIG_RELAY=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y CONFIG_KALLSYMS_ALL=y -CONFIG_PERF_EVENTS=y -CONFIG_LOONGARCH=y -CONFIG_64BIT=y -CONFIG_MACH_LOONGSON64=y -CONFIG_PAGE_SIZE_16KB=y -CONFIG_HZ_250=y -CONFIG_DMI=y -CONFIG_EFI=y -CONFIG_SMP=y -CONFIG_HOTPLUG_CPU=y -CONFIG_NR_CPUS=64 +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_NR_CPUS=256 CONFIG_NUMA=y -CONFIG_CPU_HAS_FPU=y CONFIG_CPU_HAS_LSX=y CONFIG_CPU_HAS_LASX=y -CONFIG_KEXEC=y -CONFIG_CRASH_DUMP=y CONFIG_RANDOMIZE_BASE=y -CONFIG_SUSPEND=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_LOONGSON3_ACPI_CPUFREQ=y CONFIG_HIBERNATION=y -CONFIG_ACPI=y CONFIG_ACPI_SPCR_TABLE=y CONFIG_ACPI_TAD=y CONFIG_ACPI_DOCK=y CONFIG_ACPI_IPMI=m -CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PCI_SLOT=y CONFIG_ACPI_HOTPLUG_MEMORY=y -CONFIG_EFI_ZBOOT=y -CONFIG_EFI_GENERIC_STUB_INITRD_CMDLINE_LOADER=y -CONFIG_EFI_CAPSULE_LOADER=m -CONFIG_EFI_TEST=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m CONFIG_JUMP_LABEL=y CONFIG_MODULES=y CONFIG_MODULE_FORCE_LOAD=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_WBT=y CONFIG_PARTITION_ADVANCED=y CONFIG_BSD_DISKLABEL=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m -CONFIG_ZPOOL=y CONFIG_ZSWAP=y CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD=y -CONFIG_ZBUD=y CONFIG_Z3FOLD=y -CONFIG_ZSMALLOC=m +CONFIG_ZSMALLOC_STAT=y +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set # CONFIG_COMPAT_BRK is not set CONFIG_MEMORY_HOTPLUG=y CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y CONFIG_MEMORY_HOTREMOVE=y CONFIG_KSM=y CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA=y +CONFIG_IDLE_PAGE_TRACKING=y CONFIG_USERFAULTFD=y CONFIG_NET=y CONFIG_PACKET=y 
-CONFIG_UNIX=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX_DIAG=m CONFIG_TLS=m CONFIG_TLS_DEVICE=y +CONFIG_TLS_TOE=y CONFIG_XFRM_USER=y -CONFIG_NET_KEY=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m CONFIG_XDP_SOCKETS=y -CONFIG_INET=y +CONFIG_XDP_SOCKETS_DIAG=m CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y @@ -118,27 +123,83 @@ CONFIG_IP_MROUTE=y CONFIG_IP_MROUTE_MULTIPLE_TABLES=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_NET_FOU_IP_TUNNELS=y +CONFIG_INET_AH=m CONFIG_INET_ESP=m -CONFIG_INET_UDP_DIAG=y +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_ESPINTCP=y +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_INET_DIAG_DESTROY=y CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_CUBIC=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_ESPINTCP=y +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_IPV6_RPL_LWTUNNEL=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y CONFIG_NETWORK_PHY_TIMESTAMPING=y CONFIG_NETFILTER=y CONFIG_BRIDGE_NETFILTER=m -CONFIG_NETFILTER_NETLINK_LOG=m CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y CONFIG_NF_CONNTRACK_AMANDA=m CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m CONFIG_NF_CONNTRACK_NETBIOS_NS=m CONFIG_NF_CONNTRACK_SNMP=m CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m CONFIG_NFT_CONNLIMIT=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -151,22 +212,35 @@ CONFIG_NFT_QUOTA=m CONFIG_NFT_REJECT=m CONFIG_NFT_COMPAT=m CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m CONFIG_NFT_SOCKET=m CONFIG_NFT_OSF=m CONFIG_NFT_TPROXY=m +CONFIG_NFT_SYNPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_AUDIT=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m CONFIG_NETFILTER_XT_TARGET_DSCP=m CONFIG_NETFILTER_XT_TARGET_HMARK=m CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m CONFIG_NETFILTER_XT_TARGET_LED=m CONFIG_NETFILTER_XT_TARGET_LOG=m CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m 
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m CONFIG_NETFILTER_XT_TARGET_TRACE=m CONFIG_NETFILTER_XT_TARGET_SECMARK=m CONFIG_NETFILTER_XT_TARGET_TCPMSS=m @@ -182,7 +256,6 @@ CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m CONFIG_NETFILTER_XT_MATCH_CONNMARK=m CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m CONFIG_NETFILTER_XT_MATCH_CPU=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m CONFIG_NETFILTER_XT_MATCH_DSCP=m CONFIG_NETFILTER_XT_MATCH_ESP=m @@ -191,6 +264,7 @@ CONFIG_NETFILTER_XT_MATCH_HELPER=m CONFIG_NETFILTER_XT_MATCH_IPCOMP=m CONFIG_NETFILTER_XT_MATCH_IPRANGE=m CONFIG_NETFILTER_XT_MATCH_IPVS=m +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set CONFIG_NETFILTER_XT_MATCH_LENGTH=m CONFIG_NETFILTER_XT_MATCH_LIMIT=m CONFIG_NETFILTER_XT_MATCH_MAC=m @@ -200,10 +274,12 @@ CONFIG_NETFILTER_XT_MATCH_NFACCT=m CONFIG_NETFILTER_XT_MATCH_OSF=m CONFIG_NETFILTER_XT_MATCH_OWNER=m CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m CONFIG_NETFILTER_XT_MATCH_QUOTA=m CONFIG_NETFILTER_XT_MATCH_RATEEST=m CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m CONFIG_NETFILTER_XT_MATCH_SOCKET=m CONFIG_NETFILTER_XT_MATCH_STATE=m CONFIG_NETFILTER_XT_MATCH_STATISTIC=m @@ -212,8 +288,25 @@ CONFIG_NETFILTER_XT_MATCH_TCPMSS=m CONFIG_NETFILTER_XT_MATCH_TIME=m CONFIG_NETFILTER_XT_MATCH_U32=m CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m CONFIG_IP_VS=m CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y CONFIG_IP_VS_PROTO_TCP=y CONFIG_IP_VS_PROTO_UDP=y CONFIG_IP_VS_PROTO_ESP=y @@ -221,11 +314,24 @@ CONFIG_IP_VS_PROTO_AH=y CONFIG_IP_VS_PROTO_SCTP=y CONFIG_IP_VS_RR=m CONFIG_IP_VS_WRR=m -CONFIG_IP_VS_NFCT=y -CONFIG_NF_TABLES_IPV4=y +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m CONFIG_NFT_DUP_IPV4=m CONFIG_NFT_FIB_IPV4=m CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m CONFIG_IP_NF_IPTABLES=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m @@ -246,18 +352,21 @@ CONFIG_IP_NF_SECURITY=m CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m -CONFIG_NF_TABLES_IPV6=y -CONFIG_IP6_NF_IPTABLES=y +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RPFILTER=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_MATCH_SRH=m -CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_TARGET_SYNPROXY=m CONFIG_IP6_NF_MANGLE=m @@ -267,76 +376,212 @@ CONFIG_IP6_NF_NAT=m CONFIG_IP6_NF_TARGET_MASQUERADE=m CONFIG_IP6_NF_TARGET_NPT=m CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_META=m +CONFIG_NFT_BRIDGE_REJECT=m 
+CONFIG_NF_CONNTRACK_BRIDGE=m CONFIG_BRIDGE_NF_EBTABLES=m CONFIG_BRIDGE_EBT_BROUTE=m CONFIG_BRIDGE_EBT_T_FILTER=m CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m CONFIG_BRIDGE_EBT_ARP=m CONFIG_BRIDGE_EBT_IP=m CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m CONFIG_BPFILTER=y -CONFIG_IP_SCTP=m -CONFIG_RDS=y +CONFIG_IP_DCCP=m +CONFIG_IP_DCCP_CCID2_DEBUG=y +CONFIG_IP_DCCP_CCID3_DEBUG=y +CONFIG_IP_DCCP_DEBUG=y +CONFIG_SCTP_DBG_OBJCNT=y +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_RDS=m +CONFIG_RDS_RDMA=m +CONFIG_RDS_TCP=m +CONFIG_RDS_DEBUG=y +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +CONFIG_ATM_CLIP_NO_ICMP=y +CONFIG_ATM_LANE=m +CONFIG_ATM_MPOA=m +CONFIG_ATM_BR2684=m +CONFIG_ATM_BR2684_IPFILTER=y CONFIG_L2TP=m CONFIG_L2TP_V3=y CONFIG_L2TP_IP=m CONFIG_L2TP_ETH=m CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_BRIDGE_MRP=y +CONFIG_NET_DSA=m +CONFIG_NET_DSA_TAG_AR9331=m +CONFIG_NET_DSA_TAG_BRCM=m +CONFIG_NET_DSA_TAG_BRCM_PREPEND=m +CONFIG_NET_DSA_TAG_GSWIP=m +CONFIG_NET_DSA_TAG_DSA=m +CONFIG_NET_DSA_TAG_EDSA=m +CONFIG_NET_DSA_TAG_MTK=m +CONFIG_NET_DSA_TAG_KSZ=m +CONFIG_NET_DSA_TAG_OCELOT=m +CONFIG_NET_DSA_TAG_QCA=m +CONFIG_NET_DSA_TAG_RTL4_A=m +CONFIG_NET_DSA_TAG_LAN9303=m +CONFIG_NET_DSA_TAG_SJA1105=m +CONFIG_NET_DSA_TAG_TRAILER=m CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y CONFIG_LLC2=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_X25=m +CONFIG_LAPB=m +CONFIG_PHONET=m +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +CONFIG_IEEE802154_NL802154_EXPERIMENTAL=y +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m CONFIG_NET_SCHED=y CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_TAPRIO=m +CONFIG_NET_SCH_GRED=m CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_FQ_PIE=m CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_ETS=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m -CONFIG_NET_CLS_CGROUP=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_EMATCH_IPT=m CONFIG_NET_CLS_ACT=y CONFIG_NET_ACT_POLICE=m CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m CONFIG_NET_ACT_IPT=m CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_MPLS=m 
+CONFIG_NET_ACT_VLAN=m CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_CONNMARK=m +CONFIG_NET_ACT_CTINFO=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_ACT_GATE=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=y +CONFIG_BATMAN_ADV=m +CONFIG_BATMAN_ADV_NC=y +CONFIG_BATMAN_ADV_DEBUG=y CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m -CONFIG_NETLINK_DIAG=y +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_HSR=m +CONFIG_QRTR=m +CONFIG_QRTR_TUN=m +CONFIG_NET_NCSI=y +CONFIG_NCSI_OEM_CMD_GET_MAC=y CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_CAN=m CONFIG_BT=m CONFIG_BT_RFCOMM=m CONFIG_BT_RFCOMM_TTY=y CONFIG_BT_BNEP=m CONFIG_BT_BNEP_MC_FILTER=y CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m CONFIG_BT_HIDP=m CONFIG_BT_HS=y CONFIG_BT_HCIBTUSB=m CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y -CONFIG_BT_HCIBTUSB_MTK=y +# CONFIG_BT_HCIBTUSB_BCM is not set +CONFIG_BT_HCIBTSDIO=m CONFIG_BT_HCIUART=m CONFIG_BT_HCIUART_BCSP=y CONFIG_BT_HCIUART_ATH3K=y -CONFIG_BT_HCIUART_INTEL=y -CONFIG_BT_HCIUART_AG6XX=y CONFIG_BT_HCIBCM203X=m CONFIG_BT_HCIBPA10X=m CONFIG_BT_HCIBFUSB=m -CONFIG_BT_HCIDTL1=m -CONFIG_BT_HCIBT3C=m -CONFIG_BT_HCIBLUECARD=m CONFIG_BT_HCIVHCI=m CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m CONFIG_BT_ATH3K=m -CONFIG_BT_VIRTIO=m CONFIG_CFG80211=m CONFIG_CFG80211_WEXT=y CONFIG_MAC80211=m @@ -344,15 +589,19 @@ CONFIG_RFKILL=m CONFIG_RFKILL_INPUT=y CONFIG_NET_9P=y CONFIG_NET_9P_VIRTIO=y -CONFIG_CEPH_LIB=m -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y CONFIG_PCIEAER=y -# CONFIG_PCIEASPM is not set +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCI_STUB=y +CONFIG_PCI_PF_STUB=m CONFIG_PCI_IOV=y -CONFIG_HOTPLUG_PCI=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI_ACPI=y CONFIG_HOTPLUG_PCI_SHPC=y CONFIG_PCCARD=m +# CONFIG_PCMCIA is not set CONFIG_YENTA=m CONFIG_RAPIDIO=y CONFIG_RAPIDIO_TSI721=y @@ -364,7 +613,12 @@ CONFIG_UEVENT_HELPER=y CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_FW_LOADER_COMPRESS=y -CONFIG_FW_LOADER_COMPRESS_ZSTD=y +CONFIG_CONNECTOR=y +CONFIG_DMI_SYSFS=y +CONFIG_ISCSI_IBFT=m +CONFIG_EFI_ZBOOT=y +CONFIG_EFI_CAPSULE_LOADER=m +CONFIG_EFI_TEST=m CONFIG_MTD=m CONFIG_MTD_BLOCK=m CONFIG_MTD_CFI=m @@ -374,22 +628,31 @@ CONFIG_MTD_CFI_AMDSTD=m CONFIG_MTD_CFI_STAA=m CONFIG_MTD_RAM=m CONFIG_MTD_ROM=m +CONFIG_MTD_BLOCK2MTD=m +CONFIG_MTD_SPI_NOR=m CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m CONFIG_MTD_UBI_BLOCK=y -CONFIG_PARPORT=y -CONFIG_PARPORT_PC=y -CONFIG_PARPORT_SERIAL=y +CONFIG_PARPORT=m +CONFIG_PARPORT_PC=m +CONFIG_PARPORT_SERIAL=m CONFIG_PARPORT_PC_FIFO=y +CONFIG_PARPORT_1284=y +# CONFIG_PNP_DEBUG_MESSAGES is not set +CONFIG_BLK_DEV_NULL_BLK=m CONFIG_ZRAM=m CONFIG_ZRAM_DEF_COMP_ZSTD=y -CONFIG_BLK_DEV_LOOP=y +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 CONFIG_BLK_DEV_DRBD=m CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM=m CONFIG_BLK_DEV_RAM_SIZE=8192 -CONFIG_VIRTIO_BLK=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=m CONFIG_BLK_DEV_RBD=m -CONFIG_BLK_DEV_NVME=y +CONFIG_BLK_DEV_NVME=m CONFIG_NVME_MULTIPATH=y CONFIG_NVME_RDMA=m CONFIG_NVME_FC=m @@ -399,18 +662,40 @@ CONFIG_NVME_TARGET_PASSTHRU=y CONFIG_NVME_TARGET_LOOP=m CONFIG_NVME_TARGET_RDMA=m CONFIG_NVME_TARGET_FC=m +CONFIG_NVME_TARGET_FCLOOP=m CONFIG_NVME_TARGET_TCP=m 
+CONFIG_ENCLOSURE_SERVICES=m +CONFIG_APDS9802ALS=m +CONFIG_ISL29003=m +CONFIG_ISL29020=m +CONFIG_SENSORS_TSL2550=m +CONFIG_SENSORS_BH1770=m +CONFIG_SENSORS_APDS990X=m CONFIG_EEPROM_AT24=m -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_CHR_DEV_SG=y +CONFIG_EEPROM_LEGACY=m +CONFIG_EEPROM_MAX6875=m +CONFIG_SENSORS_LIS3_I2C=m +CONFIG_MISC_RTSX_PCI=m +CONFIG_MISC_RTSX_USB=m +CONFIG_UACCE=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_LOGGING=y -CONFIG_SCSI_SPI_ATTRS=m +CONFIG_SCSI_SCAN_ASYNC=y CONFIG_SCSI_FC_ATTRS=m -CONFIG_SCSI_SAS_ATA=y CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_SCSI_BNX2X_FCOE=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AACRAID=m CONFIG_SCSI_MVSAS=y # CONFIG_SCSI_MVSAS_DEBUG is not set CONFIG_SCSI_MVSAS_TASKLET=y @@ -419,8 +704,10 @@ CONFIG_MEGARAID_NEWGEN=y CONFIG_MEGARAID_MM=y CONFIG_MEGARAID_MAILBOX=y CONFIG_MEGARAID_LEGACY=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT2SAS=y +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=y +CONFIG_SCSI_MPT2SAS=m +CONFIG_SCSI_SMARTPQI=m CONFIG_LIBFC=m CONFIG_LIBFCOE=m CONFIG_FCOE=m @@ -428,35 +715,46 @@ CONFIG_SCSI_QLOGIC_1280=m CONFIG_SCSI_QLA_FC=m CONFIG_TCM_QLA2XXX=m CONFIG_SCSI_QLA_ISCSI=m -CONFIG_SCSI_LPFC=m CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_CHELSIO_FCOE=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_DWC=y +CONFIG_ATA_PIIX=m CONFIG_PATA_ATIIXP=y -CONFIG_PATA_PCMCIA=m +CONFIG_ATA_GENERIC=m CONFIG_MD=y -CONFIG_BLK_DEV_MD=m +CONFIG_BLK_DEV_MD=y CONFIG_MD_LINEAR=m -CONFIG_MD_RAID0=m -CONFIG_MD_RAID1=m -CONFIG_MD_RAID10=m -CONFIG_MD_RAID456=m CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m CONFIG_BCACHE=m -CONFIG_BLK_DEV_DM=y +CONFIG_BLK_DEV_DM=m CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_THIN_PROVISIONING=m CONFIG_DM_CACHE=m CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m CONFIG_DM_RAID=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_DM_MULTIPATH_QL=m CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m CONFIG_TARGET_CORE=m CONFIG_TCM_IBLOCK=m CONFIG_TCM_FILEIO=m @@ -464,18 +762,45 @@ CONFIG_TCM_PSCSI=m CONFIG_TCM_USER2=m CONFIG_LOOPBACK_TARGET=m CONFIG_ISCSI_TARGET=m -CONFIG_NETDEVICES=y +CONFIG_ISCSI_TARGET_CXGB4=m +CONFIG_FUSION=y +CONFIG_FUSION_SPI=m +CONFIG_FUSION_SAS=m +CONFIG_FUSION_CTL=m +CONFIG_FUSION_LOGGING=y +CONFIG_FIREWIRE=m +CONFIG_FIREWIRE_OHCI=m +CONFIG_FIREWIRE_SBP2=m +CONFIG_FIREWIRE_NET=m CONFIG_BONDING=m -CONFIG_DUMMY=y +CONFIG_DUMMY=m CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m -CONFIG_VXLAN=y +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_NTB_NETDEV=m CONFIG_RIONET=m CONFIG_TUN=m CONFIG_VETH=m CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set @@ -487,36 +812,63 @@ 
CONFIG_VIRTIO_NET=m # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set CONFIG_BNX2=y +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y # CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_CHELSIO_T1=m CONFIG_CHELSIO_T1_1G=y CONFIG_CHELSIO_T3=m -CONFIG_CHELSIO_T4=m +CONFIG_CHELSIO_T4VF=m +CONFIG_CHELSIO_IPSEC_INLINE=m # CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_CORTINA is not set +CONFIG_DNET=m # CONFIG_NET_VENDOR_DEC is not set # CONFIG_NET_VENDOR_DLINK is not set # CONFIG_NET_VENDOR_EMULEX is not set # CONFIG_NET_VENDOR_EZCHIP is not set # CONFIG_NET_VENDOR_I825XX is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IXGBE=y +CONFIG_E1000=m +CONFIG_E1000E=m +CONFIG_IGB=m +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40E_DCB=y +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m # CONFIG_NET_VENDOR_MARVELL is not set -# CONFIG_NET_VENDOR_MELLANOX is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m # CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MICROCHIP is not set +# CONFIG_NET_VENDOR_MICROSEMI is not set # CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NI is not set # CONFIG_NET_VENDOR_NATSEMI is not set # CONFIG_NET_VENDOR_NETRONOME is not set # CONFIG_NET_VENDOR_NVIDIA is not set # CONFIG_NET_VENDOR_OKI is not set +CONFIG_ETHOC=m # CONFIG_NET_VENDOR_QLOGIC is not set # CONFIG_NET_VENDOR_BROCADE is not set # CONFIG_NET_VENDOR_QUALCOMM is not set # CONFIG_NET_VENDOR_RDC is not set CONFIG_8139CP=m CONFIG_8139TOO=m -CONFIG_R8169=y +# CONFIG_8139TOO_PIO is not set +CONFIG_8139TOO_8129=y +CONFIG_R8169=m # CONFIG_NET_VENDOR_RENESAS is not set # CONFIG_NET_VENDOR_ROCKER is not set # CONFIG_NET_VENDOR_SAMSUNG is not set @@ -525,46 +877,150 @@ CONFIG_R8169=y # CONFIG_NET_VENDOR_SIS is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_SOCIONEXT is not set CONFIG_STMMAC_ETH=y # CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_TEHUTI is not set # CONFIG_NET_VENDOR_TI is not set # CONFIG_NET_VENDOR_VIA is not set -CONFIG_NGBE=y -CONFIG_TXGBE=y +CONFIG_NGBE=m +CONFIG_TXGBE=m # CONFIG_NET_VENDOR_WIZNET is not set # CONFIG_NET_VENDOR_XILINX is not set +CONFIG_LED_TRIGGER_PHY=y +CONFIG_SFP=y +CONFIG_AMD_PHY=m +CONFIG_AQUANTIA_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_BCM7XXX_PHY=m +CONFIG_BCM87XX_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=y +CONFIG_MICREL_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +CONFIG_NATIONAL_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +CONFIG_MICREL_KS8995MA=m +CONFIG_CAN_VCAN=m +CONFIG_CAN_SLCAN=m +CONFIG_CAN_C_CAN=m +CONFIG_CAN_C_CAN_PLATFORM=m +CONFIG_CAN_C_CAN_PCI=m +CONFIG_CAN_CC770=m +CONFIG_CAN_CC770_PLATFORM=m +CONFIG_CAN_SJA1000=m +CONFIG_CAN_EMS_PCI=m +CONFIG_CAN_KVASER_PCI=m +CONFIG_CAN_PEAK_PCI=m +CONFIG_CAN_PLX_PCI=m +CONFIG_CAN_SJA1000_PLATFORM=m +CONFIG_CAN_SOFTING=m +CONFIG_CAN_8DEV_USB=m +CONFIG_CAN_EMS_USB=m +CONFIG_CAN_KVASER_USB=m 
+CONFIG_CAN_PEAK_USB=m +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_MSCC_MIIM=m +CONFIG_MDIO_THUNDER=m CONFIG_PPP=m CONFIG_PPP_BSDCOMP=m CONFIG_PPP_DEFLATE=m CONFIG_PPP_FILTER=y CONFIG_PPP_MPPE=m CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m CONFIG_PPPOE=m CONFIG_PPTP=m CONFIG_PPPOL2TP=m CONFIG_PPP_ASYNC=m CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m CONFIG_USB_RTL8150=m CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m # CONFIG_USB_NET_AX8817X is not set # CONFIG_USB_NET_AX88179_178A is not set CONFIG_USB_NET_CDC_EEM=m CONFIG_USB_NET_HUAWEI_CDC_NCM=m CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m # CONFIG_USB_NET_NET1080 is not set +CONFIG_USB_NET_PLUSB=m +CONFIG_USB_NET_MCS7830=m +CONFIG_USB_ALI_M5632=y +CONFIG_USB_AN2720=y # CONFIG_USB_BELKIN is not set # CONFIG_USB_ARMLINUX is not set +CONFIG_USB_EPSON2888=y +CONFIG_USB_KC2190=y # CONFIG_USB_NET_ZAURUS is not set +CONFIG_USB_NET_CX82310_ETH=m +CONFIG_USB_NET_KALMIA=m +CONFIG_USB_NET_QMI_WWAN=m +CONFIG_USB_HSO=m +CONFIG_USB_NET_INT51X1=m +CONFIG_USB_IPHETH=m +CONFIG_USB_SIERRA_NET=m +CONFIG_USB_VL600=m +CONFIG_USB_NET_CH9200=m +# CONFIG_WLAN_VENDOR_ADMTEK is not set CONFIG_ATH9K=m +CONFIG_ATH9K_AHB=y +CONFIG_ATH9K_WOW=y CONFIG_ATH9K_HTC=m +CONFIG_ATH10K=m +CONFIG_ATH10K_PCI=m +# CONFIG_WLAN_VENDOR_ATMEL is not set +CONFIG_BRCMSMAC=m +CONFIG_BRCMFMAC=m +CONFIG_BRCMFMAC_USB=y +CONFIG_BRCMFMAC_PCIE=y +# CONFIG_WLAN_VENDOR_CISCO is not set CONFIG_IWLWIFI=m CONFIG_IWLDVM=m CONFIG_IWLMVM=m -CONFIG_HOSTAP=m +# CONFIG_WLAN_VENDOR_INTERSIL is not set +CONFIG_MWIFIEX=m +CONFIG_MWIFIEX_SDIO=m +CONFIG_MWIFIEX_PCIE=m +CONFIG_MWIFIEX_USB=m CONFIG_MT7601U=m +CONFIG_MT76x0U=m +CONFIG_MT76x2U=m CONFIG_RT2X00=m +CONFIG_RT2800PCI=m CONFIG_RT2800USB=m +CONFIG_RT2800USB_RT3573=y +CONFIG_RT2800USB_RT53XX=y +CONFIG_RT2800USB_RT55XX=y +CONFIG_RT2800USB_UNKNOWN=y CONFIG_RTL8192CE=m CONFIG_RTL8192SE=m CONFIG_RTL8192DE=m @@ -576,29 +1032,80 @@ CONFIG_RTL8821AE=m CONFIG_RTL8192CU=m # CONFIG_RTLWIFI_DEBUG is not set CONFIG_RTL8XXXU=m -CONFIG_RTW88=m -CONFIG_RTW88_8822BE=m -CONFIG_RTW88_8822CE=m -CONFIG_RTW88_8723DE=m -CONFIG_RTW88_8821CE=m -CONFIG_RTW89=m -CONFIG_RTW89_8852AE=m -CONFIG_RTW89_8852CE=m +# CONFIG_WLAN_VENDOR_RSI is not set +# CONFIG_WLAN_VENDOR_ST is not set +# CONFIG_WLAN_VENDOR_TI is not set CONFIG_ZD1211RW=m CONFIG_USB_NET_RNDIS_WLAN=m +CONFIG_MAC80211_HWSIM=m +CONFIG_WAN=y +CONFIG_HDLC=m +CONFIG_HDLC_RAW=m +CONFIG_HDLC_CISCO=m +CONFIG_HDLC_FR=m +CONFIG_HDLC_PPP=m +CONFIG_IEEE802154_FAKELB=m +CONFIG_VMXNET3=m +CONFIG_FUJITSU_ES=m +CONFIG_USB4_NET=m +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_MISDN=m +CONFIG_MISDN_DSP=m +CONFIG_MISDN_L1OIP=m +CONFIG_MISDN_HFCPCI=m +CONFIG_MISDN_HFCMULTI=m +CONFIG_MISDN_HFCUSB=m +CONFIG_MISDN_AVMFRITZ=m +CONFIG_MISDN_SPEEDFAX=m +CONFIG_MISDN_INFINEON=m +CONFIG_MISDN_W6692=m +CONFIG_MISDN_NETJET=m CONFIG_INPUT_MOUSEDEV=y CONFIG_INPUT_MOUSEDEV_PSAUX=y +CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y CONFIG_KEYBOARD_XTKBD=m CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_MOUSE_PS2_SENTELIC=y CONFIG_MOUSE_SERIAL=m +CONFIG_MOUSE_APPLETOUCH=m +CONFIG_MOUSE_BCM5974=m +CONFIG_MOUSE_CYAPA=m +CONFIG_MOUSE_ELAN_I2C=m +CONFIG_MOUSE_ELAN_I2C_SMBUS=y +CONFIG_MOUSE_VSXXXAA=m +CONFIG_MOUSE_SYNAPTICS_I2C=m +CONFIG_MOUSE_SYNAPTICS_USB=m +CONFIG_INPUT_TABLET=y +CONFIG_TABLET_USB_ACECAD=m +CONFIG_TABLET_USB_AIPTEK=m +CONFIG_TABLET_USB_KBTAB=m +CONFIG_TABLET_SERIAL_WACOM4=m 
+CONFIG_INPUT_TOUCHSCREEN=y +CONFIG_TOUCHSCREEN_ELO=m +CONFIG_TOUCHSCREEN_WACOM_W8001=m +CONFIG_TOUCHSCREEN_WACOM_I2C=m CONFIG_INPUT_MISC=y +CONFIG_INPUT_ATI_REMOTE2=m +CONFIG_INPUT_KEYSPAN_REMOTE=m +CONFIG_INPUT_POWERMATE=m +CONFIG_INPUT_YEALINK=m +CONFIG_INPUT_CM109=m CONFIG_INPUT_UINPUT=m +CONFIG_INPUT_GPIO_ROTARY_ENCODER=m +CONFIG_RMI4_I2C=m +CONFIG_RMI4_SPI=m +CONFIG_RMI4_SMB=m +CONFIG_RMI4_F34=y +CONFIG_RMI4_F55=y CONFIG_SERIO_SERPORT=m CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m CONFIG_LEGACY_PTY_COUNT=16 CONFIG_SERIAL_8250=y +# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=16 CONFIG_SERIAL_8250_RUNTIME_UARTS=16 @@ -606,37 +1113,190 @@ CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_SHARE_IRQ=y CONFIG_SERIAL_8250_RSA=y -CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_JSM=m +CONFIG_SERIAL_ARC=m CONFIG_SERIAL_NONSTANDARD=y +CONFIG_N_HDLC=m +CONFIG_N_GSM=m +CONFIG_NOZOMI=m CONFIG_PRINTER=m +CONFIG_PPDEV=m CONFIG_VIRTIO_CONSOLE=y CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=m -CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_TIMERIOMEM=m CONFIG_HW_RANDOM_VIRTIO=m +CONFIG_TCG_TIS_SPI=m +CONFIG_TCG_TIS_I2C_ATMEL=m +CONFIG_TCG_TIS_I2C_INFINEON=m +CONFIG_TCG_TIS_I2C_NUVOTON=m +CONFIG_TCG_ATMEL=m +CONFIG_TCG_INFINEON=m +CONFIG_TCG_TIS_ST33ZP24_I2C=m +CONFIG_TCG_TIS_ST33ZP24_SPI=m CONFIG_I2C_CHARDEV=y +CONFIG_I2C_AMD756=m +CONFIG_I2C_AMD8111=m +CONFIG_I2C_ISCH=m CONFIG_I2C_PIIX4=y +CONFIG_I2C_NFORCE2=m +CONFIG_I2C_SIS96X=m +CONFIG_I2C_VIA=m +CONFIG_I2C_VIAPRO=m +CONFIG_I2C_SCMI=m +CONFIG_I2C_DESIGNWARE_PLATFORM=y CONFIG_I2C_GPIO=y -CONFIG_I2C_LS2X=y +CONFIG_I2C_LS2X=m +CONFIG_I2C_PCA_PLATFORM=m +CONFIG_I2C_SIMTEC=m +CONFIG_I2C_DIOLAN_U2C=m +CONFIG_I2C_PARPORT=m +CONFIG_I2C_TINY_USB=m +CONFIG_I2C_VIPERBOARD=m +CONFIG_I2C_STUB=m CONFIG_SPI=y -CONFIG_SPI_LOONGSON_PCI=m +CONFIG_SPI_LOONGSON_PCI=y CONFIG_SPI_LOONGSON_PLATFORM=m +CONFIG_PPS_CLIENT_LDISC=m +CONFIG_PPS_CLIENT_PARPORT=m +CONFIG_PPS_CLIENT_GPIO=m +CONFIG_DP83640_PHY=m CONFIG_PINCTRL=y CONFIG_PINCTRL_LOONGSON2=y CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_LOONGSON=y +CONFIG_GPIO_AMDPT=m CONFIG_GPIO_LOONGSON_64BIT=y +CONFIG_GPIO_VIPERBOARD=m CONFIG_POWER_RESET=y -CONFIG_POWER_RESET_RESTART=y -CONFIG_POWER_RESET_SYSCON=y -CONFIG_POWER_RESET_SYSCON_POWEROFF=y -CONFIG_SYSCON_REBOOT_MODE=y +CONFIG_SENSORS_AD7414=m +CONFIG_SENSORS_AD7418=m +CONFIG_SENSORS_ADM1025=m +CONFIG_SENSORS_ADM1026=m +CONFIG_SENSORS_ADM1029=m +CONFIG_SENSORS_ADM1031=m +CONFIG_SENSORS_ADM9240=m +CONFIG_SENSORS_ADT7410=m +CONFIG_SENSORS_ADT7411=m +CONFIG_SENSORS_ADT7462=m +CONFIG_SENSORS_ADT7470=m +CONFIG_SENSORS_ADT7475=m +CONFIG_SENSORS_ASC7621=m +CONFIG_SENSORS_ATXP1=m +CONFIG_SENSORS_DS620=m +CONFIG_SENSORS_DS1621=m +CONFIG_SENSORS_I5K_AMB=m +CONFIG_SENSORS_F71805F=m +CONFIG_SENSORS_F71882FG=m +CONFIG_SENSORS_F75375S=m +CONFIG_SENSORS_GL518SM=m +CONFIG_SENSORS_GL520SM=m +CONFIG_SENSORS_G760A=m +CONFIG_SENSORS_IBMAEM=m +CONFIG_SENSORS_IBMPEX=m +CONFIG_SENSORS_IT87=m +CONFIG_SENSORS_JC42=m +CONFIG_SENSORS_LINEAGE=m +CONFIG_SENSORS_LTC4151=m +CONFIG_SENSORS_LTC4215=m +CONFIG_SENSORS_LTC4245=m +CONFIG_SENSORS_LTC4261=m +CONFIG_SENSORS_MAX16065=m +CONFIG_SENSORS_MAX1619=m +CONFIG_SENSORS_MAX1668=m +CONFIG_SENSORS_MAX197=m +CONFIG_SENSORS_MAX6639=m +CONFIG_SENSORS_MAX6650=m +CONFIG_SENSORS_MAX6697=m 
+CONFIG_SENSORS_MCP3021=m +CONFIG_SENSORS_LM63=m +CONFIG_SENSORS_LM73=m CONFIG_SENSORS_LM75=m +CONFIG_SENSORS_LM77=m +CONFIG_SENSORS_LM78=m +CONFIG_SENSORS_LM80=m +CONFIG_SENSORS_LM83=m +CONFIG_SENSORS_LM85=m +CONFIG_SENSORS_LM87=m +CONFIG_SENSORS_LM90=m +CONFIG_SENSORS_LM92=m CONFIG_SENSORS_LM93=m +CONFIG_SENSORS_LM95234=m +CONFIG_SENSORS_LM95241=m +CONFIG_SENSORS_LM95245=m +CONFIG_SENSORS_PC87360=m +CONFIG_SENSORS_PC87427=m +CONFIG_SENSORS_NTC_THERMISTOR=m +CONFIG_SENSORS_NCT6775=m +CONFIG_SENSORS_PCF8591=m +CONFIG_PMBUS=m +CONFIG_SENSORS_ADM1275=m +CONFIG_SENSORS_LM25066=m +CONFIG_SENSORS_LTC2978=m +CONFIG_SENSORS_MAX16064=m +CONFIG_SENSORS_MAX34440=m +CONFIG_SENSORS_MAX8688=m +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +CONFIG_SENSORS_ZL6100=m +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +CONFIG_SENSORS_ADS7828=m +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m CONFIG_SENSORS_W83795=m +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_EMULATION=y CONFIG_LOONGSON2_THERMAL=m +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_GPIO_WATCHDOG=m +CONFIG_WDAT_WDT=m +CONFIG_ALIM7101_WDT=m +CONFIG_I6300ESB_WDT=m +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m +CONFIG_USBPCWATCHDOG=m +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +CONFIG_MFD_VIPERBOARD=m +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +CONFIG_MFD_VX855=m CONFIG_RC_CORE=m CONFIG_LIRC=y CONFIG_RC_DECODERS=y @@ -650,99 +1310,580 @@ CONFIG_IR_SANYO_DECODER=m CONFIG_IR_SHARP_DECODER=m CONFIG_IR_SONY_DECODER=m CONFIG_IR_XMP_DECODER=m +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +CONFIG_IR_SERIAL_TRANSMITTER=y +CONFIG_IR_STREAMZAP=m +CONFIG_IR_TTUSBIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m CONFIG_MEDIA_SUPPORT=m +CONFIG_DVB_MAX_ADAPTERS=8 CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_USB_GSPCA=m +CONFIG_USB_GSPCA_BENQ=m +CONFIG_USB_GSPCA_CONEX=m +CONFIG_USB_GSPCA_CPIA1=m +CONFIG_USB_GSPCA_ETOMS=m +CONFIG_USB_GSPCA_FINEPIX=m +CONFIG_USB_GSPCA_JEILINJ=m +CONFIG_USB_GSPCA_JL2005BCD=m +CONFIG_USB_GSPCA_KONICA=m +CONFIG_USB_GSPCA_MARS=m +CONFIG_USB_GSPCA_MR97310A=m +CONFIG_USB_GSPCA_NW80X=m +CONFIG_USB_GSPCA_OV519=m +CONFIG_USB_GSPCA_OV534=m +CONFIG_USB_GSPCA_OV534_9=m +CONFIG_USB_GSPCA_PAC207=m +CONFIG_USB_GSPCA_PAC7302=m +CONFIG_USB_GSPCA_PAC7311=m +CONFIG_USB_GSPCA_SE401=m +CONFIG_USB_GSPCA_SN9C2028=m +CONFIG_USB_GSPCA_SN9C20X=m +CONFIG_USB_GSPCA_SONIXB=m +CONFIG_USB_GSPCA_SONIXJ=m +CONFIG_USB_GSPCA_SPCA1528=m +CONFIG_USB_GSPCA_SPCA500=m +CONFIG_USB_GSPCA_SPCA501=m +CONFIG_USB_GSPCA_SPCA505=m +CONFIG_USB_GSPCA_SPCA506=m +CONFIG_USB_GSPCA_SPCA508=m +CONFIG_USB_GSPCA_SPCA561=m +CONFIG_USB_GSPCA_SQ905=m 
+CONFIG_USB_GSPCA_SQ905C=m +CONFIG_USB_GSPCA_SQ930X=m +CONFIG_USB_GSPCA_STK014=m +CONFIG_USB_GSPCA_STV0680=m +CONFIG_USB_GSPCA_SUNPLUS=m +CONFIG_USB_GSPCA_T613=m +CONFIG_USB_GSPCA_TOPRO=m +CONFIG_USB_GSPCA_TV8532=m +CONFIG_USB_GSPCA_VC032X=m +CONFIG_USB_GSPCA_VICAM=m +CONFIG_USB_GSPCA_XIRLINK_CIT=m +CONFIG_USB_GSPCA_ZC3XX=m +CONFIG_USB_GL860=m +CONFIG_USB_M5602=m +CONFIG_USB_STV06XX=m +CONFIG_USB_PWC=m +CONFIG_USB_S2255=m CONFIG_USB_VIDEO_CLASS=m +CONFIG_VIDEO_HDPVR=m +CONFIG_VIDEO_PVRUSB2=m +CONFIG_VIDEO_AU0828=m +CONFIG_DVB_B2C2_FLEXCOP_USB=m +CONFIG_DVB_USB_V2=m +CONFIG_DVB_USB_AF9035=m +CONFIG_DVB_USB_ANYSEE=m +CONFIG_DVB_USB_AU6610=m +CONFIG_DVB_USB_AZ6007=m +CONFIG_DVB_USB_CE6230=m +CONFIG_DVB_USB_EC168=m +CONFIG_DVB_USB_GL861=m +CONFIG_DVB_USB_LME2510=m +CONFIG_DVB_USB_MXL111SF=m +CONFIG_DVB_USB=m +CONFIG_DVB_USB_A800=m +CONFIG_DVB_USB_AF9005=m +CONFIG_DVB_USB_AF9005_REMOTE=m +CONFIG_DVB_USB_AZ6027=m +CONFIG_DVB_USB_CINERGY_T2=m +CONFIG_DVB_USB_CXUSB=m +CONFIG_DVB_USB_DIB0700=m +CONFIG_DVB_USB_DIBUSB_MB=m +CONFIG_DVB_USB_DIBUSB_MC=m +CONFIG_DVB_USB_DIGITV=m +CONFIG_DVB_USB_DTT200U=m +CONFIG_DVB_USB_DTV5100=m +CONFIG_DVB_USB_DW2102=m +CONFIG_DVB_USB_GP8PSK=m +CONFIG_DVB_USB_M920X=m +CONFIG_DVB_USB_NOVA_T_USB2=m +CONFIG_DVB_USB_OPERA1=m +CONFIG_DVB_USB_PCTV452E=m +CONFIG_DVB_USB_TECHNISAT_USB2=m +CONFIG_DVB_USB_TTUSB2=m +CONFIG_DVB_USB_UMT_010=m +CONFIG_DVB_USB_VP702X=m +CONFIG_DVB_USB_VP7045=m +CONFIG_SMS_USB_DRV=m +CONFIG_DVB_TTUSB_BUDGET=m +CONFIG_DVB_TTUSB_DEC=m +CONFIG_VIDEO_EM28XX=m +CONFIG_VIDEO_EM28XX_ALSA=m +CONFIG_VIDEO_EM28XX_DVB=m CONFIG_MEDIA_PCI_SUPPORT=y +CONFIG_VIDEO_IVTV=m +CONFIG_VIDEO_FB_IVTV=m CONFIG_VIDEO_BT848=m CONFIG_DVB_BT8XX=m +CONFIG_VIDEO_CX18=m +CONFIG_VIDEO_CX23885=m +CONFIG_MEDIA_ALTERA_CI=m +CONFIG_VIDEO_CX88=m +CONFIG_VIDEO_CX88_ALSA=m +CONFIG_VIDEO_CX88_BLACKBIRD=m +CONFIG_VIDEO_CX88_DVB=m +# CONFIG_VIDEO_CX88_ENABLE_VP3054 is not set +CONFIG_VIDEO_SAA7134=m +CONFIG_VIDEO_SAA7134_ALSA=m +CONFIG_VIDEO_SAA7134_DVB=m +CONFIG_VIDEO_SAA7164=m +CONFIG_DVB_B2C2_FLEXCOP_PCI=m +CONFIG_DVB_DDBRIDGE=m +CONFIG_DVB_DM1105=m +CONFIG_MANTIS_CORE=m +CONFIG_DVB_MANTIS=m +CONFIG_DVB_HOPPER=m +CONFIG_DVB_NGENE=m +CONFIG_DVB_PLUTO2=m +CONFIG_DVB_PT1=m +CONFIG_DVB_BUDGET_CORE=m +CONFIG_DVB_BUDGET=m +CONFIG_DVB_BUDGET_CI=m +CONFIG_DVB_BUDGET_AV=m +CONFIG_SMS_SDIO_DRV=m +CONFIG_DVB_FIREDTV=m CONFIG_DRM=y +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DP_AUX_CHARDEV=y +CONFIG_DRM_DP_CEC=y +# CONFIG_DRM_I2C_CH7006 is not set +# CONFIG_DRM_I2C_SIL164 is not set CONFIG_DRM_RADEON=m CONFIG_DRM_RADEON_USERPTR=y CONFIG_DRM_AMDGPU=m CONFIG_DRM_AMDGPU_SI=y CONFIG_DRM_AMDGPU_CIK=y CONFIG_DRM_AMDGPU_USERPTR=y +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_VKMS=m +CONFIG_DRM_UDL=m CONFIG_DRM_AST=y +CONFIG_DRM_MGAG200=m CONFIG_DRM_QXL=m CONFIG_DRM_VIRTIO_GPU=m CONFIG_DRM_LOONGSON=y +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m CONFIG_FB=y CONFIG_FB_EFI=y CONFIG_FB_RADEON=y -CONFIG_LCD_CLASS_DEVICE=y +CONFIG_FB_LS2K500=m +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=m CONFIG_LCD_PLATFORM=m +CONFIG_BACKLIGHT_LP855X=m # CONFIG_VGA_CONSOLE is not set -CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set CONFIG_SOUND=y CONFIG_SND=y +CONFIG_SND_OSSEMUL=y +CONFIG_SND_HRTIMER=m +# CONFIG_SND_SUPPORT_OLD_API is not set CONFIG_SND_SEQUENCER=m CONFIG_SND_SEQ_DUMMY=m +CONFIG_SND_SEQUENCER_OSS=m +CONFIG_SND_DUMMY=m +CONFIG_SND_ALOOP=m +CONFIG_SND_VIRMIDI=m +CONFIG_SND_MTPAV=m +CONFIG_SND_MPU401=m 
+CONFIG_SND_AC97_POWER_SAVE=y +CONFIG_SND_AC97_POWER_SAVE_DEFAULT=5 +CONFIG_SND_AD1889=m +CONFIG_SND_ATIIXP=m +CONFIG_SND_ATIIXP_MODEM=m +CONFIG_SND_AU8810=m +CONFIG_SND_AU8820=m +CONFIG_SND_AU8830=m CONFIG_SND_BT87X=m CONFIG_SND_BT87X_OVERCLOCK=y -CONFIG_SND_HDA_INTEL=y +CONFIG_SND_CA0106=m +CONFIG_SND_CMIPCI=m +CONFIG_SND_OXYGEN=m +CONFIG_SND_CS46XX=m +CONFIG_SND_CTXFI=m +CONFIG_SND_DARLA20=m +CONFIG_SND_GINA20=m +CONFIG_SND_LAYLA20=m +CONFIG_SND_DARLA24=m +CONFIG_SND_GINA24=m +CONFIG_SND_LAYLA24=m +CONFIG_SND_MONA=m +CONFIG_SND_MIA=m +CONFIG_SND_ECHO3G=m +CONFIG_SND_INDIGO=m +CONFIG_SND_INDIGOIO=m +CONFIG_SND_INDIGODJ=m +CONFIG_SND_INDIGOIOX=m +CONFIG_SND_INDIGODJX=m +CONFIG_SND_ENS1370=m +CONFIG_SND_ENS1371=m +CONFIG_SND_HDSP=m +CONFIG_SND_HDSPM=m +CONFIG_SND_ICE1724=m +CONFIG_SND_INTEL8X0=m +CONFIG_SND_INTEL8X0M=m +CONFIG_SND_KORG1212=m +CONFIG_SND_LOLA=m +CONFIG_SND_LX6464ES=m +CONFIG_SND_MIXART=m +CONFIG_SND_PCXHR=m +CONFIG_SND_RME32=m +CONFIG_SND_RME96=m +CONFIG_SND_RME9652=m +CONFIG_SND_VIA82XX=m +CONFIG_SND_VIA82XX_MODEM=m +CONFIG_SND_VIRTUOSO=m +CONFIG_SND_VX222=m +CONFIG_SND_HDA_INTEL=m CONFIG_SND_HDA_HWDEP=y CONFIG_SND_HDA_INPUT_BEEP=y +CONFIG_SND_HDA_INPUT_BEEP_MODE=0 CONFIG_SND_HDA_PATCH_LOADER=y -CONFIG_SND_HDA_CODEC_REALTEK=y -CONFIG_SND_HDA_CODEC_SIGMATEL=y -CONFIG_SND_HDA_CODEC_HDMI=y -CONFIG_SND_HDA_CODEC_CONEXANT=y +CONFIG_SND_HDA_CODEC_REALTEK=m +CONFIG_SND_HDA_CODEC_ANALOG=m +CONFIG_SND_HDA_CODEC_SIGMATEL=m +CONFIG_SND_HDA_CODEC_VIA=m +CONFIG_SND_HDA_CODEC_HDMI=m +CONFIG_SND_HDA_CODEC_CIRRUS=m +CONFIG_SND_HDA_CODEC_CONEXANT=m +CONFIG_SND_HDA_CODEC_CA0110=m +CONFIG_SND_HDA_CODEC_CA0132=m +CONFIG_SND_HDA_CODEC_CMEDIA=m +CONFIG_SND_HDA_CODEC_SI3054=m +CONFIG_SND_HDA_PREALLOC_SIZE=512 +# CONFIG_SND_SPI is not set CONFIG_SND_USB_AUDIO=m +CONFIG_SND_USB_UA101=m +CONFIG_SND_USB_CAIAQ=m +CONFIG_SND_USB_CAIAQ_INPUT=y +CONFIG_SND_USB_6FIRE=m +CONFIG_SND_USB_HIFACE=m +CONFIG_SND_BCD2000=m +CONFIG_SND_USB_POD=m +CONFIG_SND_USB_PODHD=m +CONFIG_SND_USB_TONEPORT=m +CONFIG_SND_USB_VARIAX=m +CONFIG_SND_DICE=m +CONFIG_SND_OXFW=m +CONFIG_SND_ISIGHT=m +CONFIG_SND_FIREWORKS=m +CONFIG_SND_BEBOB=m +CONFIG_SND_FIREWIRE_DIGI00X=m +CONFIG_SND_FIREWIRE_TASCAM=m +CONFIG_SND_FIREWIRE_MOTU=m +CONFIG_SND_FIREFACE=m +CONFIG_SND_SOC=m +CONFIG_HID_BATTERY_STRENGTH=y CONFIG_HIDRAW=y CONFIG_UHID=m CONFIG_HID_A4TECH=m +CONFIG_HID_ACRUX=m +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +CONFIG_HID_PRODIKEYS=m +CONFIG_HID_CMEDIA=m +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +CONFIG_HID_EZKEY=m +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +CONFIG_HID_HOLTEK=m +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LENOVO=m CONFIG_HID_LOGITECH=m CONFIG_HID_LOGITECH_DJ=m CONFIG_LOGITECH_FF=y CONFIG_LOGIRUMBLEPAD2_FF=y CONFIG_LOGIG940_FF=y +CONFIG_HID_MAGICMOUSE=y CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m CONFIG_HID_MULTITOUCH=m +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PLANTRONICS=m +CONFIG_HID_PRIMAX=m +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m 
+CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +CONFIG_HID_STEELSERIES=m CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +CONFIG_HID_SMARTJOYPLUS=m +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +CONFIG_HID_PID=y CONFIG_USB_HIDDEV=y +CONFIG_I2C_HID=m +CONFIG_USB_LED_TRIG=y CONFIG_USB=y -CONFIG_USB_OTG=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_LEDS_TRIGGER_USBPORT=m CONFIG_USB_MON=y CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PLATFORM=m CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_UHCI_HCD=m -CONFIG_USB_ACM=m +CONFIG_USB_UHCI_HCD=y CONFIG_USB_PRINTER=m +CONFIG_USB_TMC=m CONFIG_USB_STORAGE=m CONFIG_USB_STORAGE_REALTEK=m +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m CONFIG_USB_UAS=m +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m CONFIG_USB_DWC2=y CONFIG_USB_DWC2_HOST=y CONFIG_USB_SERIAL=m +CONFIG_USB_SERIAL_GENERIC=y +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m +CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +CONFIG_USB_SERIAL_DEBUG=m +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +CONFIG_USB_IOWARRIOR=m +CONFIG_USB_ISIGHTFW=m +CONFIG_USB_HSIC_USB3503=m +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m CONFIG_USB_GADGET=y CONFIG_TYPEC=m CONFIG_TYPEC_TCPM=m CONFIG_TYPEC_TCPCI=m +CONFIG_TYPEC_RT1711H=m +CONFIG_TYPEC_FUSB302=m CONFIG_TYPEC_UCSI=m CONFIG_UCSI_ACPI=m +CONFIG_TYPEC_TPS6598X=m 
+CONFIG_TYPEC_MUX_PI3USB30532=m +CONFIG_TYPEC_DP_ALTMODE=m +CONFIG_MMC=m +CONFIG_SDIO_UART=m +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +CONFIG_MMC_TIFM_SD=m +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_SDHCI_XENON=m +CONFIG_MEMSTICK=m +CONFIG_MSPRO_BLOCK=m +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_LM3530=m +CONFIG_LEDS_LP3944=m +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +CONFIG_LEDS_TRIGGER_AUDIO=y CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_RDMA_RXE=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_EFI=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_DS1374=m +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +CONFIG_RTC_DRV_FM3130=m +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +CONFIG_RTC_DRV_RV8803=m +CONFIG_RTC_DRV_RX4581=m +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_EFI=m +CONFIG_RTC_DRV_STK17TA8=m +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m CONFIG_RTC_DRV_LOONGSON=y CONFIG_DMADEVICES=y -CONFIG_UIO=m +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_UIO_CIF=m CONFIG_UIO_PDRV_GENIRQ=m CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m CONFIG_UIO_PCI_GENERIC=m CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y CONFIG_VFIO_PCI=m CONFIG_VIRTIO_PCI=y CONFIG_VIRTIO_BALLOON=m @@ -778,7 +1919,27 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y CONFIG_DEVFREQ_GOV_PERFORMANCE=y CONFIG_DEVFREQ_GOV_POWERSAVE=y CONFIG_DEVFREQ_GOV_USERSPACE=y +CONFIG_IIO=m +CONFIG_HID_SENSOR_ACCEL_3D=m +CONFIG_HID_SENSOR_GYRO_3D=m +CONFIG_HID_SENSOR_HUMIDITY=m +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +CONFIG_HID_SENSOR_PRESS=m +CONFIG_HID_SENSOR_TEMP=m +CONFIG_NTB=m +CONFIG_NTB_PINGPONG=m +CONFIG_NTB_TOOL=m +CONFIG_NTB_PERF=m +CONFIG_NTB_TRANSPORT=m CONFIG_PWM=y +CONFIG_POWERCAP=y +CONFIG_USB4=m +CONFIG_DAX=y +CONFIG_DEV_DAX=m CONFIG_EXT2_FS=y CONFIG_EXT2_FS_XATTR=y CONFIG_EXT2_FS_POSIX_ACL=y @@ -799,37 +1960,41 @@ CONFIG_BTRFS_FS=y CONFIG_BTRFS_FS_POSIX_ACL=y CONFIG_FANOTIFY=y CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -CONFIG_QUOTA=y -# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_QUOTA_NETLINK_INTERFACE=y 
CONFIG_QFMT_V1=m -CONFIG_QFMT_V2=m +CONFIG_QFMT_V2=y CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_CUSE=m CONFIG_VIRTIO_FS=m CONFIG_OVERLAY_FS=y +# CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW is not set CONFIG_OVERLAY_FS_INDEX=y CONFIG_OVERLAY_FS_XINO_AUTO=y CONFIG_OVERLAY_FS_METACOPY=y -CONFIG_FSCACHE=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y CONFIG_CACHEFILES=m -CONFIG_ISO9660_FS=y +CONFIG_ISO9660_FS=m CONFIG_JOLIET=y CONFIG_ZISOFS=y -CONFIG_UDF_FS=y +CONFIG_UDF_FS=m CONFIG_MSDOS_FS=m CONFIG_VFAT_FS=m CONFIG_FAT_DEFAULT_CODEPAGE=936 CONFIG_FAT_DEFAULT_IOCHARSET="gb2312" CONFIG_EXFAT_FS=m +CONFIG_NTFS_FS=m CONFIG_NTFS3_FS=m CONFIG_NTFS3_64BIT_CLUSTER=y CONFIG_NTFS3_LZX_XPRESS=y CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y CONFIG_TMPFS=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y CONFIG_ORANGEFS_FS=m CONFIG_ECRYPT_FS=m CONFIG_ECRYPT_FS_MESSAGING=y @@ -838,7 +2003,8 @@ CONFIG_HFSPLUS_FS=m CONFIG_UBIFS_FS=m CONFIG_UBIFS_FS_ADVANCED_COMPR=y CONFIG_CRAMFS=m -CONFIG_SQUASHFS=y +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y CONFIG_SQUASHFS_XATTR=y CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y @@ -846,79 +2012,188 @@ CONFIG_SQUASHFS_XZ=y CONFIG_MINIX_FS=m CONFIG_ROMFS_FS=m CONFIG_PSTORE=m -CONFIG_PSTORE_LZO_COMPRESS=m -CONFIG_PSTORE_LZ4_COMPRESS=m -CONFIG_PSTORE_LZ4HC_COMPRESS=m -CONFIG_PSTORE_842_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS=y -CONFIG_PSTORE_ZSTD_COMPRESS_DEFAULT=y CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_EROFS_FS=m CONFIG_EROFS_FS_ZIP_LZMA=y CONFIG_EROFS_FS_PCPU_KTHREAD=y CONFIG_NFS_FS=y +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y +CONFIG_NFS_V4=m CONFIG_NFS_V4_1=y CONFIG_NFS_V4_2=y -CONFIG_ROOT_NFS=y +# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_NFSD_BLOCKLAYOUT=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_FLEXFILELAYOUT=y +CONFIG_NFSD_V4_2_INTER_SSC=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y CONFIG_CEPH_FS=m CONFIG_CEPH_FSCACHE=y CONFIG_CEPH_FS_POSIX_ACL=y CONFIG_CEPH_FS_SECURITY_LABEL=y CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y # CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y CONFIG_9P_FS=y +CONFIG_NLS_DEFAULT="utf8" CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m 
CONFIG_NLS_UTF8=y CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y CONFIG_KEY_DH_OPERATIONS=y CONFIG_SECURITY=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y -CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_SECURITY_APPARMOR=y CONFIG_SECURITY_YAMA=y +CONFIG_SECURITY_LOCKDOWN_LSM=y +CONFIG_SECURITY_LOCKDOWN_LSM_EARLY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y CONFIG_DEFAULT_SECURITY_DAC=y +CONFIG_LSM="landlock,lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_USER=m # CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set CONFIG_CRYPTO_PCRYPT=m CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SM2=y CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m CONFIG_CRYPTO_KHAZAD=m CONFIG_CRYPTO_SEED=m CONFIG_CRYPTO_SERPENT=m CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=m +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_DEFLATE=m -CONFIG_CRYPTO_LZO=m +CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_842=m CONFIG_CRYPTO_LZ4=m CONFIG_CRYPTO_LZ4HC=m -CONFIG_CRYPTO_USER_API_HASH=m -CONFIG_CRYPTO_USER_API_SKCIPHER=m -CONFIG_CRYPTO_USER_API_RNG=m -CONFIG_CRYPTO_USER_API_AEAD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y CONFIG_CRYPTO_CRC32_LOONGARCH=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_CHELSIO=m CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_REVOCATION_LIST=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_DMA_CMA=y CONFIG_PRINTK_TIME=y +CONFIG_PRINTK_CALLER=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_FRAME_WARN=4096 CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y CONFIG_MAGIC_SYSRQ=y -CONFIG_DEBUG_FS=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y # CONFIG_SCHED_DEBUG is not set CONFIG_SCHEDSTATS=y -# CONFIG_DEBUG_PREEMPT is not set -# CONFIG_FTRACE is not set +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +# CONFIG_STRICT_DEVMEM is not set +# CONFIG_RUNTIME_TESTING_MENU is not set diff --git a/arch/loongarch/include/asm/Kbuild b/arch/loongarch/include/asm/Kbuild index 93783fa24f6e9b634be44a3634346524eaec3811..22991a6f0e2b5500890f49b5c1d51848163b58c2 100644 --- a/arch/loongarch/include/asm/Kbuild +++ b/arch/loongarch/include/asm/Kbuild @@ -23,4 +23,3 @@ generic-y += poll.h generic-y += param.h generic-y += posix_types.h generic-y += resource.h -generic-y += kvm_para.h diff --git a/arch/loongarch/include/asm/addrspace.h b/arch/loongarch/include/asm/addrspace.h index b24437e28c6eda457b2be003b51ad3809600f7cc..60a2ce1a65319c13c070d881b0683c58f3085cd5 
100644 --- a/arch/loongarch/include/asm/addrspace.h +++ b/arch/loongarch/include/asm/addrspace.h @@ -124,6 +124,7 @@ extern unsigned long vm_map_base; #define PCI_IOSIZE SZ_32M #define ISA_IOSIZE SZ_16K #define IO_SPACE_LIMIT (PCI_IOSIZE - 1) +#define ISA_PHY_IOBASE LOONGSON_LIO_BASE #define PHYS_LINK_KADDR PHYSADDR(VMLINUX_LOAD_ADDRESS) diff --git a/arch/loongarch/include/asm/efi.h b/arch/loongarch/include/asm/efi.h index 091897d40b0375758b4822ae1a95719013314709..ca4e66dc40358460edc5279f78653da6538cdf42 100644 --- a/arch/loongarch/include/asm/efi.h +++ b/arch/loongarch/include/asm/efi.h @@ -6,6 +6,7 @@ #define _ASM_LOONGARCH_EFI_H #include +#include void __init efi_init(void); void __init efi_runtime_init(void); diff --git a/arch/loongarch/include/asm/fpu.h b/arch/loongarch/include/asm/fpu.h index c2d8962fda00bec9b6a7fa1fee0a3ff720823a01..4d635b8e32459fa5baf05bc963d304c710022b3f 100644 --- a/arch/loongarch/include/asm/fpu.h +++ b/arch/loongarch/include/asm/fpu.h @@ -48,6 +48,10 @@ static inline void disable_lasx(void); static inline void save_lasx(struct task_struct *t); static inline void restore_lasx(struct task_struct *t); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ +DECLARE_PER_CPU(unsigned long, msa_count); +DECLARE_PER_CPU(unsigned long, lasx_count); +#endif /* * Mask the FCSR Cause bits according to the Enable bits, observing * that Unimplemented is always enabled. @@ -210,6 +214,9 @@ static inline void enable_lsx(void) { if (cpu_has_lsx) csr_xchg32(CSR_EUEN_LSXEN, CSR_EUEN_LSXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(msa_count, raw_smp_processor_id())++; +#endif } static inline void disable_lsx(void) @@ -256,8 +263,12 @@ static inline void restore_lsx_upper(struct task_struct *t) {} static inline void enable_lasx(void) { - if (cpu_has_lasx) + if (cpu_has_lasx) { csr_xchg32(CSR_EUEN_LASXEN, CSR_EUEN_LASXEN, LOONGARCH_CSR_EUEN); +#ifdef CONFIG_LOONGSON3_ACPI_CPUFREQ + per_cpu(lasx_count, raw_smp_processor_id())++; +#endif + } } static inline void disable_lasx(void) diff --git a/arch/loongarch/include/asm/hardirq.h b/arch/loongarch/include/asm/hardirq.h index 0ef3b18f89803708d6e8a96b37c9ff037cc2e264..b26d596a73aa249daff90e2603259a22440001d5 100644 --- a/arch/loongarch/include/asm/hardirq.h +++ b/arch/loongarch/include/asm/hardirq.h @@ -12,11 +12,16 @@ extern void ack_bad_irq(unsigned int irq); #define ack_bad_irq ack_bad_irq +enum ipi_msg_type { + IPI_RESCHEDULE, + IPI_CALL_FUNCTION, +}; #define NR_IPI 2 typedef struct { unsigned int ipi_irqs[NR_IPI]; unsigned int __softirq_pending; + atomic_t message ____cacheline_aligned_in_smp; } ____cacheline_aligned irq_cpustat_t; DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); diff --git a/arch/loongarch/include/asm/inst.h b/arch/loongarch/include/asm/inst.h index 71e1ed4165c80d8a753309fcb381b394dfa85665..e4c545fecaeab1a7aa38466be97e39ca075f4e30 100644 --- a/arch/loongarch/include/asm/inst.h +++ b/arch/loongarch/include/asm/inst.h @@ -12,6 +12,7 @@ #define INSN_NOP 0x03400000 #define INSN_BREAK 0x002a0000 +#define INSN_HVCL 0x002b8000 #define ADDR_IMMMASK_LU52ID 0xFFF0000000000000 #define ADDR_IMMMASK_LU32ID 0x000FFFFF00000000 @@ -65,6 +66,15 @@ enum reg2_op { revbd_op = 0x0f, revh2w_op = 0x10, revhd_op = 0x11, + cpucfg_op = 0x1b, + iocsrrdb_op = 0x19200, + iocsrrdh_op = 0x19201, + iocsrrdw_op = 0x19202, + iocsrrdd_op = 0x19203, + iocsrwrb_op = 0x19204, + iocsrwrh_op = 0x19205, + iocsrwrw_op = 0x19206, + iocsrwrd_op = 0x19207, }; enum reg2i5_op { @@ -318,6 +328,13 @@ struct reg2bstrd_format { unsigned int 
opcode : 10; }; +struct reg2csr_format { + unsigned int rd : 5; + unsigned int rj : 5; + unsigned int csr : 14; + unsigned int opcode : 8; +}; + struct reg3_format { unsigned int rd : 5; unsigned int rj : 5; @@ -346,6 +363,7 @@ union loongarch_instruction { struct reg2i14_format reg2i14_format; struct reg2i16_format reg2i16_format; struct reg2bstrd_format reg2bstrd_format; + struct reg2csr_format reg2csr_format; struct reg3_format reg3_format; struct reg3sa2_format reg3sa2_format; }; diff --git a/arch/loongarch/include/asm/irq.h b/arch/loongarch/include/asm/irq.h index 218b4da0ea90d012199fe65f47eceae810a0a300..85a3315597b6b34a72752f97cda525bade0b18ed 100644 --- a/arch/loongarch/include/asm/irq.h +++ b/arch/loongarch/include/asm/irq.h @@ -53,6 +53,7 @@ struct acpi_vector_group { extern struct acpi_vector_group pch_group[MAX_IO_PICS]; extern struct acpi_vector_group msi_group[MAX_IO_PICS]; +#define MAX_CORES_PER_EIO_NODE 256 #define CORES_PER_EIO_NODE 4 #define LOONGSON_CPU_UART0_VEC 10 /* CPU UART0 */ @@ -117,8 +118,18 @@ extern struct fwnode_handle *liointc_handle; extern struct fwnode_handle *pch_lpc_handle; extern struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; -extern irqreturn_t loongson_ipi_interrupt(int irq, void *dev); +extern void fixup_irqs(void); +static inline int get_percpu_irq(int vector) +{ + struct irq_domain *d; + + d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); + if (d) + return irq_create_mapping(d, vector); + + return -EINVAL; +} #include #endif /* _ASM_IRQ_H */ diff --git a/arch/loongarch/include/asm/kvm_csr.h b/arch/loongarch/include/asm/kvm_csr.h new file mode 100644 index 0000000000000000000000000000000000000000..476c9f620dd52b64a9fb4b98757bf08da5173f78 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_csr.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_CSR_H__ +#define __ASM_LOONGARCH_KVM_CSR_H__ + +#include +#include +#include +#include + +#define gcsr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__( \ + " gcsrrd %[val], %[reg]\n\t" \ + : [val] "=r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +#define gcsr_write(v, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__ ( \ + " gcsrwr %[val], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [reg] "i" (csr) \ + : "memory"); \ +}) + +#define gcsr_xchg(v, m, csr) \ +({ \ + register unsigned long __v = v; \ + __asm__ __volatile__( \ + " gcsrxchg %[val], %[mask], %[reg]\n\t" \ + : [val] "+r" (__v) \ + : [mask] "r" (m), [reg] "i" (csr) \ + : "memory"); \ + __v; \ +}) + +/* Guest CSRS read and write */ +#define read_gcsr_crmd() gcsr_read(LOONGARCH_CSR_CRMD) +#define write_gcsr_crmd(val) gcsr_write(val, LOONGARCH_CSR_CRMD) +#define read_gcsr_prmd() gcsr_read(LOONGARCH_CSR_PRMD) +#define write_gcsr_prmd(val) gcsr_write(val, LOONGARCH_CSR_PRMD) +#define read_gcsr_euen() gcsr_read(LOONGARCH_CSR_EUEN) +#define write_gcsr_euen(val) gcsr_write(val, LOONGARCH_CSR_EUEN) +#define read_gcsr_misc() gcsr_read(LOONGARCH_CSR_MISC) +#define write_gcsr_misc(val) gcsr_write(val, LOONGARCH_CSR_MISC) +#define read_gcsr_ecfg() gcsr_read(LOONGARCH_CSR_ECFG) +#define write_gcsr_ecfg(val) gcsr_write(val, LOONGARCH_CSR_ECFG) +#define read_gcsr_estat() gcsr_read(LOONGARCH_CSR_ESTAT) +#define write_gcsr_estat(val) gcsr_write(val, LOONGARCH_CSR_ESTAT) +#define read_gcsr_era() gcsr_read(LOONGARCH_CSR_ERA) +#define write_gcsr_era(val) gcsr_write(val, 
LOONGARCH_CSR_ERA) +#define read_gcsr_badv() gcsr_read(LOONGARCH_CSR_BADV) +#define write_gcsr_badv(val) gcsr_write(val, LOONGARCH_CSR_BADV) +#define read_gcsr_badi() gcsr_read(LOONGARCH_CSR_BADI) +#define write_gcsr_badi(val) gcsr_write(val, LOONGARCH_CSR_BADI) +#define read_gcsr_eentry() gcsr_read(LOONGARCH_CSR_EENTRY) +#define write_gcsr_eentry(val) gcsr_write(val, LOONGARCH_CSR_EENTRY) + +#define read_gcsr_asid() gcsr_read(LOONGARCH_CSR_ASID) +#define write_gcsr_asid(val) gcsr_write(val, LOONGARCH_CSR_ASID) +#define read_gcsr_pgdl() gcsr_read(LOONGARCH_CSR_PGDL) +#define write_gcsr_pgdl(val) gcsr_write(val, LOONGARCH_CSR_PGDL) +#define read_gcsr_pgdh() gcsr_read(LOONGARCH_CSR_PGDH) +#define write_gcsr_pgdh(val) gcsr_write(val, LOONGARCH_CSR_PGDH) +#define write_gcsr_pgd(val) gcsr_write(val, LOONGARCH_CSR_PGD) +#define read_gcsr_pgd() gcsr_read(LOONGARCH_CSR_PGD) +#define read_gcsr_pwctl0() gcsr_read(LOONGARCH_CSR_PWCTL0) +#define write_gcsr_pwctl0(val) gcsr_write(val, LOONGARCH_CSR_PWCTL0) +#define read_gcsr_pwctl1() gcsr_read(LOONGARCH_CSR_PWCTL1) +#define write_gcsr_pwctl1(val) gcsr_write(val, LOONGARCH_CSR_PWCTL1) +#define read_gcsr_stlbpgsize() gcsr_read(LOONGARCH_CSR_STLBPGSIZE) +#define write_gcsr_stlbpgsize(val) gcsr_write(val, LOONGARCH_CSR_STLBPGSIZE) +#define read_gcsr_rvacfg() gcsr_read(LOONGARCH_CSR_RVACFG) +#define write_gcsr_rvacfg(val) gcsr_write(val, LOONGARCH_CSR_RVACFG) + +#define read_gcsr_cpuid() gcsr_read(LOONGARCH_CSR_CPUID) +#define write_gcsr_cpuid(val) gcsr_write(val, LOONGARCH_CSR_CPUID) +#define read_gcsr_prcfg1() gcsr_read(LOONGARCH_CSR_PRCFG1) +#define write_gcsr_prcfg1(val) gcsr_write(val, LOONGARCH_CSR_PRCFG1) +#define read_gcsr_prcfg2() gcsr_read(LOONGARCH_CSR_PRCFG2) +#define write_gcsr_prcfg2(val) gcsr_write(val, LOONGARCH_CSR_PRCFG2) +#define read_gcsr_prcfg3() gcsr_read(LOONGARCH_CSR_PRCFG3) +#define write_gcsr_prcfg3(val) gcsr_write(val, LOONGARCH_CSR_PRCFG3) + +#define read_gcsr_kscratch0() gcsr_read(LOONGARCH_CSR_KS0) +#define write_gcsr_kscratch0(val) gcsr_write(val, LOONGARCH_CSR_KS0) +#define read_gcsr_kscratch1() gcsr_read(LOONGARCH_CSR_KS1) +#define write_gcsr_kscratch1(val) gcsr_write(val, LOONGARCH_CSR_KS1) +#define read_gcsr_kscratch2() gcsr_read(LOONGARCH_CSR_KS2) +#define write_gcsr_kscratch2(val) gcsr_write(val, LOONGARCH_CSR_KS2) +#define read_gcsr_kscratch3() gcsr_read(LOONGARCH_CSR_KS3) +#define write_gcsr_kscratch3(val) gcsr_write(val, LOONGARCH_CSR_KS3) +#define read_gcsr_kscratch4() gcsr_read(LOONGARCH_CSR_KS4) +#define write_gcsr_kscratch4(val) gcsr_write(val, LOONGARCH_CSR_KS4) +#define read_gcsr_kscratch5() gcsr_read(LOONGARCH_CSR_KS5) +#define write_gcsr_kscratch5(val) gcsr_write(val, LOONGARCH_CSR_KS5) +#define read_gcsr_kscratch6() gcsr_read(LOONGARCH_CSR_KS6) +#define write_gcsr_kscratch6(val) gcsr_write(val, LOONGARCH_CSR_KS6) +#define read_gcsr_kscratch7() gcsr_read(LOONGARCH_CSR_KS7) +#define write_gcsr_kscratch7(val) gcsr_write(val, LOONGARCH_CSR_KS7) + +#define read_gcsr_timerid() gcsr_read(LOONGARCH_CSR_TMID) +#define write_gcsr_timerid(val) gcsr_write(val, LOONGARCH_CSR_TMID) +#define read_gcsr_timercfg() gcsr_read(LOONGARCH_CSR_TCFG) +#define write_gcsr_timercfg(val) gcsr_write(val, LOONGARCH_CSR_TCFG) +#define read_gcsr_timertick() gcsr_read(LOONGARCH_CSR_TVAL) +#define write_gcsr_timertick(val) gcsr_write(val, LOONGARCH_CSR_TVAL) +#define read_gcsr_timeroffset() gcsr_read(LOONGARCH_CSR_CNTC) +#define write_gcsr_timeroffset(val) gcsr_write(val, LOONGARCH_CSR_CNTC) + +#define read_gcsr_llbctl() 
gcsr_read(LOONGARCH_CSR_LLBCTL) +#define write_gcsr_llbctl(val) gcsr_write(val, LOONGARCH_CSR_LLBCTL) + +#define read_gcsr_tlbidx() gcsr_read(LOONGARCH_CSR_TLBIDX) +#define write_gcsr_tlbidx(val) gcsr_write(val, LOONGARCH_CSR_TLBIDX) +#define read_gcsr_tlbrentry() gcsr_read(LOONGARCH_CSR_TLBRENTRY) +#define write_gcsr_tlbrentry(val) gcsr_write(val, LOONGARCH_CSR_TLBRENTRY) +#define read_gcsr_tlbrbadv() gcsr_read(LOONGARCH_CSR_TLBRBADV) +#define write_gcsr_tlbrbadv(val) gcsr_write(val, LOONGARCH_CSR_TLBRBADV) +#define read_gcsr_tlbrera() gcsr_read(LOONGARCH_CSR_TLBRERA) +#define write_gcsr_tlbrera(val) gcsr_write(val, LOONGARCH_CSR_TLBRERA) +#define read_gcsr_tlbrsave() gcsr_read(LOONGARCH_CSR_TLBRSAVE) +#define write_gcsr_tlbrsave(val) gcsr_write(val, LOONGARCH_CSR_TLBRSAVE) +#define read_gcsr_tlbrelo0() gcsr_read(LOONGARCH_CSR_TLBRELO0) +#define write_gcsr_tlbrelo0(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO0) +#define read_gcsr_tlbrelo1() gcsr_read(LOONGARCH_CSR_TLBRELO1) +#define write_gcsr_tlbrelo1(val) gcsr_write(val, LOONGARCH_CSR_TLBRELO1) +#define read_gcsr_tlbrehi() gcsr_read(LOONGARCH_CSR_TLBREHI) +#define write_gcsr_tlbrehi(val) gcsr_write(val, LOONGARCH_CSR_TLBREHI) +#define read_gcsr_tlbrprmd() gcsr_read(LOONGARCH_CSR_TLBRPRMD) +#define write_gcsr_tlbrprmd(val) gcsr_write(val, LOONGARCH_CSR_TLBRPRMD) + +#define read_gcsr_directwin0() gcsr_read(LOONGARCH_CSR_DMWIN0) +#define write_gcsr_directwin0(val) gcsr_write(val, LOONGARCH_CSR_DMWIN0) +#define read_gcsr_directwin1() gcsr_read(LOONGARCH_CSR_DMWIN1) +#define write_gcsr_directwin1(val) gcsr_write(val, LOONGARCH_CSR_DMWIN1) +#define read_gcsr_directwin2() gcsr_read(LOONGARCH_CSR_DMWIN2) +#define write_gcsr_directwin2(val) gcsr_write(val, LOONGARCH_CSR_DMWIN2) +#define read_gcsr_directwin3() gcsr_read(LOONGARCH_CSR_DMWIN3) +#define write_gcsr_directwin3(val) gcsr_write(val, LOONGARCH_CSR_DMWIN3) + +/* Guest related CSRs */ +#define read_csr_gtlbc() csr_read64(LOONGARCH_CSR_GTLBC) +#define write_csr_gtlbc(val) csr_write64(val, LOONGARCH_CSR_GTLBC) +#define read_csr_trgp() csr_read64(LOONGARCH_CSR_TRGP) +#define read_csr_gcfg() csr_read64(LOONGARCH_CSR_GCFG) +#define write_csr_gcfg(val) csr_write64(val, LOONGARCH_CSR_GCFG) +#define read_csr_gstat() csr_read64(LOONGARCH_CSR_GSTAT) +#define write_csr_gstat(val) csr_write64(val, LOONGARCH_CSR_GSTAT) +#define read_csr_gintc() csr_read64(LOONGARCH_CSR_GINTC) +#define write_csr_gintc(val) csr_write64(val, LOONGARCH_CSR_GINTC) +#define read_csr_gcntc() csr_read64(LOONGARCH_CSR_GCNTC) +#define write_csr_gcntc(val) csr_write64(val, LOONGARCH_CSR_GCNTC) + +#define __BUILD_GCSR_OP(name) __BUILD_CSR_COMMON(gcsr_##name) + +__BUILD_CSR_OP(gcfg) +__BUILD_CSR_OP(gstat) +__BUILD_CSR_OP(gtlbc) +__BUILD_CSR_OP(gintc) +__BUILD_GCSR_OP(llbctl) +__BUILD_GCSR_OP(tlbidx) + +#define set_gcsr_estat(val) \ + gcsr_xchg(val, val, LOONGARCH_CSR_ESTAT) +#define clear_gcsr_estat(val) \ + gcsr_xchg(~(val), val, LOONGARCH_CSR_ESTAT) + +#define kvm_read_hw_gcsr(id) gcsr_read(id) +#define kvm_write_hw_gcsr(id, val) gcsr_write(val, id) + +#define kvm_save_hw_gcsr(csr, gid) (csr->csrs[gid] = gcsr_read(gid)) +#define kvm_restore_hw_gcsr(csr, gid) (gcsr_write(csr->csrs[gid], gid)) + +int kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu); + +static __always_inline unsigned long kvm_read_sw_gcsr(struct loongarch_csrs *csr, int gid) +{ + return csr->csrs[gid]; +} + +static __always_inline void kvm_write_sw_gcsr(struct loongarch_csrs *csr, int gid, unsigned long val) +{ + csr->csrs[gid] = val; +} + 
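The macros above give two views of a guest CSR: gcsr_read()/gcsr_write() touch the live hardware GCSR file, while kvm_read_sw_gcsr()/kvm_write_sw_gcsr() operate on the software copy in struct loongarch_csrs. A minimal sketch of how the two layers meet on an exit path (a hypothetical helper for illustration; CSR_ESTAT_IS is assumed to be the interrupt-status field mask from asm/loongarch.h):

/* Hypothetical: sync guest ESTAT into the software copy, clear IS, write back. */
static void demo_sync_estat(struct loongarch_csrs *csr)
{
	/* capture the live guest value into csr->csrs[] */
	kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);

	/* clear the (assumed) interrupt-status field in the software copy */
	csr->csrs[LOONGARCH_CSR_ESTAT] &= ~CSR_ESTAT_IS;

	/* push the modified value back into the hardware GCSR file */
	kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
}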
+static __always_inline void kvm_set_sw_gcsr(struct loongarch_csrs *csr,
+					int gid, unsigned long val)
+{
+	csr->csrs[gid] |= val;
+}
+
+static __always_inline void kvm_change_sw_gcsr(struct loongarch_csrs *csr,
+					int gid, unsigned long mask, unsigned long val)
+{
+	unsigned long _mask = mask;
+
+	csr->csrs[gid] &= ~_mask;
+	csr->csrs[gid] |= val & _mask;
+}
+
+#define KVM_PMU_PLV_ENABLE	(CSR_PERFCTRL_PLV0 |	\
+				 CSR_PERFCTRL_PLV1 |	\
+				 CSR_PERFCTRL_PLV2 |	\
+				 CSR_PERFCTRL_PLV3)
+
+#endif /* __ASM_LOONGARCH_KVM_CSR_H__ */
diff --git a/arch/loongarch/include/asm/kvm_host.h b/arch/loongarch/include/asm/kvm_host.h
new file mode 100644
index 0000000000000000000000000000000000000000..c146d2ebdb90128fa737d441fa5fedadf04fddf0
--- /dev/null
+++ b/arch/loongarch/include/asm/kvm_host.h
@@ -0,0 +1,307 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __ASM_LOONGARCH_KVM_HOST_H__
+#define __ASM_LOONGARCH_KVM_HOST_H__
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/* LoongArch KVM register ids */
+#define KVM_GET_IOC_CSR_IDX(id)		((id & KVM_CSR_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+#define KVM_GET_IOC_CPUCFG_IDX(id)	((id & KVM_CPUCFG_IDX_MASK) >> LOONGARCH_REG_SHIFT)
+
+#define KVM_MAX_VCPUS			256
+#define KVM_MAX_CPUCFG_REGS		21
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS		0
+
+#define KVM_HALT_POLL_NS_DEFAULT	500000
+#define KVM_REQ_RECORD_STEAL		KVM_ARCH_REQ(1)
+
+#define KVM_GUESTDBG_VALID_MASK		(KVM_GUESTDBG_ENABLE | \
+		KVM_GUESTDBG_USE_SW_BP | KVM_GUESTDBG_SINGLESTEP)
+
+struct kvm_vm_stat {
+	struct kvm_vm_stat_generic generic;
+	u64 pages;
+	u64 hugepages;
+};
+
+struct kvm_vcpu_stat {
+	struct kvm_vcpu_stat_generic generic;
+	u64 int_exits;
+	u64 idle_exits;
+	u64 cpucfg_exits;
+	u64 signal_exits;
+	u64 hypercall_exits;
+};
+
+#define KVM_MEM_HUGEPAGE_CAPABLE	(1UL << 0)
+#define KVM_MEM_HUGEPAGE_INCAPABLE	(1UL << 1)
+struct kvm_arch_memory_slot {
+	unsigned long flags;
+};
+
+struct kvm_context {
+	unsigned long vpid_cache;
+	struct kvm_vcpu *last_vcpu;
+};
+
+struct kvm_world_switch {
+	int (*exc_entry)(void);
+	int (*enter_guest)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	unsigned long page_order;
+};
+
+#define MAX_PGTABLE_LEVELS	4
+
+/*
+ * The physical CPU id is used for interrupt routing, and different pieces
+ * of hardware define it differently:
+ * for the LOONGARCH_CSR_CPUID register, the max CPUID size is 512;
+ * for the IPI hardware, the max destination CPUID size is 1024;
+ * for the extioi interrupt controller, the max destination CPUID size is 256;
+ * for the MSI interrupt controller, the max supported CPUID size is 65536.
+ *
+ * Currently the max CPUID is defined as 256 for the KVM hypervisor; in the
+ * future it will be expanded to 4096, including 16 packages at most.
And every + * package supports at most 256 vcpus + */ +#define KVM_MAX_PHYID 256 + +struct kvm_phyid_info { + struct kvm_vcpu *vcpu; + bool enabled; +}; + +struct kvm_phyid_map { + int max_phyid; + struct kvm_phyid_info phys_map[KVM_MAX_PHYID]; +}; + +struct kvm_arch { + /* Guest physical mm */ + kvm_pte_t *pgd; + unsigned long gpa_size; + unsigned long invalid_ptes[MAX_PGTABLE_LEVELS]; + unsigned int pte_shifts[MAX_PGTABLE_LEVELS]; + unsigned int root_level; + spinlock_t phyid_map_lock; + struct kvm_phyid_map *phyid_map; + + s64 time_offset; + struct kvm_context __percpu *vmcs; +}; + +#define CSR_MAX_NUMS 0x800 + +struct loongarch_csrs { + unsigned long csrs[CSR_MAX_NUMS]; +}; + +/* Resume Flags */ +#define RESUME_HOST 0 +#define RESUME_GUEST 1 + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_DO_IOCSR, /* handle IOCSR request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_EXCEPT, /* A guest exception has been generated */ +}; + +#define KVM_LARCH_FPU (0x1 << 0) +#define KVM_LARCH_LSX (0x1 << 1) +#define KVM_LARCH_LASX (0x1 << 2) +#define KVM_LARCH_SWCSR_LATEST (0x1 << 3) +#define KVM_LARCH_HWCSR_USABLE (0x1 << 4) +#define KVM_LARCH_PERF (0x1 << 5) + +struct kvm_vcpu_arch { + /* + * Switch pointer-to-function type to unsigned long + * for loading the value into register directly. + */ + unsigned long host_eentry; + unsigned long guest_eentry; + + /* Pointers stored here for easy accessing from assembly code */ + int (*handle_exit)(struct kvm_run *run, struct kvm_vcpu *vcpu); + + /* Host registers preserved across guest mode execution */ + unsigned long host_sp; + unsigned long host_tp; + unsigned long host_pgd; + + /* Host CSRs are used when handling exits from guest */ + unsigned long badi; + unsigned long badv; + unsigned long host_ecfg; + unsigned long host_estat; + unsigned long host_percpu; + + /* GPRs */ + unsigned long gprs[32]; + unsigned long pc; + + /* Which auxiliary state is loaded (KVM_LARCH_*) */ + unsigned int aux_inuse; + + /* FPU state */ + struct loongarch_fpu fpu FPU_ALIGN; + + /* CSR state */ + struct loongarch_csrs *csr; + + /* GPR used as IO source/target */ + u32 io_gpr; + + /* KVM register to control count timer */ + u32 count_ctl; + struct hrtimer swtimer; + + /* Bitmask of intr that are pending */ + unsigned long irq_pending; + /* Bitmask of pending intr to be cleared */ + unsigned long irq_clear; + + /* Bitmask of exceptions that are pending */ + unsigned long exception_pending; + unsigned int esubcode; + + /* Cache for pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* vcpu's vpid */ + u64 vpid; + + /* Frequency of stable timer in Hz */ + u64 timer_mhz; + ktime_t expire; + + /* Last CPU the vCPU state was loaded on */ + int last_sched_cpu; + /* mp state */ + struct kvm_mp_state mp_state; + /* cpucfg */ + u32 cpucfg[KVM_MAX_CPUCFG_REGS]; + /* paravirt steal time */ + struct { + u64 guest_addr; + u64 last_steal; + struct gfn_to_hva_cache cache; + } st; + /* Save host pmu csr */ + u64 perf_ctrl[4]; + u64 perf_cntr[4]; +}; + +static inline unsigned long readl_sw_gcsr(struct loongarch_csrs *csr, int reg) +{ + return csr->csrs[reg]; +} + +static inline void writel_sw_gcsr(struct loongarch_csrs *csr, int reg, unsigned long val) +{ + csr->csrs[reg] = val; +} + +static inline bool kvm_guest_has_fpu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_FP; +} + +static inline bool kvm_guest_has_lsx(struct 
kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LSX; +} + +static inline bool kvm_guest_has_lasx(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[2] & CPUCFG2_LASX; +} + +static inline bool kvm_guest_has_pmu(struct kvm_vcpu_arch *arch) +{ + return arch->cpucfg[6] & CPUCFG6_PMP; +} + +static inline int kvm_get_pmu_num(struct kvm_vcpu_arch *arch) +{ + return (arch->cpucfg[6] & CPUCFG6_PMNUM) >> CPUCFG6_PMNUM_SHIFT; +} + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* MMU handling */ +void kvm_flush_tlb_all(void); +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa); +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long badv, bool write); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +static inline void update_pc(struct kvm_vcpu_arch *arch) +{ + arch->pc += 4; +} + +/* + * kvm_is_ifetch_fault() - Find whether a TLBL exception is due to ifetch fault. + * @vcpu: Virtual CPU. + * + * Returns: Whether the TLBL exception was likely due to an instruction + * fetch fault rather than a data load fault. + */ +static inline bool kvm_is_ifetch_fault(struct kvm_vcpu_arch *arch) +{ + return arch->pc == arch->badv; +} + +/* Misc */ +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {} +void kvm_check_vpid(struct kvm_vcpu *vcpu); +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer); +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot); +void kvm_init_vmcs(struct kvm *kvm); +void kvm_exc_entry(void); +int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu); + +extern unsigned long vpid_mask; +extern const unsigned long kvm_exception_size; +extern const unsigned long kvm_enter_guest_size; +extern struct kvm_world_switch *kvm_loongarch_ops; + +#define SW_GCSR (1 << 0) +#define HW_GCSR (1 << 1) +#define INVALID_GCSR (1 << 2) + +int get_gcsr_flag(int csr); +void set_hw_gcsr(int csr_id, unsigned long val); + +#endif /* __ASM_LOONGARCH_KVM_HOST_H__ */ diff --git a/arch/loongarch/include/asm/kvm_mmu.h b/arch/loongarch/include/asm/kvm_mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..099bafc6f797c960adf971147150ce5e9a580407 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_mmu.h @@ -0,0 +1,139 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_MMU_H__ +#define __ASM_LOONGARCH_KVM_MMU_H__ + +#include +#include +#include + +/* + * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels + * for which pages need to be cached. 
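 *
 * A worked example (the config value is an assumption for illustration):
 * with CONFIG_PGTABLE_LEVELS = 3, a GPA fault may need to allocate at
 * most two new table pages below the root (the last level is the PTE
 * page itself), so KVM_MMU_CACHE_MIN_PAGES evaluates to 2 and the
 * per-vCPU mmu_page_cache is expected to be topped up with at least
 * that many pre-allocated pages before a mapping is attempted.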
+ */ +#define KVM_MMU_CACHE_MIN_PAGES (CONFIG_PGTABLE_LEVELS - 1) + +#define _KVM_FLUSH_PGTABLE 0x1 +#define _KVM_HAS_PGMASK 0x2 +#define kvm_pfn_pte(pfn, prot) (((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot)) +#define kvm_pte_pfn(x) ((phys_addr_t)((x & _PFN_MASK) >> PFN_PTE_SHIFT)) + +typedef unsigned long kvm_pte_t; +typedef struct kvm_ptw_ctx kvm_ptw_ctx; +typedef int (*kvm_pte_ops)(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx); + +struct kvm_ptw_ctx { + kvm_pte_ops ops; + unsigned long flag; + + /* for kvm_arch_mmu_enable_log_dirty_pt_masked use */ + unsigned long mask; + unsigned long gfn; + + /* page walk mmu info */ + unsigned int level; + unsigned long pgtable_shift; + unsigned long invalid_entry; + unsigned long *invalid_ptes; + unsigned int *pte_shifts; + void *opaque; + + /* free pte table page list */ + struct list_head list; +}; + +kvm_pte_t *kvm_pgd_alloc(void); + +static inline void kvm_set_pte(kvm_pte_t *ptep, kvm_pte_t val) +{ + WRITE_ONCE(*ptep, val); +} + +static inline int kvm_pte_write(kvm_pte_t pte) { return pte & _PAGE_WRITE; } +static inline int kvm_pte_dirty(kvm_pte_t pte) { return pte & _PAGE_DIRTY; } +static inline int kvm_pte_young(kvm_pte_t pte) { return pte & _PAGE_ACCESSED; } +static inline int kvm_pte_huge(kvm_pte_t pte) { return pte & _PAGE_HUGE; } + +static inline kvm_pte_t kvm_pte_mkyoung(kvm_pte_t pte) +{ + return pte | _PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkold(kvm_pte_t pte) +{ + return pte & ~_PAGE_ACCESSED; +} + +static inline kvm_pte_t kvm_pte_mkdirty(kvm_pte_t pte) +{ + return pte | _PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkclean(kvm_pte_t pte) +{ + return pte & ~_PAGE_DIRTY; +} + +static inline kvm_pte_t kvm_pte_mkhuge(kvm_pte_t pte) +{ + return pte | _PAGE_HUGE; +} + +static inline kvm_pte_t kvm_pte_mksmall(kvm_pte_t pte) +{ + return pte & ~_PAGE_HUGE; +} + +static inline int kvm_need_flush(kvm_ptw_ctx *ctx) +{ + return ctx->flag & _KVM_FLUSH_PGTABLE; +} + +static inline kvm_pte_t *kvm_pgtable_offset(kvm_ptw_ctx *ctx, kvm_pte_t *table, + phys_addr_t addr) +{ + + return table + ((addr >> ctx->pgtable_shift) & (PTRS_PER_PTE - 1)); +} + +static inline phys_addr_t kvm_pgtable_addr_end(kvm_ptw_ctx *ctx, + phys_addr_t addr, phys_addr_t end) +{ + phys_addr_t boundary, size; + + size = 0x1UL << ctx->pgtable_shift; + boundary = (addr + size) & ~(size - 1); + return (boundary - 1 < end - 1) ? 
boundary : end; +} + +static inline int kvm_pte_present(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + if (!ctx || ctx->level == 0) + return !!(*entry & _PAGE_PRESENT); + + return *entry != ctx->invalid_entry; +} + +static inline int kvm_pte_none(kvm_ptw_ctx *ctx, kvm_pte_t *entry) +{ + return *entry == ctx->invalid_entry; +} + +static inline void kvm_ptw_enter(kvm_ptw_ctx *ctx) +{ + ctx->level--; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +static inline void kvm_ptw_exit(kvm_ptw_ctx *ctx) +{ + ctx->level++; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; +} + +#endif /* __ASM_LOONGARCH_KVM_MMU_H__ */ diff --git a/arch/loongarch/include/asm/kvm_para.h b/arch/loongarch/include/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..032101b941d92fea374afbb18877ed67314007dc --- /dev/null +++ b/arch/loongarch/include/asm/kvm_para.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_KVM_PARA_H +#define _ASM_LOONGARCH_KVM_PARA_H + +/* + * Hypercall code field + */ +#define HYPERVISOR_KVM 1 +#define HYPERVISOR_VENDOR_SHIFT 8 +#define HYPERCALL_CODE(vendor, code) ((vendor << HYPERVISOR_VENDOR_SHIFT) + code) +#define KVM_HCALL_CODE_PV_SERVICE 0 +#define KVM_HCALL_CODE_SWDBG 1 +#define KVM_HCALL_PV_SERVICE HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_PV_SERVICE) +#define KVM_HCALL_FUNC_PV_IPI 1 +#define KVM_HCALL_FUNC_NOTIFY 2 +#define KVM_HCALL_SWDBG HYPERCALL_CODE(HYPERVISOR_KVM, KVM_HCALL_CODE_SWDBG) + +/* + * LoongArch hypercall return code + */ +#define KVM_HCALL_STATUS_SUCCESS 0 +#define KVM_HCALL_INVALID_CODE -1UL +#define KVM_HCALL_INVALID_PARAMETER -2UL + +#define KVM_STEAL_PHYS_VALID BIT_ULL(0) +#define KVM_STEAL_PHYS_MASK GENMASK_ULL(63, 6) +struct kvm_steal_time { + __u64 steal; + __u32 version; + __u32 flags; + __u32 pad[12]; +}; + +/* + * Hypercall interface for KVM hypervisor + * + * a0: function identifier + * a1-a6: args + * Return value will be placed in v0. + * Up to 6 arguments are passed in a1, a2, a3, a4, a5, a6. 
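 *
 * For example, a guest-side PV IPI send built on this ABI could be
 * issued as (a sketch: the argument layout for KVM_HCALL_FUNC_PV_IPI is
 * an assumption here, not something defined by this header):
 *
 *	ret = kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, bitmap_low,
 *			     bitmap_high, min_cpu);
 *
 * which loads the function id into a0, the three arguments into a1-a3,
 * executes hvcl with code KVM_HCALL_PV_SERVICE, and reads the status
 * (KVM_HCALL_STATUS_SUCCESS or an error) back from v0.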
+ */ +static __always_inline long kvm_hypercall(u64 fid) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall1(u64 fid, unsigned long arg0) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall2(u64 fid, + unsigned long arg0, unsigned long arg1) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall3(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r" (fun), "r" (a1), "r" (a2), "r" (a3) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall4(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4) + : "memory" + ); + + return ret; +} + +static __always_inline long kvm_hypercall5(u64 fid, + unsigned long arg0, unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4) +{ + register long ret asm("v0"); + register unsigned long fun asm("a0") = fid; + register unsigned long a1 asm("a1") = arg0; + register unsigned long a2 asm("a2") = arg1; + register unsigned long a3 asm("a3") = arg2; + register unsigned long a4 asm("a4") = arg3; + register unsigned long a5 asm("a5") = arg4; + + __asm__ __volatile__( + "hvcl "__stringify(KVM_HCALL_PV_SERVICE) + : "=r" (ret) + : "r"(fun), "r" (a1), "r" (a2), "r" (a3), "r" (a4), "r" (a5) + : "memory" + ); + + return ret; +} + + +static inline unsigned int kvm_arch_para_features(void) +{ + return 0; +} + +static inline unsigned int kvm_arch_para_hints(void) +{ + return 0; +} + +static inline bool kvm_check_and_clear_guest_paused(void) +{ + return false; +} +#endif /* _ASM_LOONGARCH_KVM_PARA_H */ diff --git a/arch/loongarch/include/asm/kvm_types.h b/arch/loongarch/include/asm/kvm_types.h new file mode 100644 index 0000000000000000000000000000000000000000..2fe1d4bdff66cac9e4e6703752ce1ac89fb2fd86 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_types.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef _ASM_LOONGARCH_KVM_TYPES_H +#define _ASM_LOONGARCH_KVM_TYPES_H + +#define 
KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40 + +#endif /* _ASM_LOONGARCH_KVM_TYPES_H */ diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h new file mode 100644 index 0000000000000000000000000000000000000000..1da24994b838a9acbc905e5130cba451261651d2 --- /dev/null +++ b/arch/loongarch/include/asm/kvm_vcpu.h @@ -0,0 +1,115 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#ifndef __ASM_LOONGARCH_KVM_VCPU_H__ +#define __ASM_LOONGARCH_KVM_VCPU_H__ + +#include +#include + +/* Controlled by 0x5 guest estat */ +#define CPU_SIP0 (_ULCAST_(1)) +#define CPU_SIP1 (_ULCAST_(1) << 1) +#define CPU_PMU (_ULCAST_(1) << 10) +#define CPU_TIMER (_ULCAST_(1) << 11) +#define CPU_IPI (_ULCAST_(1) << 12) + +/* Controlled by 0x52 guest exception VIP aligned to estat bit 5~12 */ +#define CPU_IP0 (_ULCAST_(1)) +#define CPU_IP1 (_ULCAST_(1) << 1) +#define CPU_IP2 (_ULCAST_(1) << 2) +#define CPU_IP3 (_ULCAST_(1) << 3) +#define CPU_IP4 (_ULCAST_(1) << 4) +#define CPU_IP5 (_ULCAST_(1) << 5) +#define CPU_IP6 (_ULCAST_(1) << 6) +#define CPU_IP7 (_ULCAST_(1) << 7) + +#define MNSEC_PER_SEC (NSEC_PER_SEC >> 20) + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_LOONGSON_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGSON_IRQ_TYPE_MASK 0xff +#define KVM_LOONGSON_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGSON_IRQ_VCPU_MASK 0xff +#define KVM_LOONGSON_IRQ_NUM_SHIFT 0 +#define KVM_LOONGSON_IRQ_NUM_MASK 0xffff + +typedef union loongarch_instruction larch_inst; +typedef int (*exit_handle_fn)(struct kvm_vcpu *); + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst); +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_emu_idle(struct kvm_vcpu *vcpu); +int kvm_pending_timer(struct kvm_vcpu *vcpu); +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault); +void kvm_deliver_intr(struct kvm_vcpu *vcpu); +void kvm_deliver_exception(struct kvm_vcpu *vcpu); + +void kvm_own_fpu(struct kvm_vcpu *vcpu); +void kvm_lose_fpu(struct kvm_vcpu *vcpu); +void kvm_save_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fpu(struct loongarch_fpu *fpu); +void kvm_restore_fcsr(struct loongarch_fpu *fpu); + +#ifdef CONFIG_CPU_HAS_LSX +int kvm_own_lsx(struct kvm_vcpu *vcpu); +void kvm_save_lsx(struct loongarch_fpu *fpu); +void kvm_restore_lsx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; } +static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { } +#endif + +#ifdef CONFIG_CPU_HAS_LASX +int kvm_own_lasx(struct kvm_vcpu *vcpu); +void kvm_save_lasx(struct loongarch_fpu *fpu); +void kvm_restore_lasx(struct loongarch_fpu *fpu); +#else +static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; } +static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { } +static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { } +#endif + +int kvm_own_pmu(struct kvm_vcpu *vcpu); + +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long hz); +void kvm_reset_timer(struct kvm_vcpu *vcpu); +void kvm_save_timer(struct kvm_vcpu *vcpu); +void kvm_restore_timer(struct kvm_vcpu *vcpu); + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq); +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid); + +/* + * Loongarch KVM guest 
interrupt handling
+ */
+static inline void kvm_queue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	set_bit(irq, &vcpu->arch.irq_pending);
+	clear_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline void kvm_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	clear_bit(irq, &vcpu->arch.irq_pending);
+	set_bit(irq, &vcpu->arch.irq_clear);
+}
+
+static inline int kvm_queue_exception(struct kvm_vcpu *vcpu,
+			unsigned int code, unsigned int subcode)
+{
+	/* only one exception can be injected */
+	if (!vcpu->arch.exception_pending) {
+		set_bit(code, &vcpu->arch.exception_pending);
+		vcpu->arch.esubcode = subcode;
+		return 0;
+	} else
+		return -1;
+}
+
+#endif /* __ASM_LOONGARCH_KVM_VCPU_H__ */
diff --git a/arch/loongarch/include/asm/loongarch.h b/arch/loongarch/include/asm/loongarch.h
index 33531d432b492d201f98ae64b941255a94355f91..e852c0f62eb7096c08e008fbeecf390219fe1f3f 100644
--- a/arch/loongarch/include/asm/loongarch.h
+++ b/arch/loongarch/include/asm/loongarch.h
@@ -119,6 +119,7 @@
 #define CPUCFG6_PMP			BIT(0)
 #define CPUCFG6_PAMVER			GENMASK(3, 1)
 #define CPUCFG6_PMNUM			GENMASK(7, 4)
+#define CPUCFG6_PMNUM_SHIFT		4
 #define CPUCFG6_PMBITS			GENMASK(13, 8)
 #define CPUCFG6_UPM			BIT(14)
@@ -158,6 +159,18 @@
 #define CPUCFG48_VFPU_CG		BIT(2)
 #define CPUCFG48_RAM_CG			BIT(3)
+/*
+ * cpucfg index area: 0x40000000 -- 0x400000ff
+ * SW emulation for KVM hypervisor
+ */
+#define CPUCFG_KVM_BASE			0x40000000UL
+#define CPUCFG_KVM_SIZE			0x100
+#define CPUCFG_KVM_SIG			CPUCFG_KVM_BASE
+#define KVM_SIGNATURE			"KVM\0"
+#define CPUCFG_KVM_FEATURE		(CPUCFG_KVM_BASE + 4)
+#define KVM_FEATURE_PV_IPI		BIT(1)
+#define KVM_FEATURE_STEAL_TIME		BIT(2)
+
 #ifndef __ASSEMBLY__
 /* CSR */
@@ -171,6 +184,7 @@
 /* IOCSR */
 #define iocsr_read32(reg)		__iocsrrd_w(reg)
 #define iocsr_read64(reg)		__iocsrrd_d(reg)
+#define iocsr_write8(val, reg)		__iocsrwr_b(val, reg)
 #define iocsr_write32(val, reg)		__iocsrwr_w(val, reg)
 #define iocsr_write64(val, reg)		__iocsrwr_d(val, reg)
@@ -226,6 +240,7 @@
 #define LOONGARCH_CSR_ECFG		0x4	/* Exception config */
 #define CSR_ECFG_VS_SHIFT		16
 #define CSR_ECFG_VS_WIDTH		3
+#define CSR_ECFG_VS_SHIFT_END		(CSR_ECFG_VS_SHIFT + CSR_ECFG_VS_WIDTH - 1)
 #define CSR_ECFG_VS			(_ULCAST_(0x7) << CSR_ECFG_VS_SHIFT)
 #define CSR_ECFG_IM_SHIFT		0
 #define CSR_ECFG_IM_WIDTH		14
@@ -314,13 +329,14 @@
 #define CSR_TLBLO1_V			(_ULCAST_(0x1) << CSR_TLBLO1_V_SHIFT)
 #define LOONGARCH_CSR_GTLBC		0x15	/* Guest TLB control */
-#define CSR_GTLBC_RID_SHIFT		16
-#define CSR_GTLBC_RID_WIDTH		8
-#define CSR_GTLBC_RID			(_ULCAST_(0xff) << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TGID_SHIFT		16
+#define CSR_GTLBC_TGID_WIDTH		8
+#define CSR_GTLBC_TGID_SHIFT_END	(CSR_GTLBC_TGID_SHIFT + CSR_GTLBC_TGID_WIDTH - 1)
+#define CSR_GTLBC_TGID			(_ULCAST_(0xff) << CSR_GTLBC_TGID_SHIFT)
 #define CSR_GTLBC_TOTI_SHIFT		13
 #define CSR_GTLBC_TOTI			(_ULCAST_(0x1) << CSR_GTLBC_TOTI_SHIFT)
-#define CSR_GTLBC_USERID_SHIFT		12
-#define CSR_GTLBC_USERID		(_ULCAST_(0x1) << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_USETGID_SHIFT		12
+#define CSR_GTLBC_USETGID		(_ULCAST_(0x1) << CSR_GTLBC_USETGID_SHIFT)
 #define CSR_GTLBC_GMTLBSZ_SHIFT		0
 #define CSR_GTLBC_GMTLBSZ_WIDTH		6
 #define CSR_GTLBC_GMTLBSZ		(_ULCAST_(0x3f) << CSR_GTLBC_GMTLBSZ_SHIFT)
@@ -475,6 +491,7 @@
 #define LOONGARCH_CSR_GSTAT		0x50	/* Guest status */
 #define CSR_GSTAT_GID_SHIFT		16
 #define CSR_GSTAT_GID_WIDTH		8
+#define CSR_GSTAT_GID_SHIFT_END		(CSR_GSTAT_GID_SHIFT + CSR_GSTAT_GID_WIDTH - 1)
 #define CSR_GSTAT_GID			(_ULCAST_(0xff) << CSR_GSTAT_GID_SHIFT)
 #define CSR_GSTAT_GIDBIT_SHIFT		4
 #define CSR_GSTAT_GIDBIT_WIDTH		6
@@ -525,6
+542,12 @@ #define CSR_GCFG_MATC_GUEST (_ULCAST_(0x0) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATC_SHITF) #define CSR_GCFG_MATC_NEST (_ULCAST_(0x2) << CSR_GCFG_MATC_SHITF) +#define CSR_GCFG_MATP_NEST_SHIFT 2 +#define CSR_GCFG_MATP_NEST (_ULCAST_(0x1) << CSR_GCFG_MATP_NEST_SHIFT) +#define CSR_GCFG_MATP_ROOT_SHIFT 1 +#define CSR_GCFG_MATP_ROOT (_ULCAST_(0x1) << CSR_GCFG_MATP_ROOT_SHIFT) +#define CSR_GCFG_MATP_GUEST_SHIFT 0 +#define CSR_GCFG_MATP_GUEST (_ULCAST_(0x1) << CSR_GCFG_MATP_GUEST_SHIFT) #define LOONGARCH_CSR_GINTC 0x52 /* Guest interrupt control */ #define CSR_GINTC_HC_SHIFT 16 diff --git a/arch/loongarch/include/asm/paravirt.h b/arch/loongarch/include/asm/paravirt.h new file mode 100644 index 0000000000000000000000000000000000000000..fe27fb5e82b88fa3546b56441ae2cbc0b590350f --- /dev/null +++ b/arch/loongarch/include/asm/paravirt.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_LOONGARCH_PARAVIRT_H +#define _ASM_LOONGARCH_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int pv_ipi_init(void); +int __init pv_time_init(void); +#else +static inline int pv_ipi_init(void) +{ + return 0; +} + +static inline int pv_time_init(void) +{ + return 0; +} +#endif // CONFIG_PARAVIRT +#endif diff --git a/arch/loongarch/include/asm/paravirt_api_clock.h b/arch/loongarch/include/asm/paravirt_api_clock.h new file mode 100644 index 0000000000000000000000000000000000000000..8a418f0b4fd537164e5f5fa5a0bdbc5886fcee1f --- /dev/null +++ b/arch/loongarch/include/asm/paravirt_api_clock.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Loongson Technology Corporation Limited + */ +#ifndef _ASM_API_CLOCK_H +#define _ASM_API_CLOCK_H + +#include + +#endif diff --git a/arch/loongarch/include/asm/smp.h b/arch/loongarch/include/asm/smp.h index f81e5f01d61905f5b8d7da4786ba512258381acd..75d30529748c944748f2d70f8389c7fee509f69c 100644 --- a/arch/loongarch/include/asm/smp.h +++ b/arch/loongarch/include/asm/smp.h @@ -12,6 +12,13 @@ #include #include +struct smp_ops { + void (*init_ipi)(void); + void (*send_ipi_mask)(const struct cpumask *mask, unsigned int action); + void (*send_ipi_single)(int cpu, unsigned int action); +}; + +extern struct smp_ops smp_ops; extern int smp_num_siblings; extern int num_processors; extern int disabled_cpus; @@ -24,8 +31,6 @@ void loongson_prepare_cpus(unsigned int max_cpus); void loongson_boot_secondary(int cpu, struct task_struct *idle); void loongson_init_secondary(void); void loongson_smp_finish(void); -void loongson_send_ipi_single(int cpu, unsigned int action); -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action); #ifdef CONFIG_HOTPLUG_CPU int loongson_cpu_disable(void); void loongson_cpu_die(unsigned int cpu); @@ -59,9 +64,12 @@ extern int __cpu_logical_map[NR_CPUS]; #define cpu_physical_id(cpu) cpu_logical_map(cpu) -#define SMP_BOOT_CPU 0x1 -#define SMP_RESCHEDULE 0x2 -#define SMP_CALL_FUNCTION 0x4 +#define ACTION_BOOT_CPU 0 +#define ACTION_RESCHEDULE 1 +#define ACTION_CALL_FUNCTION 2 +#define SMP_BOOT_CPU BIT(ACTION_BOOT_CPU) +#define SMP_RESCHEDULE BIT(ACTION_RESCHEDULE) +#define SMP_CALL_FUNCTION BIT(ACTION_CALL_FUNCTION) struct secondary_data { 
unsigned long stack;
@@ -71,7 +79,8 @@ extern struct secondary_data cpuboot_data;
 extern asmlinkage void smpboot_entry(void);
 extern asmlinkage void start_secondary(void);
-
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 extern void calculate_cpu_foreign_map(void);
 /*
@@ -79,16 +88,6 @@ extern void calculate_cpu_foreign_map(void);
  */
 extern void show_ipi_list(struct seq_file *p, int prec);
-static inline void arch_send_call_function_single_ipi(int cpu)
-{
-	loongson_send_ipi_single(cpu, SMP_CALL_FUNCTION);
-}
-
-static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
-{
-	loongson_send_ipi_mask(mask, SMP_CALL_FUNCTION);
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 static inline int __cpu_disable(void)
 {
diff --git a/arch/loongarch/include/asm/sparsemem.h b/arch/loongarch/include/asm/sparsemem.h
index 8d4af6aff8a8f1e149f59b4a9b34b80bf7197db2..1f331ee584ef38c0b21de9b0536ce883b9b62cee 100644
--- a/arch/loongarch/include/asm/sparsemem.h
+++ b/arch/loongarch/include/asm/sparsemem.h
@@ -8,7 +8,7 @@
  * SECTION_SIZE_BITS	2^N: how big each section will be
  * MAX_PHYSMEM_BITS	2^N: how much memory we can have in that space
  */
-#define SECTION_SIZE_BITS	29 /* 2^29 = Largest Huge Page Size */
+#define SECTION_SIZE_BITS	28
 #define MAX_PHYSMEM_BITS	48
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
diff --git a/arch/loongarch/include/uapi/asm/kvm.h b/arch/loongarch/include/uapi/asm/kvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..9891ed93816a3af2cf08713602ccd8d5b8e35556
--- /dev/null
+++ b/arch/loongarch/include/uapi/asm/kvm.h
@@ -0,0 +1,117 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+
+#ifndef __UAPI_ASM_LOONGARCH_KVM_H
+#define __UAPI_ASM_LOONGARCH_KVM_H
+
+#include
+
+/*
+ * KVM LoongArch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+#define __KVM_HAVE_GUEST_DEBUG
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET	1
+#define KVM_DIRTY_LOG_PAGE_OFFSET	64
+
+#define KVM_GUESTDBG_USE_SW_BP		0x00010000
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+	/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+	__u64 gpr[32];
+	__u64 pc;
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+	__u32 fcsr;
+	__u64 fcc;	/* 8x8 */
+	struct kvm_fpureg {
+		__u64 val64[4];
+	} fpr[32];
+};
+
+/*
+ * For LoongArch, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers. The id field is broken down as follows:
+ *
+ * bits[63..52] - As per linux/kvm.h
+ * bits[51..32] - Must be zero.
+ * bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / SIMD registers (see definitions below).
+ *
+ * Other register sets may be added in the future. Each set would
+ * have its own identifier in bits[31..16].
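 *
 * Worked example, using the macros defined below: the 64-bit ONE_REG id
 * for CSR number 0x5 (ESTAT) is
 *
 *	KVM_IOC_CSRID(0x5)
 *	    == KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (0x5 << LOONGARCH_REG_SHIFT)
 *
 * i.e. register set 1 in bits[31..16] and the register index, scaled by
 * LOONGARCH_REG_SHIFT, in the low bits.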
+ */ + +#define KVM_REG_LOONGARCH_GPR (KVM_REG_LOONGARCH | 0x00000ULL) +#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x10000ULL) +#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x20000ULL) +#define KVM_REG_LOONGARCH_FPSIMD (KVM_REG_LOONGARCH | 0x30000ULL) +#define KVM_REG_LOONGARCH_CPUCFG (KVM_REG_LOONGARCH | 0x40000ULL) +#define KVM_REG_LOONGARCH_MASK (KVM_REG_LOONGARCH | 0x70000ULL) +#define KVM_CSR_IDX_MASK 0x7fff +#define KVM_CPUCFG_IDX_MASK 0x7fff + +/* + * KVM_REG_LOONGARCH_KVM - KVM specific control registers. + */ + +#define KVM_REG_LOONGARCH_COUNTER (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LOONGARCH_VCPU_RESET (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) +/* Debugging: Special instruction for software breakpoint */ +#define KVM_REG_LOONGARCH_DEBUG_INST (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) + +#define LOONGARCH_REG_SHIFT 3 +#define LOONGARCH_REG_64(TYPE, REG) (TYPE | KVM_REG_SIZE_U64 | (REG << LOONGARCH_REG_SHIFT)) +#define KVM_IOC_CSRID(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CSR, REG) +#define KVM_IOC_CPUCFG(REG) LOONGARCH_REG_64(KVM_REG_LOONGARCH_CPUCFG, REG) + +/* Device Control API on vcpu fd */ +#define KVM_LOONGARCH_VCPU_CPUCFG 0 +#define KVM_LOONGARCH_VCPU_PVTIME_CTRL 1 +#define KVM_LOONGARCH_VCPU_PVTIME_GPA 0 + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 +#define KVM_MAX_CORES 256 + +#endif /* __UAPI_ASM_LOONGARCH_KVM_H */ diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile index 4fcc168f07323154b4d7fc6712ab7a2298bb0d3e..6c148ccea67440cb148b517d9b7e9a75ad0bb744 100644 --- a/arch/loongarch/kernel/Makefile +++ b/arch/loongarch/kernel/Makefile @@ -9,6 +9,7 @@ obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \ alternative.o unwind.o +obj-y += legacy_boot.o obj-$(CONFIG_ACPI) += acpi.o obj-$(CONFIG_EFI) += efi.o @@ -48,6 +49,7 @@ obj-$(CONFIG_MODULES) += module.o module-sections.o obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_PROC_FS) += proc.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_SMP) += smp.o diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c index 8e00a754e548943ae4dba5d330a1354426c713d1..23601a3a8352f511e8ec45ec8fca2de6e3e88aad 100644 --- a/arch/loongarch/kernel/acpi.c +++ b/arch/loongarch/kernel/acpi.c @@ -17,6 +17,7 @@ #include #include #include +#include "legacy_boot.h" int acpi_disabled; EXPORT_SYMBOL(acpi_disabled); @@ -60,7 +61,7 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) } #ifdef CONFIG_SMP -static int set_processor_mask(u32 id, u32 flags) +int set_processor_mask(u32 id, u32 flags) { int cpu, cpuid = id; @@ -134,6 +135,10 @@ static void __init acpi_process_madt(void) __cpu_logical_map[i] = -1; } #endif + + if (efi_bp && bpi_version <= BPI_VERSION_V1) + legacy_madt_table_init(); + acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC, acpi_parse_processor, MAX_CORE_PIC); diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c index 8da0726777edb41ea66d47f640308c18435f4551..173fe514fc9ecf2974c01c1e06b8f130988da9a7 100644 --- 
a/arch/loongarch/kernel/asm-offsets.c +++ b/arch/loongarch/kernel/asm-offsets.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -289,3 +290,34 @@ void output_fgraph_ret_regs_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT("KVM/LoongArch Specific offsets."); + + OFFSET(VCPU_FCC, kvm_vcpu_arch, fpu.fcc); + OFFSET(VCPU_FCSR0, kvm_vcpu_arch, fpu.fcsr); + BLANK(); + + OFFSET(KVM_VCPU_ARCH, kvm_vcpu, arch); + OFFSET(KVM_VCPU_KVM, kvm_vcpu, kvm); + OFFSET(KVM_VCPU_RUN, kvm_vcpu, run); + BLANK(); + + OFFSET(KVM_ARCH_HSP, kvm_vcpu_arch, host_sp); + OFFSET(KVM_ARCH_HTP, kvm_vcpu_arch, host_tp); + OFFSET(KVM_ARCH_HPGD, kvm_vcpu_arch, host_pgd); + OFFSET(KVM_ARCH_HANDLE_EXIT, kvm_vcpu_arch, handle_exit); + OFFSET(KVM_ARCH_HEENTRY, kvm_vcpu_arch, host_eentry); + OFFSET(KVM_ARCH_GEENTRY, kvm_vcpu_arch, guest_eentry); + OFFSET(KVM_ARCH_GPC, kvm_vcpu_arch, pc); + OFFSET(KVM_ARCH_GGPR, kvm_vcpu_arch, gprs); + OFFSET(KVM_ARCH_HBADI, kvm_vcpu_arch, badi); + OFFSET(KVM_ARCH_HBADV, kvm_vcpu_arch, badv); + OFFSET(KVM_ARCH_HECFG, kvm_vcpu_arch, host_ecfg); + OFFSET(KVM_ARCH_HESTAT, kvm_vcpu_arch, host_estat); + OFFSET(KVM_ARCH_HPERCPU, kvm_vcpu_arch, host_percpu); + + OFFSET(KVM_GPGD, kvm, arch.pgd); + BLANK(); +} diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c index 7a9c6a9dd2d01fb429b67aea02deada5e5b1f3be..cc0ccde58db87f8ddd6d5e568f40f864bd55b0c1 100644 --- a/arch/loongarch/kernel/dma.c +++ b/arch/loongarch/kernel/dma.c @@ -4,6 +4,28 @@ */ #include #include +#include + +/* + * We extract 4bit node id (bit 44~47) from Loongson-3's + * 48bit physical address space and embed it into 40bit. + */ + +static int node_id_offset; + +dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +{ + long nid = (paddr >> 44) & 0xf; + + return ((nid << 44) ^ paddr) | (nid << node_id_offset); +} + +phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +{ + long nid = (daddr >> node_id_offset) & 0xf; + + return ((nid << node_id_offset) ^ daddr) | (nid << 44); +} void acpi_arch_dma_setup(struct device *dev) { @@ -11,6 +33,11 @@ void acpi_arch_dma_setup(struct device *dev) u64 mask, end = 0; const struct bus_dma_region *map = NULL; + if (node_id_offset == 0) { + node_id_offset = ((readl(LS7A_DMA_CFG) & LS7A_DMA_NODE_MASK) >> LS7A_DMA_NODE_SHF); + node_id_offset += 36; + } + ret = acpi_dma_get_range(dev, &map); if (!ret && map) { const struct bus_dma_region *r = map; diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c index 9fc10cea21e10e387ef47f3fac85a9c3fc5fc66c..b132af112664bcc029b4b3540791dba48b7ea42d 100644 --- a/arch/loongarch/kernel/efi.c +++ b/arch/loongarch/kernel/efi.c @@ -23,13 +23,17 @@ #include #include +#include #include +#include +#include "legacy_boot.h" static unsigned long efi_nr_tables; static unsigned long efi_config_table; static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR; static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR; +static __initdata pgd_t *pgd_efi; static efi_system_table_t *efi_systab; static efi_config_table_type_t arch_tables[] __initdata = { @@ -49,8 +53,165 @@ void __init *efi_fdt_pointer(void) return early_memremap_ro(fdt_pointer, SZ_64K); } +static int __init efimap_populate_hugepages( + unsigned long start, unsigned long end, + pgprot_t prot) +{ + unsigned long addr; + unsigned long next; + pmd_t entry; + pud_t *pud; + pmd_t *pmd; + + for (addr = start; addr < end; addr = next) { + next = pmd_addr_end(addr, end); + pud = pud_offset((p4d_t 
*)pgd_efi + pgd_index(addr), addr);
+		if (pud_none(*pud)) {
+			void *p = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+
+			if (!p)
+				return -1;
+			pmd_init(p);
+			pud_populate(&init_mm, pud, p);
+		}
+		pmd = pmd_offset(pud, addr);
+		if (pmd_none(*pmd)) {
+			entry = pfn_pmd((addr >> PAGE_SHIFT), prot);
+			entry = pmd_mkhuge(entry);
+			set_pmd_at(&init_mm, addr, pmd, entry);
+		}
+	}
+	return 0;
+}
+
+static void __init efi_map_pgt(void)
+{
+	unsigned long node;
+	unsigned long start, end;
+	unsigned long start_pfn, end_pfn;
+
+	pgd_efi = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+	if (!pgd_efi) {
+		pr_err("alloc efi pgd failed!\n");
+		return;
+	}
+	pgd_init(pgd_efi);
+	csr_write64((long)pgd_efi, LOONGARCH_CSR_PGDL);
+
+	/* Low Memory, Cached */
+	efimap_populate_hugepages(0, SZ_256M, PAGE_KERNEL);
+
+	for_each_node_mask(node, node_possible_map) {
+		/* MMIO Registers, Uncached */
+		efimap_populate_hugepages(SZ_256M | (node << 44),
+				SZ_512M | (node << 44), PAGE_KERNEL_SUC);
+
+		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+		start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE);
+		end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE);
+
+		/* System memory, Cached */
+		efimap_populate_hugepages(node ? start : SZ_512M, end, PAGE_KERNEL);
+	}
+}
+
+static int __init efimap_free_pgt(unsigned long start, unsigned long end)
+{
+	unsigned long addr;
+	unsigned long next;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (addr = start; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+
+		pud = pud_offset((p4d_t *)pgd_efi + pgd_index(addr), addr);
+		if (!pud_present(*pud))
+			continue;
+		pmd = pmd_offset(pud, addr);
+		memblock_free(pmd, PAGE_SIZE);
+		pud_clear(pud);
+	}
+	return 0;
+}
+
+static void __init efi_unmap_pgt(void)
+{
+	unsigned long node;
+	unsigned long start, end;
+	unsigned long start_pfn, end_pfn;
+
+	for_each_node_mask(node, node_possible_map) {
+		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+		start = ALIGN_DOWN(start_pfn << PAGE_SHIFT, PMD_SIZE);
+		end = ALIGN(end_pfn << PAGE_SHIFT, PMD_SIZE);
+
+		/* Free pagetable memory */
+		efimap_free_pgt(start, end);
+	}
+
+	memblock_free(pgd_efi, PAGE_SIZE);
+	csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+	local_flush_tlb_all();
+}
+
+/*
+ * set_virtual_map() - create a virtual mapping for the EFI memory map and
+ * call efi_set_virtual_address_map() to switch the EFI runtime services
+ * into virtual mode.
+ *
+ * This function populates the virt_addr fields of all memory region
+ * descriptors whose EFI_MEMORY_RUNTIME attribute is set. Those descriptors
+ * are also copied into a local runtime_map array, and their total count is
+ * handed to the firmware together with the new map.
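 *
 * For instance, a runtime region with the EFI_MEMORY_WB (or WT) attribute
 * gets virt_addr = TO_CACHE(phys_addr), i.e. an address in the cached
 * direct-map window, while any other runtime region is mapped through the
 * uncached window via TO_UNCACHE() instead (the concrete window bases are
 * configuration dependent).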
+ */ +static int __init set_virtual_map(void) +{ + efi_status_t status; + int count = 0; + unsigned int size; + unsigned long attr; + efi_runtime_services_t *rt; + efi_set_virtual_address_map_t *svam; + efi_memory_desc_t *in, runtime_map[32]; + + if (efi_bp) + return EFI_SUCCESS; + + size = sizeof(efi_memory_desc_t); + + for_each_efi_memory_desc(in) { + attr = in->attribute; + if (!(attr & EFI_MEMORY_RUNTIME)) + continue; + + if (attr & (EFI_MEMORY_WB | EFI_MEMORY_WT)) + in->virt_addr = TO_CACHE(in->phys_addr); + else + in->virt_addr = TO_UNCACHE(in->phys_addr); + + memcpy(&runtime_map[count++], in, size); + } + + rt = early_memremap_ro((unsigned long)efi_systab->runtime, sizeof(*rt)); + + /* Install the new virtual address map */ + svam = rt->set_virtual_address_map; + + efi_map_pgt(); + + status = svam(size * count, size, efi.memmap.desc_version, + (efi_memory_desc_t *)TO_PHYS((unsigned long)runtime_map)); + + efi_unmap_pgt(); + if (status != EFI_SUCCESS) + return -1; + + return 0; +} + void __init efi_runtime_init(void) { + efi_status_t status; + if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime) return; @@ -59,7 +220,11 @@ void __init efi_runtime_init(void) return; } - efi.runtime = (efi_runtime_services_t *)efi_systab->runtime; + status = set_virtual_map(); + if (status < 0) + return; + + efi.runtime = READ_ONCE(efi_systab->runtime); efi.runtime_version = (unsigned int)efi.runtime->hdr.revision; efi_native_runtime_setup(); @@ -93,10 +258,12 @@ void __init efi_init(void) void *config_tables; struct efi_boot_memmap *tbl; - if (!efi_system_table) - return; + if (efi_system_table) + efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, + sizeof(*efi_systab)); + else + efi_systab = (efi_system_table_t *)efi_bp->systemtable; - efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab)); if (!efi_systab) { pr_err("Can't find EFI system table.\n"); return; diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c index 6b3bfb0092e60b34946490415ff7cd2a51287886..85dbfb1256eb260d09278108c00be564f3528749 100644 --- a/arch/loongarch/kernel/env.c +++ b/arch/loongarch/kernel/env.c @@ -12,6 +12,7 @@ #include #include #include +#include "legacy_boot.h" u64 efi_system_table; struct loongson_system_configuration loongson_sysconf; @@ -22,6 +23,11 @@ void __init init_environ(void) int efi_boot = fw_arg0; char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE); + legacy_boot_init(fw_arg0, fw_arg1, fw_arg2); + + if (efi_bp) + return; + if (efi_boot) set_bit(EFI_BOOT, &efi.flags); else diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S index d53ab10f464465e3f88910614afefce92b5af607..4382e36ae3d44466663aefaa5af1f23717876f35 100644 --- a/arch/loongarch/kernel/fpu.S +++ b/arch/loongarch/kernel/fpu.S @@ -349,6 +349,7 @@ SYM_FUNC_START(_restore_lsx_upper) lsx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lsx_upper) +EXPORT_SYMBOL(_restore_lsx_upper) SYM_FUNC_START(_init_lsx_upper) lsx_init_all_upper t1 @@ -384,6 +385,7 @@ SYM_FUNC_START(_restore_lasx_upper) lasx_restore_all_upper a0 t0 t1 jr ra SYM_FUNC_END(_restore_lasx_upper) +EXPORT_SYMBOL(_restore_lasx_upper) SYM_FUNC_START(_init_lasx_upper) lasx_init_all_upper t1 diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c index 883e5066ae445fa157c74fc9eb65939a5770f159..8b21449a70920ee72310f18ec1136ed89e939d3f 100644 --- a/arch/loongarch/kernel/irq.c +++ b/arch/loongarch/kernel/irq.c @@ -20,6 +20,7 @@ #include #include #include +#include 
"legacy_boot.h" DEFINE_PER_CPU(unsigned long, irq_stack); DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); @@ -61,6 +62,12 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) if (header->length < sizeof(struct acpi_table_mcfg)) return -EINVAL; + for (i = 0; i < MAX_IO_PICS; i++) { + msi_group[i].pci_segment = -1; + msi_group[i].node = -1; + pch_group[i].node = -1; + } + n = (header->length - sizeof(struct acpi_table_mcfg)) / sizeof(struct acpi_mcfg_allocation); mcfg = (struct acpi_table_mcfg *)header; @@ -76,34 +83,48 @@ static int __init early_pci_mcfg_parse(struct acpi_table_header *header) static void __init init_vec_parent_group(void) { - int i; - - for (i = 0; i < MAX_IO_PICS; i++) { - msi_group[i].pci_segment = -1; - msi_group[i].node = -1; - pch_group[i].node = -1; - } - acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse); } -static int __init get_ipi_irq(void) +#ifdef CONFIG_HOTPLUG_CPU +static void handle_irq_affinity(void) { - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_IPI); + struct irq_desc *desc; + struct irq_chip *chip; + unsigned int irq; + unsigned long flags; + struct cpumask *affinity; + + for_each_active_irq(irq) { + desc = irq_to_desc(irq); + if (!desc) + continue; + + raw_spin_lock_irqsave(&desc->lock, flags); + + affinity = desc->irq_data.common->affinity; + if (!cpumask_intersects(affinity, cpu_online_mask)) + cpumask_copy(affinity, cpu_online_mask); + + chip = irq_data_get_irq_chip(&desc->irq_data); + if (chip && chip->irq_set_affinity) + chip->irq_set_affinity(&desc->irq_data, + desc->irq_data.common->affinity, true); + raw_spin_unlock_irqrestore(&desc->lock, flags); + } +} - return -EINVAL; +void fixup_irqs(void) +{ + handle_irq_affinity(); + irq_cpu_offline(); + clear_csr_ecfg(ECFG0_IM); } +#endif void __init init_IRQ(void) { - int i; -#ifdef CONFIG_SMP - int r, ipi_irq; - static int ipi_dummy_dev; -#endif + int i, ret; unsigned int order = get_order(IRQ_STACK_SIZE); struct page *page; @@ -111,15 +132,15 @@ void __init init_IRQ(void) clear_csr_estat(ESTATF_IP); init_vec_parent_group(); - irqchip_init(); + if (efi_bp && bpi_version <= BPI_VERSION_V1) { + ret = setup_legacy_IRQ(); + if (ret) + panic("IRQ domain init error!\n"); + } else { + irqchip_init(); + } #ifdef CONFIG_SMP - ipi_irq = get_ipi_irq(); - if (ipi_irq < 0) - panic("IPI IRQ mapping failed\n"); - irq_set_percpu_devid(ipi_irq); - r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev); - if (r < 0) - panic("IPI IRQ request failed\n"); + smp_ops.init_ipi(); #endif for (i = 0; i < NR_IRQS; i++) @@ -133,5 +154,5 @@ void __init init_IRQ(void) per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE); } - set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); + set_csr_ecfg(ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC); } diff --git a/arch/loongarch/kernel/legacy_boot.c b/arch/loongarch/kernel/legacy_boot.c new file mode 100644 index 0000000000000000000000000000000000000000..35a0a118486fb6e23a1497f4a480389ff9cdf7b7 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Author: Yun Liu, liuyun@loongson.cn + * Copyright (C) 2020 Loongson Technology Co., Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "legacy_boot.h" + +#define MAX_CORE_PIC 256 +#define PREFIX "ACPI: " + +#define MSI_MSG_ADDRESS 0x2FF00000 +#define MSI_MSG_DEFAULT_COUNT 0xC0 + +struct boot_params *efi_bp; +struct loongsonlist_mem_map *g_mmap; +struct acpi_madt_lio_pic *acpi_liointc; +struct acpi_madt_eio_pic *acpi_eiointc[MAX_IO_PICS]; + +struct acpi_madt_ht_pic *acpi_htintc; +struct acpi_madt_lpc_pic *acpi_pchlpc; +struct acpi_madt_msi_pic *acpi_pchmsi[MAX_IO_PICS]; +struct acpi_madt_bio_pic *acpi_pchpic[MAX_IO_PICS]; + +struct irq_domain *cpu_domain; +struct irq_domain *liointc_domain; +struct irq_domain *pch_lpc_domain; +struct irq_domain *pch_msi_domain[MAX_IO_PICS]; +struct irq_domain *pch_pic_domain[MAX_IO_PICS]; + +char arcs_cmdline[COMMAND_LINE_SIZE]; +int nr_io_pics; +int bpi_version; + +struct acpi_madt_lio_pic liointc_default = { + .address = LOONGSON_REG_BASE + 0x1400, + .size = 256, + .cascade = {2, 3}, + .cascade_map = {0x00FFFFFF, 0xff000000}, +}; + +struct acpi_madt_lpc_pic pchlpc_default = { + .address = LS7A_LPC_REG_BASE, + .size = SZ_4K, + .cascade = 19, +}; + +struct acpi_madt_eio_pic eiointc_default[MAX_IO_PICS]; +struct acpi_madt_msi_pic pchmsi_default[MAX_IO_PICS]; +struct acpi_madt_bio_pic pchpic_default[MAX_IO_PICS]; + +static int +acpi_parse_lapic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_local_apic *processor = NULL; + + processor = (struct acpi_madt_local_apic *)header; + if (BAD_MADT_ENTRY(processor, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + set_processor_mask(processor->id, processor->lapic_flags); + + return 0; +} + +static int bad_pch_pic(unsigned long address) +{ + if (nr_io_pics >= MAX_IO_PICS) { + pr_warn("WARNING: Max # of I/O PCH_PICs (%d) exceeded (found %d), skipping\n", + MAX_IO_PICS, nr_io_pics); + return 1; + } + if (!address) { + pr_warn("WARNING: Bogus (zero) I/O PCH_PIC address found in table, skipping!\n"); + return 1; + } + return 0; +} + +void register_default_pic(int id, u32 address, u32 irq_base) +{ + int j, idx, entries, cores; + unsigned long addr; + u64 node_map = 0; + + if (bad_pch_pic(address)) + return; + + idx = nr_io_pics; + cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + pchpic_default[idx].address = address; + if (idx) + pchpic_default[idx].address |= nid_to_addrbase(id) | HT1LO_OFFSET; + pchpic_default[idx].id = id; + pchpic_default[idx].version = 0; + pchpic_default[idx].size = 0x1000; + pchpic_default[idx].gsi_base = irq_base; + + msi_group[nr_io_pics].pci_segment = nr_io_pics; + pch_group[nr_io_pics].node = msi_group[nr_io_pics].node = id; + + addr = pchpic_default[idx].address; + /* Read INT_ID.int_num */ + entries = (((unsigned long)ls7a_readq(addr) >> 48) & 0xff) + 1; + pchmsi_default[idx].msg_address = MSI_MSG_ADDRESS; + pchmsi_default[idx].start = entries; + pchmsi_default[idx].count = MSI_MSG_DEFAULT_COUNT; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + node_map |= (1 << node); + } + eiointc_default[idx].cascade = 3 + idx; + eiointc_default[idx].node = id; + eiointc_default[idx].node_map = node_map; + + if (idx) { + int i; + + for (i = 0; i < idx + 1; i++) { + node_map = 0; + + for_each_possible_cpu(j) { + int node = cpu_logical_map(j) / cores; + + if (((node & 7) < 4) ? !i : i) + node_map |= (1 << node); + } + eiointc_default[i].node_map = node_map; + } + } + + acpi_pchpic[idx] = &pchpic_default[idx]; + acpi_pchmsi[idx] = &pchmsi_default[idx]; + acpi_eiointc[idx] = &eiointc_default[idx]; + + nr_io_pics++; +} + +static int +acpi_parse_legacy_pch_pic(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_io_apic *pch_pic = NULL; + + pch_pic = (struct acpi_madt_io_apic *)header; + + if (BAD_MADT_ENTRY(pch_pic, end)) + return -EINVAL; + + acpi_table_print_madt_entry(&header->common); + + register_default_pic(pch_pic->id, pch_pic->address, + pch_pic->global_irq_base); + + return 0; +} + +__init int legacy_madt_table_init(void) +{ + /* Parse MADT LAPIC entries */ + acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC, acpi_parse_lapic, MAX_CORE_PIC); + acpi_table_parse_madt(ACPI_MADT_TYPE_IO_APIC, acpi_parse_legacy_pch_pic, MAX_IO_PICS); + + acpi_liointc = &liointc_default; + acpi_pchlpc = &pchlpc_default; + + return 0; +} + +int setup_legacy_IRQ(void) +{ + int i, ret; + struct irq_domain *pic_domain; + + if (!acpi_eiointc[0]) + cpu_data[0].options &= ~LOONGARCH_CPU_EXTIOI; + + ret = cpuintc_acpi_init(NULL, 0); + if (ret) { + pr_err("CPU domain init error!\n"); + return -1; + } + cpu_domain = get_cpudomain(); + ret = liointc_acpi_init(cpu_domain, acpi_liointc); + if (ret) { + pr_err("Liointc domain init error!\n"); + return -1; + } + liointc_domain = irq_find_matching_fwnode(liointc_handle, DOMAIN_BUS_ANY); + if (cpu_has_extioi) { + pr_info("Using EIOINTC interrupt mode\n"); + for (i = 0; i < nr_io_pics; i++) { + ret = eiointc_acpi_init(cpu_domain, acpi_eiointc[i]); + if (ret) { + pr_err("Eiointc domain init error!\n"); + return -1; + } + + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[i], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[i], 0); + } + /* HTVECINTC maybe not use */ + } else { + pr_info("Using HTVECINTC interrupt mode\n"); + ret = htvec_acpi_init(liointc_domain, acpi_htintc); + if (ret) { + pr_err("HTVECintc domain init error!\n"); + return -1; + } + pch_pic_parse_madt((union acpi_subtable_headers *)acpi_pchpic[0], 0); + pch_msi_parse_madt((union acpi_subtable_headers *)acpi_pchmsi[0], 0); + } + + pic_domain = get_pchpic_irq_domain(); + if (pic_domain && !cpu_has_hypervisor) + pch_lpc_acpi_init(pic_domain, acpi_pchlpc); + + return 0; +} + +/* + * Manage initrd + */ +#ifdef CONFIG_BLK_DEV_INITRD 
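+/* + * With legacy BPI firmware the initrd location arrives on the command + * line as rd_start= (a kernel virtual address, hence the __pa() + * conversion below) and rd_size=, both parsed as early params. + */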
+static __init int rd_start_early(char *p) +{ + phys_initrd_start = __pa(memparse(p, NULL)); + + return 0; +} +early_param("rd_start", rd_start_early); + +static __init int rd_size_early(char *p) +{ + phys_initrd_size = memparse(p, NULL); + + return 0; +} +early_param("rd_size", rd_size_early); + +#endif + +__init void fw_init_cmdline(unsigned long argc, unsigned long cmdp) +{ + int i; + char **_fw_argv; + + _fw_argv = (char **)cmdp; + + arcs_cmdline[0] = '\0'; + for (i = 1; i < argc; i++) { + strlcat(arcs_cmdline, _fw_argv[i], COMMAND_LINE_SIZE); + if (i < (argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } + strscpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE); +} + +static u8 ext_listhdr_checksum(u8 *buffer, u32 length) +{ + u8 sum = 0; + u8 *end = buffer + length; + + while (buffer < end) + sum = (u8)(sum + *(buffer++)); + + return sum; +} + +static int parse_mem(struct _extention_list_hdr *head) +{ + g_mmap = (struct loongsonlist_mem_map *)head; + if (ext_listhdr_checksum((u8 *)g_mmap, head->length)) { + pr_err("mem checksum error\n"); + return -EPERM; + } + return 0; +} + +/* Passed by legacy firmware; use this info if the vbios address is needed */ +static int parse_vbios(struct _extention_list_hdr *head) +{ + struct loongsonlist_vbios *pvbios; + + pvbios = (struct loongsonlist_vbios *)head; + + if (ext_listhdr_checksum((u8 *)pvbios, head->length)) { + pr_err("vbios_addr checksum error\n"); + return -EPERM; + } + return 0; +} + +/* Passed by legacy firmware; use this info if screen info is needed (e.g. under KVM) */ +static int parse_screeninfo(struct _extention_list_hdr *head) +{ + struct loongsonlist_screeninfo *pscreeninfo; + + pscreeninfo = (struct loongsonlist_screeninfo *)head; + if (ext_listhdr_checksum((u8 *)pscreeninfo, head->length)) { + pr_err("screeninfo_addr checksum error\n"); + return -EPERM; + } + + memcpy(&screen_info, &pscreeninfo->si, sizeof(screen_info)); + return 0; +} + +static int list_find(struct boot_params *bp) +{ + struct _extention_list_hdr *fhead = NULL; + unsigned long index; + + fhead = bp->extlist; + if (!fhead) { + pr_err("the BPI extension list is empty!\n"); + return -1; + } + do { + if (memcmp(&(fhead->signature), LOONGSON_MEM_SIGNATURE, 3) == 0) { + if (parse_mem(fhead) != 0) { + pr_err("parse mem failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_VBIOS_SIGNATURE, 5) == 0) { + if (parse_vbios(fhead) != 0) { + pr_err("parse vbios failed\n"); + return -EPERM; + } + } else if (memcmp(&(fhead->signature), LOONGSON_SCREENINFO_SIGNATURE, 5) == 0) { + if (parse_screeninfo(fhead) != 0) { + pr_err("parse screeninfo failed\n"); + return -EPERM; + } + } + fhead = (struct _extention_list_hdr *)fhead->next; + index = (unsigned long)fhead; + } while (index); + return 0; +} + +unsigned int bpi_init(void) +{ + return list_find(efi_bp); +} + +static int get_bpi_version(u64 *signature) +{ + u8 data[9]; + int version = BPI_VERSION_NONE; + + data[8] = 0; + + memcpy(data, signature, sizeof(*signature)); + if (kstrtoint(&data[3], 10, &version)) + return BPI_VERSION_NONE; + return version; +} + +static void __init parse_bpi_flags(void) +{ + if (efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); +} + +__init unsigned long legacy_boot_init(unsigned long argc, unsigned long cmdptr, unsigned long bpi) +{ + int ret; + + if (!bpi || argc < 2) + return -1; + efi_bp = (struct boot_params *)bpi; + bpi_version = get_bpi_version(&efi_bp->signature); + pr_info("BPI%d with boot flags %llx.\n", bpi_version, 
efi_bp->flags); + if (bpi_version == BPI_VERSION_NONE) { + if (cpu_has_hypervisor) + pr_err(FW_BUG "Fatal error, bpi ver NONE!\n"); + else + panic(FW_BUG "Fatal error, bpi ver NONE!\n"); + } else if (bpi_version == BPI_VERSION_V2) + parse_bpi_flags(); + + fw_init_cmdline(argc, cmdptr); + ret = bpi_init(); + if (ret) { + pr_err("init legacy firmware error!\n"); + return -1; + } + + return 0; +} + +static int __init add_legacy_isa_io(struct fwnode_handle *fwnode, unsigned long isa_base) +{ + int ret = 0; + unsigned long vaddr; + struct logic_pio_hwaddr *range; + + range = kzalloc(sizeof(*range), GFP_ATOMIC); + if (!range) + return -ENOMEM; + + range->fwnode = fwnode; + range->size = ISA_IOSIZE; + range->hw_start = isa_base; + range->flags = LOGIC_PIO_CPU_MMIO; + + ret = logic_pio_register_range(range); + if (ret) { + kfree(range); + return ret; + } + + if (range->io_start != 0) { + logic_pio_unregister_range(range); + kfree(range); + return -EINVAL; + } + + vaddr = (unsigned long)(PCI_IOBASE + range->io_start); + ret = ioremap_page_range(vaddr, vaddr + range->size, range->hw_start, + pgprot_device(PAGE_KERNEL)); + return ret; +} + +static struct fwnode_handle * __init parse_isa_base(u64 *cpu_addr) +{ + struct device_node *np; + const __be32 *ranges = NULL; + int len; + struct device_node *node; + + for_each_node_by_name(np, "isa") { + node = of_node_get(np); + + if (!node) + break; + + ranges = of_get_property(node, "ranges", &len); + + if (!ranges || (ranges && len > 0)) + break; + } + if (ranges) { + ranges += 2; + *cpu_addr = of_translate_address(np, ranges); + return &np->fwnode; + } + + return NULL; +} + +static int __init register_legacy_isa_io(void) +{ + struct fwnode_handle *fwnode; + u64 cpu_addr; + + if (!acpi_disabled) { + cpu_addr = ISA_PHY_IOBASE; + fwnode = kzalloc(sizeof(*fwnode), GFP_ATOMIC); + } else { + fwnode = parse_isa_base(&cpu_addr); + } + + if (fwnode) + add_legacy_isa_io(fwnode, cpu_addr); + + return 0; +} +arch_initcall(register_legacy_isa_io); diff --git a/arch/loongarch/kernel/legacy_boot.h b/arch/loongarch/kernel/legacy_boot.h new file mode 100644 index 0000000000000000000000000000000000000000..982bf9b1de72571654c34588d5e25e002bc84177 --- /dev/null +++ b/arch/loongarch/kernel/legacy_boot.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LEGACY_BOOT_H_ +#define __LEGACY_BOOT_H_ +#include +#include +#define ADDRESS_TYPE_SYSRAM 1 +#define ADDRESS_TYPE_RESERVED 2 +#define ADDRESS_TYPE_ACPI 3 +#define ADDRESS_TYPE_NVS 4 +#define ADDRESS_TYPE_PMEM 5 + +#define LOONGSON3_BOOT_MEM_MAP_MAX 128 +#define RT_MAP_START 100 +#define FIX_MAP_ENTRY 32 + +/* mask of the flags in bootparamsinterface */ +#define BPI_FLAGS_UEFI_SUPPORTED BIT(0) +#define BPI_FLAGS_SOC_CPU BIT(1) + +#define LOONGSON_DMA_MASK_BIT 64 +#define LOONGSON_MEM_SIGNATURE "MEM" +#define LOONGSON_VBIOS_SIGNATURE "VBIOS" +#define LOONGSON_EFIBOOT_SIGNATURE "BPI" +#define LOONGSON_SCREENINFO_SIGNATURE "SINFO" +#define LOONGSON_EFIBOOT_VERSION 1000 + +/* Values for Version firmware */ + +enum bpi_vers { + BPI_VERSION_NONE = 0, + BPI_VERSION_V1 = 1000, + BPI_VERSION_V2 = 1001, +}; + +struct boot_params { + u64 signature; /* {"BPIXXXXX"} */ + void *systemtable; + struct _extention_list_hdr *extlist; + u64 flags; +} __packed; + +struct _extention_list_hdr { + u64 signature; + u32 length; + u8 revision; + u8 checksum; + struct _extention_list_hdr *next; +} __packed; + +struct loongsonlist_mem_map { + struct _extention_list_hdr header; /*{"M", "E", "M"}*/ + u8 map_count; + struct _loongson_mem_map { 
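+		/* mem_type is one of the ADDRESS_TYPE_* values defined above */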
+ u32 mem_type; + u64 mem_start; + u64 mem_size; + } __packed map[LOONGSON3_BOOT_MEM_MAP_MAX]; +} __packed; + +struct loongsonlist_vbios { + struct _extention_list_hdr header; /* {VBIOS} */ + u64 vbios_addr; +} __packed; + +struct loongsonlist_screeninfo { + struct _extention_list_hdr header; + struct screen_info si; +}; +unsigned long legacy_boot_init(unsigned long argc, + unsigned long cmdptr, unsigned long bpi); +extern int bpi_version; +extern struct boot_params *efi_bp; +extern struct loongsonlist_mem_map *g_mmap; +extern int set_processor_mask(u32 id, u32 flags); +extern int __init setup_legacy_IRQ(void); +extern struct loongson_system_configuration loongson_sysconf; +extern unsigned long long smp_group[MAX_PACKAGES]; +extern int legacy_madt_table_init(void); +extern struct pch_pic *pch_pic_priv[MAX_IO_PICS]; +extern struct irq_domain *get_cpudomain(void); +extern int __init cpuintc_acpi_init(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_pic_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern int __init +pch_msi_parse_madt(union acpi_subtable_headers *header, + const unsigned long end); +extern struct irq_domain *get_pchpic_irq_domain(void); +#endif diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c index 2dcb9e003657c848adff71078870fe682451e416..561706cb1e6d1a25e4d41279bff3a51361cd10bb 100644 --- a/arch/loongarch/kernel/machine_kexec.c +++ b/arch/loongarch/kernel/machine_kexec.c @@ -59,6 +59,9 @@ static void kexec_image_info(const struct kimage *kimage) } } +#define MAX_ARGS 64 +#define KEXEC_CMDLINE_SIZE (COMMAND_LINE_SIZE * 2) + int machine_kexec_prepare(struct kimage *kimage) { int i; @@ -70,11 +73,49 @@ int machine_kexec_prepare(struct kimage *kimage) kimage->arch.efi_boot = fw_arg0; kimage->arch.systable_ptr = fw_arg2; + if (!fw_arg2) + pr_err("Small fdt mode is not supported!\n"); + /* Find the command line */ for (i = 0; i < kimage->nr_segments; i++) { if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) { - if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) - kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + if (fw_arg0 < 2) { + /* New firmware */ + if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE)) + kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr; + } else { + /* Old firmware */ + int argc = 0; + long offt; + char *ptr, *str; + unsigned long *argv; + + /* + * convert command line string to array + * of parameters (as bootloader does). 
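+ * Layout: the argv[] pointer array occupies the first half of the + * KEXEC_CMDLINE_SIZE buffer and the argument strings are copied into + * the second half (argv[0] is expressed relative to KEXEC_CMDLINE_ADDR).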
+ */ + argv = (unsigned long *)kmalloc(KEXEC_CMDLINE_SIZE, GFP_KERNEL); + argv[argc++] = (unsigned long)(KEXEC_CMDLINE_ADDR + KEXEC_CMDLINE_SIZE/2); + str = (char *)argv + KEXEC_CMDLINE_SIZE/2; + + if (copy_from_user(str, kimage->segment[i].buf, KEXEC_CMDLINE_SIZE/2)) + return -EINVAL; + + ptr = strchr(str, ' '); + + while (ptr && (argc < MAX_ARGS)) { + *ptr = '\0'; + if (ptr[1] != ' ') { + offt = (long)(ptr - str + 1); + argv[argc++] = (unsigned long)argv + KEXEC_CMDLINE_SIZE/2 + offt; + } + ptr = strchr(ptr + 1, ' '); + } + + kimage->arch.efi_boot = argc; + kimage->arch.cmdline_ptr = (unsigned long)argv; + break; + } break; } } diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c index aed901c57fb439493c560de777fbc86689f827ae..5fd1bc3333bc36a808830d411b404925660833ab 100644 --- a/arch/loongarch/kernel/mem.c +++ b/arch/loongarch/kernel/mem.c @@ -9,13 +9,35 @@ #include #include #include - +#include "legacy_boot.h" void __init memblock_init(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_start, mem_end, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + /* parse memory information */ + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + pr_info("add memory region memblock - base: 0x%llx size: 0x%llx\n", mem_start, mem_size); + memblock_add(mem_start, mem_size); + if (max_low_pfn < (mem_end >> PAGE_SHIFT)) + max_low_pfn = mem_end >> PAGE_SHIFT; + break; + } + } + memblock_set_current_limit(PFN_PHYS(max_low_pfn)); + memblock_reserve(__pa_symbol(&_text), + __pa_symbol(&_end) - __pa_symbol(&_text)); + return; + } /* Parse memory information */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c index 6e65ff12d5c7dc5062bc4d9bce24f4ef68f3b8da..1c3ede74ea4d50eb99bd380cd1d09c8f7127b366 100644 --- a/arch/loongarch/kernel/numa.c +++ b/arch/loongarch/kernel/numa.c @@ -25,6 +25,7 @@ #include #include #include +#include "legacy_boot.h" int numa_off; struct pglist_data *node_data[MAX_NUMNODES]; @@ -37,7 +38,6 @@ static struct numa_meminfo numa_meminfo; cpumask_t cpus_on_node[MAX_NUMNODES]; cpumask_t phys_cpus_on_node[MAX_NUMNODES]; EXPORT_SYMBOL(cpus_on_node); - /* * apicid, cpu, node mappings */ @@ -301,10 +301,45 @@ static void __init add_numamem_region(u64 start, u64 end, u32 type) static void __init init_node_memblock(void) { - u32 mem_type; + u32 i, mem_type; u64 mem_end, mem_start, mem_size; efi_memory_desc_t *md; + if (g_mmap) { + for (i = 0; i < g_mmap->map_count; i++) { + mem_type = g_mmap->map[i].mem_type; + mem_start = g_mmap->map[i].mem_start; + mem_size = g_mmap->map[i].mem_size; + mem_end = g_mmap->map[i].mem_start + mem_size; + + switch (mem_type) { + case ADDRESS_TYPE_SYSRAM: + mem_start = PFN_ALIGN(mem_start); + mem_end = PFN_ALIGN(mem_end - PAGE_SIZE + 1); + if (mem_start >= mem_end) + break; + add_numamem_region(mem_start, mem_end, EFI_PERSISTENT_MEMORY); + break; + + case ADDRESS_TYPE_ACPI: + mem_start = PFN_ALIGN(mem_start - PAGE_SIZE + 1); + mem_end = PFN_ALIGN(mem_end); + mem_size = mem_end - mem_start; + memblock_add(mem_start, mem_size); + memblock_mark_nomap(mem_start, mem_size); + memblock_set_node(mem_start, mem_size, + &memblock.memory, 0); + memblock_reserve(mem_start, mem_size); + break; + + case ADDRESS_TYPE_RESERVED: + memblock_reserve(mem_start, mem_size); + break; + } + } + return; + } + 
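+	/* No BPI-provided map (g_mmap); fall back to the EFI memory map below. */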
/* Parse memory information and activate */ for_each_efi_memory_desc(md) { mem_type = md->type; diff --git a/arch/loongarch/kernel/paravirt.c b/arch/loongarch/kernel/paravirt.c new file mode 100644 index 0000000000000000000000000000000000000000..56182c64ab38ebfd7b218a45e45aa725a0a9d48e --- /dev/null +++ b/arch/loongarch/kernel/paravirt.c @@ -0,0 +1,281 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; +static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64); +static int has_steal_clock; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} +early_param("no-steal-acc", parse_no_stealacc); + +static u64 para_steal_clock(int cpu) +{ + u64 steal; + struct kvm_steal_time *src; + int version; + + src = &per_cpu(steal_time, cpu); + do { + + version = src->version; + /* Make sure that the version is read before the steal */ + virt_rmb(); + steal = src->steal; + /* Make sure that the steal is read before the next version */ + virt_rmb(); + + } while ((version & 1) || (version != src->version)); + return steal; +} + +static int pv_register_steal_time(void) +{ + int cpu = smp_processor_id(); + struct kvm_steal_time *st; + unsigned long addr; + + if (!has_steal_clock) + return -EPERM; + + st = &per_cpu(steal_time, cpu); + addr = per_cpu_ptr_to_phys(st); + + /* The whole structure kvm_steal_time should be one page */ + if (PFN_DOWN(addr) != PFN_DOWN(addr + sizeof(*st))) { + pr_warn("Illegal PV steal time addr %lx\n", addr); + return -EFAULT; + } + + addr |= KVM_STEAL_PHYS_VALID; + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, addr); + return 0; +} + +#ifdef CONFIG_SMP +static void pv_send_ipi_single(int cpu, unsigned int action) +{ + unsigned int min, old; + irq_cpustat_t *info = &per_cpu(irq_stat, cpu); + + old = atomic_fetch_or(BIT(action), &info->message); + if (old) + return; + + min = cpu_logical_map(cpu); + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, 1, 0, min); +} + +#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG) +static void pv_send_ipi_mask(const struct cpumask *mask, unsigned int action) +{ + unsigned int cpu, i, min = 0, max = 0, old; + __uint128_t bitmap = 0; + irq_cpustat_t *info; + + if (cpumask_empty(mask)) + return; + + action = BIT(action); + for_each_cpu(i, mask) { + info = &per_cpu(irq_stat, i); + old = atomic_fetch_or(action, &info->message); + if (old) + continue; + + cpu = cpu_logical_map(i); + if (!bitmap) { + min = max = cpu; + } else if (cpu > min && cpu < min + KVM_IPI_CLUSTER_SIZE) { + max = cpu > max ? 
cpu : max; + } else if (cpu < min && (max - cpu) < KVM_IPI_CLUSTER_SIZE) { + bitmap <<= min - cpu; + min = cpu; + } else { + /* + * Physical cpuids are sorted in ascending order; before starting + * the next mask calculation, send the IPI here directly and + * skip the remaining cpus + */ + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, + (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); + min = max = cpu; + bitmap = 0; + } + __set_bit(cpu - min, (unsigned long *)&bitmap); + } + + if (bitmap) + kvm_hypercall3(KVM_HCALL_FUNC_PV_IPI, (unsigned long)bitmap, + (unsigned long)(bitmap >> BITS_PER_LONG), min); +} + +static irqreturn_t loongson_do_swi(int irq, void *dev) +{ + irq_cpustat_t *info; + long action; + + /* Clear swi interrupt */ + clear_csr_estat(1 << INT_SWI0); + info = this_cpu_ptr(&irq_stat); + action = atomic_xchg(&info->message, 0); + if (action & SMP_CALL_FUNCTION) { + generic_smp_call_function_interrupt(); + info->ipi_irqs[IPI_CALL_FUNCTION]++; + } + + if (action & SMP_RESCHEDULE) { + scheduler_ipi(); + info->ipi_irqs[IPI_RESCHEDULE]++; + } + + return IRQ_HANDLED; +} + +static void pv_init_ipi(void) +{ + int r, swi0; + + swi0 = get_percpu_irq(INT_SWI0); + if (swi0 < 0) + panic("SWI0 IRQ mapping failed\n"); + irq_set_percpu_devid(swi0); + r = request_percpu_irq(swi0, loongson_do_swi, "SWI0", &irq_stat); + if (r < 0) + panic("SWI0 IRQ request failed\n"); +} + +static void pv_disable_steal_time(void) +{ + if (has_steal_clock) + kvm_hypercall2(KVM_HCALL_FUNC_NOTIFY, KVM_FEATURE_STEAL_TIME, 0); +} + +static int pv_cpu_online(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_register_steal_time(); + local_irq_restore(flags); + return 0; +} + +static int pv_cpu_down_prepare(unsigned int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + pv_disable_steal_time(); + local_irq_restore(flags); + return 0; +} +#endif + +static bool kvm_para_available(void) +{ + static int hypervisor_type; + int config; + + if (!hypervisor_type) { + config = read_cpucfg(CPUCFG_KVM_SIG); + if (!memcmp(&config, KVM_SIGNATURE, 4)) + hypervisor_type = HYPERVISOR_KVM; + } + + return hypervisor_type == HYPERVISOR_KVM; +} + +int __init pv_ipi_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + /* + * Check whether the KVM hypervisor supports pv_ipi or not + */ + feature = read_cpucfg(CPUCFG_KVM_FEATURE); +#ifdef CONFIG_SMP + if (feature & KVM_FEATURE_PV_IPI) { + smp_ops.init_ipi = pv_init_ipi; + smp_ops.send_ipi_single = pv_send_ipi_single; + smp_ops.send_ipi_mask = pv_send_ipi_mask; + } +#endif + + return 1; +} + +static void pv_cpu_reboot(void *unused) +{ + pv_disable_steal_time(); +} + +static int pv_reboot_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + on_each_cpu(pv_cpu_reboot, NULL, 1); + return NOTIFY_DONE; +} + +static struct notifier_block pv_reboot_nb = { + .notifier_call = pv_reboot_notify, +}; + +int __init pv_time_init(void) +{ + int feature; + + if (!cpu_has_hypervisor) + return 0; + if (!kvm_para_available()) + return 0; + + feature = read_cpucfg(CPUCFG_KVM_FEATURE); + if (!(feature & KVM_FEATURE_STEAL_TIME)) + return 0; + + has_steal_clock = 1; + if (pv_register_steal_time()) { + has_steal_clock = 0; + return 0; + } + + register_reboot_notifier(&pv_reboot_nb); + static_call_update(pv_steal_clock, para_steal_clock); + static_key_slow_inc(&paravirt_steal_enabled); + if (steal_acc) + static_key_slow_inc(&paravirt_steal_rq_enabled); + +#ifdef CONFIG_SMP + if 
(cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "loongarch/pv:online", + pv_cpu_online, pv_cpu_down_prepare) < 0) + pr_err("Failed to install cpu hotplug callbacks\n"); +#endif + pr_info("Using stolen time PV\n"); + return 0; +} diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c index 0491bf453cd49601c4f8b7b35565ea4a2b83c689..3265c8f33223fc9da4a1deb38dae70d411e83b38 100644 --- a/arch/loongarch/kernel/perf_event.c +++ b/arch/loongarch/kernel/perf_event.c @@ -456,16 +456,6 @@ static void loongarch_pmu_disable(struct pmu *pmu) static DEFINE_MUTEX(pmu_reserve_mutex); static atomic_t active_events = ATOMIC_INIT(0); -static int get_pmc_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_PCOV); - - return -EINVAL; -} - static void reset_counters(void *arg); static int __hw_perf_event_init(struct perf_event *event); @@ -473,7 +463,7 @@ static void hw_perf_event_destroy(struct perf_event *event) { if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { on_each_cpu(reset_counters, NULL, 1); - free_irq(get_pmc_irq(), &loongarch_pmu); + free_irq(get_percpu_irq(INT_PCOV), &loongarch_pmu); mutex_unlock(&pmu_reserve_mutex); } } @@ -562,7 +552,7 @@ static int loongarch_pmu_event_init(struct perf_event *event) if (event->cpu >= 0 && !cpu_online(event->cpu)) return -ENODEV; - irq = get_pmc_irq(); + irq = get_percpu_irq(INT_PCOV); flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED; if (!atomic_inc_not_zero(&active_events)) { mutex_lock(&pmu_reserve_mutex); diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c index 1ef8c63835351ba7b04eb4838464927c1ac1c32c..e7282e8de1cda0e9bde69f4cdfc7430d3b0c319c 100644 --- a/arch/loongarch/kernel/reset.c +++ b/arch/loongarch/kernel/reset.c @@ -49,7 +49,8 @@ void machine_power_off(void) #endif do_kernel_power_off(); #ifdef CONFIG_EFI - efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); + if (efi.reset_system) + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); #endif while (true) { diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c index aed65915e932e2963913ac2b4ada44102eed9e32..d1f1f2250db9fc2fb44a30831a4b9ad4567e757a 100644 --- a/arch/loongarch/kernel/setup.c +++ b/arch/loongarch/kernel/setup.c @@ -48,6 +48,7 @@ #include #include #include +#include "legacy_boot.h" #define SMBIOS_BIOSSIZE_OFFSET 0x09 #define SMBIOS_BIOSEXTERN_OFFSET 0x13 @@ -68,6 +69,8 @@ EXPORT_SYMBOL(cpu_data); struct loongson_board_info b_info; static const char dmi_empty_string[] = " "; +static phys_addr_t crashmem_start, crashmem_size; + /* * Setup information * @@ -134,9 +137,22 @@ static void __init parse_cpu_table(const struct dmi_header *dm) static void __init parse_bios_table(const struct dmi_header *dm) { + int bios_extern; char *dmi_data = (char *)dm; + bios_extern = *(dmi_data + SMBIOS_BIOSEXTERN_OFFSET); b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6; + + if (bpi_version == BPI_VERSION_V2) { + if ((!!(efi_bp->flags & BPI_FLAGS_UEFI_SUPPORTED)) != (!!(bios_extern & LOONGSON_EFI_ENABLE))) + pr_err("There is a conflict of definitions between efi_bp->flags and smbios\n"); + return; + } + + if (bios_extern & LOONGSON_EFI_ENABLE) + set_bit(EFI_BOOT, &efi.flags); + else + clear_bit(EFI_BOOT, &efi.flags); } static void __init find_tokens(const struct dmi_header *dm, void *dummy) @@ -192,16 +208,6 @@ static int __init early_parse_mem(char *p) return 
-EINVAL; } - /* - * If a user specifies memory size, we - * blow away any automatically generated - * size. - */ - if (usermem == 0) { - usermem = 1; - memblock_remove(memblock_start_of_DRAM(), - memblock_end_of_DRAM() - memblock_start_of_DRAM()); - } start = 0; size = memparse(p, &p); if (*p == '@') @@ -211,6 +217,23 @@ static int __init early_parse_mem(char *p) return -EINVAL; } + /* + * If a user specifies memory size, we + * blow away any automatically generated + * size. + */ + if (usermem == 0) { + usermem = 1; + if (!strstr(boot_command_line, "elfcorehdr")) { + memblock_remove(memblock_start_of_DRAM(), + memblock_end_of_DRAM() - memblock_start_of_DRAM()); + } else { + crashmem_start = start; + crashmem_size = size; + return 0; + } + } + if (!IS_ENABLED(CONFIG_NUMA)) memblock_add(start, size); else @@ -355,11 +378,59 @@ static void __init bootcmdline_init(char **cmdline_p) *cmdline_p = boot_command_line; } -void __init platform_init(void) +/* + * After the kdump operation is performed to enter the capture kernel, the + * memory area used by the previous production kernel should be reserved to + * avoid destroy to the captured data. + */ +static void reserve_oldmem_region(int node, unsigned long s0, unsigned long e0) { - arch_reserve_vmcore(); - arch_parse_crashkernel(); +#ifdef CONFIG_CRASH_DUMP + unsigned long s1, e1; + if (!is_kdump_kernel()) + return; + + if ((e0 - s0) > (SZ_1G >> PAGE_SHIFT)) + e0 = e0 - (SZ_512M >> PAGE_SHIFT); + + /* crashmem_start is crashk_res reserved by primary production kernel */ + s1 = PFN_UP(crashmem_start); + e1 = PFN_DOWN(crashmem_start + crashmem_size); + + if (s1 == 0) + return; + + if (node == 0) { + memblock_reserve(PFN_PHYS(s0), (s1 - s0) << PAGE_SHIFT); + memblock_reserve(PFN_PHYS(e1), (e0 - e1) << PAGE_SHIFT); + } else { + memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); + } +#endif +} + +/* Traditionally, LoongArch's contiguous low memory is 256M, so crashkernel=X@Y is + * unable to be large enough in some cases. Thus, if the total memory of a node + * is more than 1GB, we reserve the top 512MB for the capture kernel + */ +static void reserve_crashm_region(int node, unsigned long s0, unsigned long e0) +{ +#ifdef CONFIG_KEXEC + if (crashk_res.start == crashk_res.end) + return; + + if ((e0 - s0) <= (SZ_1G >> PAGE_SHIFT)) + return; + + s0 = e0 - (SZ_512M >> PAGE_SHIFT); + + memblock_reserve(PFN_PHYS(s0), (e0 - s0) << PAGE_SHIFT); +#endif +} + +void __init platform_init(void) +{ #ifdef CONFIG_ACPI_TABLE_UPGRADE acpi_table_upgrade(); #endif @@ -395,6 +466,17 @@ static void __init check_kernel_sections_mem(void) */ static void __init arch_mem_init(char **cmdline_p) { + unsigned int node; + unsigned long start_pfn, end_pfn; + + arch_reserve_vmcore(); + arch_parse_crashkernel(); + for_each_online_node(node) { + get_pfn_range_for_nid(node, &start_pfn, &end_pfn); + reserve_crashm_region(node, start_pfn, end_pfn); + reserve_oldmem_region(node, start_pfn, end_pfn); + } + if (usermem) pr_info("User-defined physical RAM map overwrite\n"); @@ -614,7 +696,9 @@ void __init setup_arch(char **cmdline_p) pagetable_init(); bootcmdline_init(cmdline_p); parse_early_param(); - reserve_initrd_mem(); + /* The small fdt method should be skipped directly to avoid two reserved operations. 
*/ + if (fw_arg2) + reserve_initrd_mem(); platform_init(); arch_mem_init(cmdline_p); diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c index ef35c871244f0825e998359a3913318bfb872434..014f824abe17faa95ef1fff77265667262e4e925 100644 --- a/arch/loongarch/kernel/smp.c +++ b/arch/loongarch/kernel/smp.c @@ -29,9 +29,11 @@ #include #include #include +#include #include #include #include +#include "legacy_boot.h" int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ EXPORT_SYMBOL(__cpu_number_map); @@ -66,11 +68,6 @@ static cpumask_t cpu_core_setup_map; struct secondary_data cpuboot_data; static DEFINE_PER_CPU(int, cpu_state); -enum ipi_msg_type { - IPI_RESCHEDULE, - IPI_CALL_FUNCTION, -}; - static const char *ipi_types[NR_IPI] __tracepoint_string = { [IPI_RESCHEDULE] = "Rescheduling interrupts", [IPI_CALL_FUNCTION] = "Function call interrupts", @@ -123,24 +120,19 @@ static u32 ipi_read_clear(int cpu) static void ipi_write_action(int cpu, u32 action) { - unsigned int irq = 0; - - while ((irq = ffs(action))) { - uint32_t val = IOCSR_IPI_SEND_BLOCKING; + uint32_t val; - val |= (irq - 1); - val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); - iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); - action &= ~BIT(irq - 1); - } + val = IOCSR_IPI_SEND_BLOCKING | action; + val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT); + iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND); } -void loongson_send_ipi_single(int cpu, unsigned int action) +static void loongson_send_ipi_single(int cpu, unsigned int action) { ipi_write_action(cpu_logical_map(cpu), (u32)action); } -void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) +static void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) { unsigned int i; @@ -148,6 +140,16 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) ipi_write_action(cpu_logical_map(i), (u32)action); } +void arch_send_call_function_single_ipi(int cpu) +{ + smp_ops.send_ipi_single(cpu, ACTION_CALL_FUNCTION); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_ops.send_ipi_mask(mask, ACTION_CALL_FUNCTION); +} + /* * This function sends a 'reschedule' IPI to another CPU. 
* it goes straight through and wastes no time serializing @@ -155,11 +157,11 @@ void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action) */ void arch_smp_send_reschedule(int cpu) { - loongson_send_ipi_single(cpu, SMP_RESCHEDULE); + smp_ops.send_ipi_single(cpu, ACTION_RESCHEDULE); } EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); -irqreturn_t loongson_ipi_interrupt(int irq, void *dev) +static irqreturn_t loongson_ipi_interrupt(int irq, void *dev) { unsigned int action; unsigned int cpu = smp_processor_id(); @@ -179,6 +181,26 @@ irqreturn_t loongson_ipi_interrupt(int irq, void *dev) return IRQ_HANDLED; } +static void loongson_init_ipi(void) +{ + int r, ipi_irq; + + ipi_irq = get_percpu_irq(INT_IPI); + if (ipi_irq < 0) + panic("IPI IRQ mapping failed\n"); + + irq_set_percpu_devid(ipi_irq); + r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &irq_stat); + if (r < 0) + panic("IPI IRQ request failed\n"); +} + +struct smp_ops smp_ops = { + .init_ipi = loongson_init_ipi, + .send_ipi_single = loongson_send_ipi_single, + .send_ipi_mask = loongson_send_ipi_mask, +}; + static void __init fdt_smp_setup(void) { #ifdef CONFIG_OF @@ -219,6 +241,7 @@ void __init loongson_smp_setup(void) cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package; cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package; + pv_ipi_init(); iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN); pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus); } @@ -243,17 +266,18 @@ void __init loongson_prepare_cpus(unsigned int max_cpus) */ void loongson_boot_secondary(int cpu, struct task_struct *idle) { - unsigned long entry; + unsigned long entry = (unsigned long)&smpboot_entry; pr_info("Booting CPU#%d...\n", cpu); - entry = __pa_symbol((unsigned long)&smpboot_entry); + if (!efi_bp) + entry = __pa_symbol((unsigned long)&smpboot_entry); cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle); cpuboot_data.thread_info = (unsigned long)task_thread_info(idle); csr_mail_send(entry, cpu_logical_map(cpu), 0); - loongson_send_ipi_single(cpu, SMP_BOOT_CPU); + loongson_send_ipi_single(cpu, ACTION_BOOT_CPU); } /* @@ -262,7 +286,7 @@ void loongson_boot_secondary(int cpu, struct task_struct *idle) void loongson_init_secondary(void) { unsigned int cpu = smp_processor_id(); - unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | + unsigned int imask = ECFGF_SIP0 | ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER; change_csr_ecfg(ECFG0_IM, imask); @@ -302,8 +326,7 @@ int loongson_cpu_disable(void) set_cpu_online(cpu, false); calculate_cpu_foreign_map(); local_irq_save(flags); - irq_migrate_all_off_this_cpu(); - clear_csr_ecfg(ECFG0_IM); + fixup_irqs(); local_irq_restore(flags); local_flush_tlb_all(); diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c index 3064af94db9c2e14e953a4aad68c2a8d28588447..46d7d40c87e38e097af74385be1e518bf95d5251 100644 --- a/arch/loongarch/kernel/time.c +++ b/arch/loongarch/kernel/time.c @@ -15,6 +15,7 @@ #include #include +#include #include u64 cpu_clock_freq; @@ -58,14 +59,16 @@ static int constant_set_state_oneshot(struct clock_event_device *evt) return 0; } -static int constant_set_state_oneshot_stopped(struct clock_event_device *evt) +static int constant_set_state_periodic(struct clock_event_device *evt) { + unsigned long period; unsigned long timer_config; raw_spin_lock(&state_lock); - timer_config = csr_read64(LOONGARCH_CSR_TCFG); - timer_config &= ~CSR_TCFG_EN; + period = const_clock_freq / HZ; + 
timer_config = period & CSR_TCFG_VAL; + timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN); csr_write64(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -73,16 +76,14 @@ static int constant_set_state_oneshot_stopped(struct clock_event_device *evt) return 0; } -static int constant_set_state_periodic(struct clock_event_device *evt) +static int constant_set_state_shutdown(struct clock_event_device *evt) { - unsigned long period; unsigned long timer_config; raw_spin_lock(&state_lock); - period = const_clock_freq / HZ; - timer_config = period & CSR_TCFG_VAL; - timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN); + timer_config = csr_read64(LOONGARCH_CSR_TCFG); + timer_config &= ~CSR_TCFG_EN; csr_write64(timer_config, LOONGARCH_CSR_TCFG); raw_spin_unlock(&state_lock); @@ -90,11 +91,6 @@ static int constant_set_state_periodic(struct clock_event_device *evt) return 0; } -static int constant_set_state_shutdown(struct clock_event_device *evt) -{ - return 0; -} - static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt) { unsigned long timer_config; @@ -128,16 +124,6 @@ void sync_counter(void) csr_write64(init_offset, LOONGARCH_CSR_CNTC); } -static int get_timer_irq(void) -{ - struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY); - - if (d) - return irq_create_mapping(d, INT_TI); - - return -EINVAL; -} - int constant_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -147,7 +133,7 @@ int constant_clockevent_init(void) static int irq = 0, timer_irq_installed = 0; if (!timer_irq_installed) { - irq = get_timer_irq(); + irq = get_percpu_irq(INT_TI); if (irq < 0) pr_err("Failed to map irq %d (timer)\n", irq); } @@ -161,7 +147,7 @@ int constant_clockevent_init(void) cd->rating = 320; cd->cpumask = cpumask_of(cpu); cd->set_state_oneshot = constant_set_state_oneshot; - cd->set_state_oneshot_stopped = constant_set_state_oneshot_stopped; + cd->set_state_oneshot_stopped = constant_set_state_shutdown; cd->set_state_periodic = constant_set_state_periodic; cd->set_state_shutdown = constant_set_state_shutdown; cd->set_next_event = constant_timer_next_event; @@ -229,4 +215,5 @@ void __init time_init(void) constant_clockevent_init(); constant_clocksource_init(); + pv_time_init(); } diff --git a/arch/loongarch/kvm/Kconfig b/arch/loongarch/kvm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..fda425babfb203d3fc36052b9722515ab7c568e6 --- /dev/null +++ b/arch/loongarch/kvm/Kconfig @@ -0,0 +1,40 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and + disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on AS_HAS_LVZ_EXTENSION + depends on HAVE_KVM + select HAVE_KVM_DIRTY_RING_ACQ_REL + select HAVE_KVM_EVENTFD + select HAVE_KVM_VCPU_ASYNC_IOCTL + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select KVM_GENERIC_HARDWARE_ENABLING + select KVM_MMIO + select KVM_XFER_TO_GUEST_WORK + select MMU_NOTIFIER + select PREEMPT_NOTIFIERS + help + Support hosting virtualized guest machines using + hardware virtualization extensions. You will need + a processor equipped with virtualization extensions. + + If unsure, say N. 
+ +endif # VIRTUALIZATION diff --git a/arch/loongarch/kvm/Makefile b/arch/loongarch/kvm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..244467d7792a97c80c6558c39b2b7bf8a898fafd --- /dev/null +++ b/arch/loongarch/kvm/Makefile @@ -0,0 +1,22 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for LoongArch KVM support +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += exit.o +kvm-y += interrupt.o +kvm-y += main.o +kvm-y += mmu.o +kvm-y += switch.o +kvm-y += timer.o +kvm-y += tlb.o +kvm-y += vcpu.o +kvm-y += vm.o + +CFLAGS_exit.o += $(call cc-option,-Wno-override-init,) diff --git a/arch/loongarch/kvm/exit.c b/arch/loongarch/kvm/exit.c new file mode 100644 index 0000000000000000000000000000000000000000..8affc6d4a66e6973473673fb176b39064b53287c --- /dev/null +++ b/arch/loongarch/kvm/exit.c @@ -0,0 +1,877 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "trace.h" + +static unsigned long kvm_emu_read_csr(struct kvm_vcpu *vcpu, int csrid) +{ + unsigned long val = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * From LoongArch Reference Manual Volume 1 Chapter 4.2.1 + * For undefined CSR id, return value is 0 + */ + if (get_gcsr_flag(csrid) & SW_GCSR) + val = kvm_read_sw_gcsr(csr, csrid); + else + pr_warn_once("Unsupported csrrd 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return val; +} + +static unsigned long kvm_emu_write_csr(struct kvm_vcpu *vcpu, int csrid, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + kvm_write_sw_gcsr(csr, csrid, val); + } else + pr_warn_once("Unsupported csrwr 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static unsigned long kvm_emu_xchg_csr(struct kvm_vcpu *vcpu, int csrid, + unsigned long csr_mask, unsigned long val) +{ + unsigned long old = 0; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(csrid) & SW_GCSR) { + old = kvm_read_sw_gcsr(csr, csrid); + val = (old & ~csr_mask) | (val & csr_mask); + kvm_write_sw_gcsr(csr, csrid, val); + old = old & csr_mask; + } else + pr_warn_once("Unsupported csrxchg 0x%x with pc %lx\n", csrid, vcpu->arch.pc); + + return old; +} + +static int kvm_handle_csr(struct kvm_vcpu *vcpu, larch_inst inst) +{ + unsigned int rd, rj, csrid; + unsigned long csr_mask, val = 0; + + /* + * CSR value mask imm + * rj = 0 means csrrd + * rj = 1 means csrwr + * rj != 0,1 means csrxchg + */ + rd = inst.reg2csr_format.rd; + rj = inst.reg2csr_format.rj; + csrid = inst.reg2csr_format.csr; + + if (csrid >= LOONGARCH_CSR_PERFCTRL0 && csrid <= LOONGARCH_CSR_PERFCNTR3) { + if (!kvm_own_pmu(vcpu)) { + vcpu->arch.pc -= 4; + return EMULATE_DONE; + } + } + + /* Process CSR ops */ + switch (rj) { + case 0: /* process csrrd */ + val = kvm_emu_read_csr(vcpu, csrid); + vcpu->arch.gprs[rd] = val; + break; + case 1: /* process csrwr */ + val = vcpu->arch.gprs[rd]; + val = kvm_emu_write_csr(vcpu, csrid, val); + vcpu->arch.gprs[rd] = val; + break; + default: /* process csrxchg */ + val = vcpu->arch.gprs[rd]; + csr_mask = vcpu->arch.gprs[rj]; + val = kvm_emu_xchg_csr(vcpu, csrid, csr_mask, val); + vcpu->arch.gprs[rd] = val; + } + + return EMULATE_DONE; +} + +int 
kvm_emu_iocsr(larch_inst inst, struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret; + unsigned long val; + u32 addr, rd, rj, opcode; + + /* + * Each IOCSR with different opcode + */ + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + opcode = inst.reg2_format.opcode; + addr = vcpu->arch.gprs[rj]; + ret = EMULATE_DO_IOCSR; + run->iocsr_io.phys_addr = addr; + run->iocsr_io.is_write = 0; + + /* LoongArch is Little endian */ + switch (opcode) { + case iocsrrdb_op: + run->iocsr_io.len = 1; + break; + case iocsrrdh_op: + run->iocsr_io.len = 2; + break; + case iocsrrdw_op: + run->iocsr_io.len = 4; + break; + case iocsrrdd_op: + run->iocsr_io.len = 8; + break; + case iocsrwrb_op: + run->iocsr_io.len = 1; + run->iocsr_io.is_write = 1; + break; + case iocsrwrh_op: + run->iocsr_io.len = 2; + run->iocsr_io.is_write = 1; + break; + case iocsrwrw_op: + run->iocsr_io.len = 4; + run->iocsr_io.is_write = 1; + break; + case iocsrwrd_op: + run->iocsr_io.len = 8; + run->iocsr_io.is_write = 1; + break; + default: + ret = EMULATE_FAIL; + break; + } + + if (ret == EMULATE_DO_IOCSR) { + if (run->iocsr_io.is_write) { + val = vcpu->arch.gprs[rd]; + memcpy(run->iocsr_io.data, &val, run->iocsr_io.len); + } + vcpu->arch.io_gpr = rd; + } + + return ret; +} + +int kvm_complete_iocsr_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + switch (run->iocsr_io.len) { + case 1: + *gpr = *(s8 *)run->iocsr_io.data; + break; + case 2: + *gpr = *(s16 *)run->iocsr_io.data; + break; + case 4: + *gpr = *(s32 *)run->iocsr_io.data; + break; + case 8: + *gpr = *(s64 *)run->iocsr_io.data; + break; + default: + kvm_err("Bad IOCSR length: %d, addr is 0x%lx\n", + run->iocsr_io.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_emu_idle(struct kvm_vcpu *vcpu) +{ + ++vcpu->stat.idle_exits; + trace_kvm_exit_idle(vcpu, KVM_TRACE_EXIT_IDLE); + + if (!kvm_arch_vcpu_runnable(vcpu)) + kvm_vcpu_halt(vcpu); + + return EMULATE_DONE; +} + +static int kvm_emu_cpucfg(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int rd, rj; + unsigned int index, ret; + unsigned long plv; + + rd = inst.reg2_format.rd; + rj = inst.reg2_format.rj; + ++vcpu->stat.cpucfg_exits; + index = vcpu->arch.gprs[rj]; + + /* + * By LoongArch Reference Manual 2.2.10.5 + * Return value is 0 for undefined cpucfg index + * + * Disable preemption since hw gcsr is accessed + */ + preempt_disable(); + plv = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD) >> CSR_CRMD_PLV_SHIFT; + switch (index) { + case 0 ... 
(KVM_MAX_CPUCFG_REGS - 1): + vcpu->arch.gprs[rd] = vcpu->arch.cpucfg[index]; + break; + case CPUCFG_KVM_SIG: + /* + * Cpucfg emulation between 0x40000000 -- 0x400000ff + * Return value with 0 if executed in user mode + */ + if ((plv & CSR_CRMD_PLV) == PLV_KERN) + vcpu->arch.gprs[rd] = *(unsigned int *)KVM_SIGNATURE; + else + vcpu->arch.gprs[rd] = 0; + break; + case CPUCFG_KVM_FEATURE: + ret = 0; + if ((plv & CSR_CRMD_PLV) == PLV_KERN) { + ret = KVM_FEATURE_PV_IPI; + if (sched_info_on()) + ret |= KVM_FEATURE_STEAL_TIME; + } + vcpu->arch.gprs[rd] = ret; + break; + default: + vcpu->arch.gprs[rd] = 0; + break; + } + + preempt_enable(); + return EMULATE_DONE; +} + +static int kvm_trap_handle_gspr(struct kvm_vcpu *vcpu) +{ + unsigned long curr_pc; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + + /* Fetch the instruction */ + inst.word = vcpu->arch.badi; + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + trace_kvm_exit_gspr(vcpu, inst.word); + er = EMULATE_FAIL; + switch (((inst.word >> 24) & 0xff)) { + case 0x0: /* CPUCFG GSPR */ + if (inst.reg2_format.opcode == cpucfg_op) + er = kvm_emu_cpucfg(vcpu, inst); + break; + case 0x4: /* CSR{RD,WR,XCHG} GSPR */ + er = kvm_handle_csr(vcpu, inst); + break; + case 0x6: /* Cache, Idle and IOCSR GSPR */ + switch (((inst.word >> 22) & 0x3ff)) { + case 0x18: /* Cache GSPR */ + er = EMULATE_DONE; + trace_kvm_exit_cache(vcpu, KVM_TRACE_EXIT_CACHE); + break; + case 0x19: /* Idle/IOCSR GSPR */ + switch (((inst.word >> 15) & 0x1ffff)) { + case 0xc90: /* IOCSR GSPR */ + er = kvm_emu_iocsr(inst, run, vcpu); + break; + case 0xc91: /* Idle GSPR */ + er = kvm_emu_idle(vcpu); + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + break; + default: + er = EMULATE_FAIL; + break; + } + + /* Rollback PC only if emulation was unsuccessful */ + if (er == EMULATE_FAIL) { + kvm_err("[%#lx]%s: unsupported gspr instruction 0x%08x\n", + curr_pc, __func__, inst.word); + + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->arch.pc = curr_pc; + } + + return er; +} + +/* + * Trigger GSPR: + * 1) Execute CPUCFG instruction; + * 2) Execute CACOP/IDLE instructions; + * 3) Access to unimplemented CSRs/IOCSRs. + */ +static int kvm_handle_gspr(struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + enum emulation_result er = EMULATE_DONE; + + er = kvm_trap_handle_gspr(vcpu); + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + vcpu->run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else if (er == EMULATE_DO_IOCSR) { + vcpu->run->exit_reason = KVM_EXIT_LOONGARCH_IOCSR; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + ret = RESUME_GUEST; + } + + return ret; +} + +int kvm_emu_mmio_read(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int op8, opcode, rd; + struct kvm_run *run = vcpu->run; + + run->mmio.phys_addr = vcpu->arch.badv; + vcpu->mmio_needed = 2; /* signed */ + op8 = (inst.word >> 24) & 0xff; + ret = EMULATE_DO_MMIO; + + switch (op8) { + case 0x24 ... 0x27: /* ldptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case ldptrw_op: + run->mmio.len = 4; + break; + case ldptrd_op: + run->mmio.len = 8; + break; + default: + break; + } + break; + case 0x28 ... 
0x2e: /* ld.b/h/w/d, ld.bu/hu/wu process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + + switch (opcode) { + case ldb_op: + run->mmio.len = 1; + break; + case ldbu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 1; + break; + case ldh_op: + run->mmio.len = 2; + break; + case ldhu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 2; + break; + case ldw_op: + run->mmio.len = 4; + break; + case ldwu_op: + vcpu->mmio_needed = 1; /* unsigned */ + run->mmio.len = 4; + break; + case ldd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* ldx.b/h/w/d, ldx.bu/hu/wu process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case ldxb_op: + run->mmio.len = 1; + break; + case ldxbu_op: + run->mmio.len = 1; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxh_op: + run->mmio.len = 2; + break; + case ldxhu_op: + run->mmio.len = 2; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxw_op: + run->mmio.len = 4; + break; + case ldxwu_op: + run->mmio.len = 4; + vcpu->mmio_needed = 1; /* unsigned */ + break; + case ldxd_op: + run->mmio.len = 8; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + /* Set for kvm_complete_mmio_read() use */ + vcpu->arch.io_gpr = rd; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + } else { + kvm_err("Read not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + vcpu->mmio_needed = 0; + } + + return ret; +} + +int kvm_complete_mmio_read(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + enum emulation_result er = EMULATE_DONE; + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + + /* Update with new PC */ + update_pc(&vcpu->arch); + switch (run->mmio.len) { + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(s8 *)run->mmio.data; + else + *gpr = *(u8 *)run->mmio.data; + break; + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(s16 *)run->mmio.data; + else + *gpr = *(u16 *)run->mmio.data; + break; + case 4: + if (vcpu->mmio_needed == 2) + *gpr = *(s32 *)run->mmio.data; + else + *gpr = *(u32 *)run->mmio.data; + break; + case 8: + *gpr = *(s64 *)run->mmio.data; + break; + default: + kvm_err("Bad MMIO length: %d, addr is 0x%lx\n", + run->mmio.len, vcpu->arch.badv); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_emu_mmio_write(struct kvm_vcpu *vcpu, larch_inst inst) +{ + int ret; + unsigned int rd, op8, opcode; + unsigned long curr_pc, rd_val = 0; + struct kvm_run *run = vcpu->run; + void *data = run->mmio.data; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + update_pc(&vcpu->arch); + + op8 = (inst.word >> 24) & 0xff; + run->mmio.phys_addr = vcpu->arch.badv; + ret = EMULATE_DO_MMIO; + switch (op8) { + case 0x24 ... 0x27: /* stptr.w/d process */ + rd = inst.reg2i14_format.rd; + opcode = inst.reg2i14_format.opcode; + + switch (opcode) { + case stptrw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stptrd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x28 ... 
0x2e: /* st.b/h/w/d process */ + rd = inst.reg2i12_format.rd; + opcode = inst.reg2i12_format.opcode; + rd_val = vcpu->arch.gprs[rd]; + + switch (opcode) { + case stb_op: + run->mmio.len = 1; + *(unsigned char *)data = rd_val; + break; + case sth_op: + run->mmio.len = 2; + *(unsigned short *)data = rd_val; + break; + case stw_op: + run->mmio.len = 4; + *(unsigned int *)data = rd_val; + break; + case std_op: + run->mmio.len = 8; + *(unsigned long *)data = rd_val; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + case 0x38: /* stx.b/h/w/d process */ + rd = inst.reg3_format.rd; + opcode = inst.reg3_format.opcode; + + switch (opcode) { + case stxb_op: + run->mmio.len = 1; + *(unsigned char *)data = vcpu->arch.gprs[rd]; + break; + case stxh_op: + run->mmio.len = 2; + *(unsigned short *)data = vcpu->arch.gprs[rd]; + break; + case stxw_op: + run->mmio.len = 4; + *(unsigned int *)data = vcpu->arch.gprs[rd]; + break; + case stxd_op: + run->mmio.len = 8; + *(unsigned long *)data = vcpu->arch.gprs[rd]; + break; + default: + ret = EMULATE_FAIL; + break; + } + break; + default: + ret = EMULATE_FAIL; + } + + if (ret == EMULATE_DO_MMIO) { + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + } else { + vcpu->arch.pc = curr_pc; + kvm_err("Write not supported Inst=0x%08x @%lx BadVaddr:%#lx\n", + inst.word, vcpu->arch.pc, vcpu->arch.badv); + kvm_arch_vcpu_dump_regs(vcpu); + /* Rollback PC if emulation was unsuccessful */ + } + + return ret; +} + +static int kvm_handle_rdwr_fault(struct kvm_vcpu *vcpu, bool write) +{ + int ret; + larch_inst inst; + enum emulation_result er = EMULATE_DONE; + struct kvm_run *run = vcpu->run; + unsigned long badv = vcpu->arch.badv; + + ret = kvm_handle_mm_fault(vcpu, badv, write); + if (ret) { + /* Treat as MMIO */ + inst.word = vcpu->arch.badi; + if (write) { + er = kvm_emu_mmio_write(vcpu, inst); + } else { + /* A code fetch fault doesn't count as an MMIO */ + if (kvm_is_ifetch_fault(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEF); + return RESUME_GUEST; + } + + er = kvm_emu_mmio_read(vcpu, inst); + } + } + + if (er == EMULATE_DONE) { + ret = RESUME_GUEST; + } else if (er == EMULATE_DO_MMIO) { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } else { + kvm_queue_exception(vcpu, EXCCODE_ADE, EXSUBCODE_ADEM); + ret = RESUME_GUEST; + } + + return ret; +} + +static int kvm_handle_read_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, false); +} + +static int kvm_handle_write_fault(struct kvm_vcpu *vcpu) +{ + return kvm_handle_rdwr_fault(vcpu, true); +} + +/** + * kvm_handle_fpu_disabled() - Guest used fpu however it is disabled at host + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use fpu which hasn't been allowed + * by the root context. + */ +static int kvm_handle_fpu_disabled(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (!kvm_guest_has_fpu(&vcpu->arch)) { + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + return RESUME_GUEST; + } + + /* + * If guest FPU not present, the FPU operation should have been + * treated as a reserved instruction! + * If FPU already in use, we shouldn't get this at all. + */ + if (WARN_ON(vcpu->arch.aux_inuse & KVM_LARCH_FPU)) { + kvm_err("%s internal error\n", __func__); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + return RESUME_HOST; + } + + kvm_own_fpu(vcpu); + + return RESUME_GUEST; +} + +/* + * kvm_handle_lsx_disabled() - Guest used LSX while disabled in root. + * @vcpu: Virtual CPU context. 
+ * + * Handle when the guest attempts to use LSX when it is disabled in the root + * context. + */ +static int kvm_handle_lsx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lsx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +/* + * kvm_handle_lasx_disabled() - Guest used LASX while disabled in root. + * @vcpu: Virtual CPU context. + * + * Handle when the guest attempts to use LASX when it is disabled in the root + * context. + */ +static int kvm_handle_lasx_disabled(struct kvm_vcpu *vcpu) +{ + if (kvm_own_lasx(vcpu)) + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static int kvm_pv_send_ipi(struct kvm_vcpu *vcpu) +{ + unsigned long ipi_bitmap; + unsigned int min, cpu, i; + struct kvm_vcpu *dest; + + min = vcpu->arch.gprs[LOONGARCH_GPR_A3]; + for (i = 0; i < 2; i++, min += BITS_PER_LONG) { + ipi_bitmap = vcpu->arch.gprs[LOONGARCH_GPR_A1 + i]; + if (!ipi_bitmap) + continue; + + cpu = find_first_bit((void *)&ipi_bitmap, BITS_PER_LONG); + while (cpu < BITS_PER_LONG) { + dest = kvm_get_vcpu_by_cpuid(vcpu->kvm, cpu + min); + cpu = find_next_bit((void *)&ipi_bitmap, BITS_PER_LONG, + cpu + 1); + if (!dest) + continue; + + /* + * Send SWI0 to dest vcpu to emulate IPI interrupt + */ + kvm_queue_irq(dest, INT_SWI0); + kvm_vcpu_kick(dest); + } + } + + return 0; +} + +static int kvm_save_notify(struct kvm_vcpu *vcpu) +{ + unsigned long id, data; + + id = vcpu->arch.gprs[LOONGARCH_GPR_A1]; + data = vcpu->arch.gprs[LOONGARCH_GPR_A2]; + switch (id) { + case KVM_FEATURE_STEAL_TIME: + vcpu->arch.st.guest_addr = data; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + break; + default: + break; + } + + return 0; +} + +/* + * Hypercall emulation always returns to the guest; the caller should check retval.
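+ * + * As an illustration only (a sketch; the register conventions are inferred + * from the handlers below rather than mandated by them), a guest would + * invoke the PV IPI service roughly as: + * + * li.w $a0, KVM_HCALL_FUNC_PV_IPI # function number in A0 + * # A1/A2: destination cpu bitmap, A3: lowest cpuid covered by the bitmap + * hvcl KVM_HCALL_PV_SERVICE # trap to the hypervisor + * + * and read the KVM_HCALL_STATUS_* result back from A0 on return.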
+ */ +static void kvm_handle_pv_service(struct kvm_vcpu *vcpu) +{ + unsigned long func = vcpu->arch.gprs[LOONGARCH_GPR_A0]; + long ret; + + switch (func) { + case KVM_HCALL_FUNC_PV_IPI: + kvm_pv_send_ipi(vcpu); + ret = KVM_HCALL_STATUS_SUCCESS; + break; + case KVM_HCALL_FUNC_NOTIFY: + ret = kvm_save_notify(vcpu); + break; + default: + ret = KVM_HCALL_INVALID_CODE; + break; + } + + vcpu->arch.gprs[LOONGARCH_GPR_A0] = ret; +} + +static int kvm_handle_hypercall(struct kvm_vcpu *vcpu) +{ + larch_inst inst; + unsigned int code; + int ret; + + inst.word = vcpu->arch.badi; + code = inst.reg0i15_format.immediate; + ret = RESUME_GUEST; + + switch (code) { + case KVM_HCALL_PV_SERVICE: + vcpu->stat.hypercall_exits++; + kvm_handle_pv_service(vcpu); + break; + case KVM_HCALL_SWDBG: + /* KVM_HCALL_SWDBG is only effective when SW_BP is enabled */ + if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + ret = RESUME_HOST; + } else + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; + default: + /* Treat it as a noop instruction, only set the return value */ + vcpu->arch.gprs[LOONGARCH_GPR_A0] = KVM_HCALL_INVALID_CODE; + break; + } + + if (ret == RESUME_GUEST) + update_pc(&vcpu->arch); + + return ret; +} + +/* + * LoongArch KVM callback handling for unimplemented guest exits + */ +static int kvm_fault_ni(struct kvm_vcpu *vcpu) +{ + unsigned int ecode, inst; + unsigned long estat, badv; + + /* Fetch the instruction */ + inst = vcpu->arch.badi; + badv = vcpu->arch.badv; + estat = vcpu->arch.host_estat; + ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + kvm_err("ECode: %d PC=%#lx Inst=0x%08x BadVaddr=%#lx ESTAT=%#lx\n", + ecode, vcpu->arch.pc, inst, badv, read_gcsr_estat()); + kvm_arch_vcpu_dump_regs(vcpu); + kvm_queue_exception(vcpu, EXCCODE_INE, 0); + + return RESUME_GUEST; +} + +static exit_handle_fn kvm_fault_tables[EXCCODE_INT_START] = { + [0 ... 
EXCCODE_INT_START - 1] = kvm_fault_ni, + [EXCCODE_TLBI] = kvm_handle_read_fault, + [EXCCODE_TLBL] = kvm_handle_read_fault, + [EXCCODE_TLBS] = kvm_handle_write_fault, + [EXCCODE_TLBM] = kvm_handle_write_fault, + [EXCCODE_FPDIS] = kvm_handle_fpu_disabled, + [EXCCODE_LSXDIS] = kvm_handle_lsx_disabled, + [EXCCODE_LASXDIS] = kvm_handle_lasx_disabled, + [EXCCODE_GSPR] = kvm_handle_gspr, + [EXCCODE_HVC] = kvm_handle_hypercall, +}; + +int kvm_handle_fault(struct kvm_vcpu *vcpu, int fault) +{ + return kvm_fault_tables[fault](vcpu); +} diff --git a/arch/loongarch/kvm/interrupt.c b/arch/loongarch/kvm/interrupt.c new file mode 100644 index 0000000000000000000000000000000000000000..4c3f22de4b40a321811947f2ebff47a9e7b45ea6 --- /dev/null +++ b/arch/loongarch/kvm/interrupt.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include + +static unsigned int priority_to_irq[EXCCODE_INT_NUM] = { + [INT_TI] = CPU_TIMER, + [INT_IPI] = CPU_IPI, + [INT_SWI0] = CPU_SIP0, + [INT_SWI1] = CPU_SIP1, + [INT_HWI0] = CPU_IP0, + [INT_HWI1] = CPU_IP1, + [INT_HWI2] = CPU_IP2, + [INT_HWI3] = CPU_IP3, + [INT_HWI4] = CPU_IP4, + [INT_HWI5] = CPU_IP5, + [INT_HWI6] = CPU_IP6, + [INT_HWI7] = CPU_IP7, +}; + +static int kvm_irq_deliver(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_pending); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + set_gcsr_estat(irq); + break; + + case INT_HWI0 ... INT_HWI7: + set_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +static int kvm_irq_clear(struct kvm_vcpu *vcpu, unsigned int priority) +{ + unsigned int irq = 0; + + clear_bit(priority, &vcpu->arch.irq_clear); + if (priority < EXCCODE_INT_NUM) + irq = priority_to_irq[priority]; + + switch (priority) { + case INT_TI: + case INT_IPI: + case INT_SWI0: + case INT_SWI1: + clear_gcsr_estat(irq); + break; + + case INT_HWI0 ... 
INT_HWI7: + clear_csr_gintc(irq); + break; + + default: + break; + } + + return 1; +} + +void kvm_deliver_intr(struct kvm_vcpu *vcpu) +{ + unsigned int priority; + unsigned long *pending = &vcpu->arch.irq_pending; + unsigned long *pending_clr = &vcpu->arch.irq_clear; + + if (!(*pending) && !(*pending_clr)) + return; + + if (*pending_clr) { + priority = __ffs(*pending_clr); + while (priority <= INT_IPI) { + kvm_irq_clear(vcpu, priority); + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + } + + if (*pending) { + priority = __ffs(*pending); + while (priority <= INT_IPI) { + kvm_irq_deliver(vcpu, priority); + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + } +} + +int kvm_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(INT_TI, &vcpu->arch.irq_pending); +} + +/* + * Only illegal instruction and illegal Address Error exceptions are supported. + * Other exceptions are injected by hardware in kvm mode + */ +static void _kvm_deliver_exception(struct kvm_vcpu *vcpu, + unsigned int code, unsigned int subcode) +{ + unsigned long val, vec_size; + + /* + * BADV is added for EXCCODE_ADE exception + * Use PC register (GVA address) if it is an instruction exception + * Else use BADV from host side (GPA address) for a data exception + */ + if (code == EXCCODE_ADE) { + if (subcode == EXSUBCODE_ADEF) + val = vcpu->arch.pc; + else + val = vcpu->arch.badv; + kvm_write_hw_gcsr(LOONGARCH_CSR_BADV, val); + } + + /* Set exception instruction */ + kvm_write_hw_gcsr(LOONGARCH_CSR_BADI, vcpu->arch.badi); + + /* + * Save CRMD in PRMD + * Set IRQ disabled and PLV0 with CRMD + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD); + kvm_write_hw_gcsr(LOONGARCH_CSR_PRMD, val); + val = val & ~(CSR_CRMD_PLV | CSR_CRMD_IE); + kvm_write_hw_gcsr(LOONGARCH_CSR_CRMD, val); + + /* Set exception PC address */ + kvm_write_hw_gcsr(LOONGARCH_CSR_ERA, vcpu->arch.pc); + + /* + * Set exception code + * Exception and interrupt can be injected at the same time + * Hardware will handle the exception first and then the external interrupt + * Exception code is Ecode in ESTAT[16:21] + * Interrupt code in ESTAT[0:12] + */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT); + val = (val & ~CSR_ESTAT_EXC) | code; + kvm_write_hw_gcsr(LOONGARCH_CSR_ESTAT, val); + + /* Calculate exception entry address */ + val = kvm_read_hw_gcsr(LOONGARCH_CSR_ECFG); + vec_size = (val & CSR_ECFG_VS) >> CSR_ECFG_VS_SHIFT; + if (vec_size) + vec_size = (1 << vec_size) * 4; + val = kvm_read_hw_gcsr(LOONGARCH_CSR_EENTRY); + vcpu->arch.pc = val + code * vec_size; +} + +void kvm_deliver_exception(struct kvm_vcpu *vcpu) +{ + unsigned int code; + unsigned long *pending = &vcpu->arch.exception_pending; + + if (*pending) { + code = __ffs(*pending); + _kvm_deliver_exception(vcpu, code, vcpu->arch.esubcode); + *pending = 0; + vcpu->arch.esubcode = 0; + } +} diff --git a/arch/loongarch/kvm/main.c b/arch/loongarch/kvm/main.c new file mode 100644 index 0000000000000000000000000000000000000000..86a2f2d0cb27e3d213012d6987abde4ab1dae60e --- /dev/null +++ b/arch/loongarch/kvm/main.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include "trace.h" + +unsigned long vpid_mask; +struct kvm_world_switch *kvm_loongarch_ops; +static int gcsr_flag[CSR_MAX_NUMS]; +static struct kvm_context __percpu *vmcs; + +int get_gcsr_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + return 
gcsr_flag[csr]; + + return INVALID_GCSR; +} + +static inline void set_gcsr_sw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= SW_GCSR; +} + +static inline void set_gcsr_hw_flag(int csr) +{ + if (csr < CSR_MAX_NUMS) + gcsr_flag[csr] |= HW_GCSR; +} + +/* + * The default value of gcsr_flag[CSR] is 0, and we use this + * function to set the flag to 1 (SW_GCSR) or 2 (HW_GCSR) if the + * gcsr is software or hardware. It will be used by get/set_gcsr, + * if gcsr_flag is HW we should use gcsrrd/gcsrwr to access it, + * else use software csr to emulate it. + */ +static void kvm_init_gcsr_flag(void) +{ + set_gcsr_hw_flag(LOONGARCH_CSR_CRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_PRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_EUEN); + set_gcsr_hw_flag(LOONGARCH_CSR_MISC); + set_gcsr_hw_flag(LOONGARCH_CSR_ECFG); + set_gcsr_hw_flag(LOONGARCH_CSR_ESTAT); + set_gcsr_hw_flag(LOONGARCH_CSR_ERA); + set_gcsr_hw_flag(LOONGARCH_CSR_BADV); + set_gcsr_hw_flag(LOONGARCH_CSR_BADI); + set_gcsr_hw_flag(LOONGARCH_CSR_EENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBIDX); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBEHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_ASID); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDL); + set_gcsr_hw_flag(LOONGARCH_CSR_PGDH); + set_gcsr_hw_flag(LOONGARCH_CSR_PGD); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL0); + set_gcsr_hw_flag(LOONGARCH_CSR_PWCTL1); + set_gcsr_hw_flag(LOONGARCH_CSR_STLBPGSIZE); + set_gcsr_hw_flag(LOONGARCH_CSR_RVACFG); + set_gcsr_hw_flag(LOONGARCH_CSR_CPUID); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG1); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG2); + set_gcsr_hw_flag(LOONGARCH_CSR_PRCFG3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS0); + set_gcsr_hw_flag(LOONGARCH_CSR_KS1); + set_gcsr_hw_flag(LOONGARCH_CSR_KS2); + set_gcsr_hw_flag(LOONGARCH_CSR_KS3); + set_gcsr_hw_flag(LOONGARCH_CSR_KS4); + set_gcsr_hw_flag(LOONGARCH_CSR_KS5); + set_gcsr_hw_flag(LOONGARCH_CSR_KS6); + set_gcsr_hw_flag(LOONGARCH_CSR_KS7); + set_gcsr_hw_flag(LOONGARCH_CSR_TMID); + set_gcsr_hw_flag(LOONGARCH_CSR_TCFG); + set_gcsr_hw_flag(LOONGARCH_CSR_TVAL); + set_gcsr_hw_flag(LOONGARCH_CSR_TINTCLR); + set_gcsr_hw_flag(LOONGARCH_CSR_CNTC); + set_gcsr_hw_flag(LOONGARCH_CSR_LLBCTL); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRENTRY); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRBADV); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRERA); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRSAVE); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO0); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRELO1); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBREHI); + set_gcsr_hw_flag(LOONGARCH_CSR_TLBRPRMD); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN0); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN1); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN2); + set_gcsr_hw_flag(LOONGARCH_CSR_DMWIN3); + + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL1); + set_gcsr_sw_flag(LOONGARCH_CSR_IMPCTL2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRCTL); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO1); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRINFO2); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRENTRY); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRERA); + set_gcsr_sw_flag(LOONGARCH_CSR_MERRSAVE); + set_gcsr_sw_flag(LOONGARCH_CSR_CTAG); + set_gcsr_sw_flag(LOONGARCH_CSR_DEBUG); + set_gcsr_sw_flag(LOONGARCH_CSR_DERA); + set_gcsr_sw_flag(LOONGARCH_CSR_DESAVE); + + set_gcsr_sw_flag(LOONGARCH_CSR_FWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_FWPS); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPC); + set_gcsr_sw_flag(LOONGARCH_CSR_MWPS); + + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0MASK); + 
set_gcsr_sw_flag(LOONGARCH_CSR_DB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_DB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB0ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB1ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB2ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB3ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB4ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB5ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB6ASID); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ADDR); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7MASK); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7CTRL); + set_gcsr_sw_flag(LOONGARCH_CSR_IB7ASID); + + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR0); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR1); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR2); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCTRL3); + set_gcsr_sw_flag(LOONGARCH_CSR_PERFCNTR3); +} + +static void kvm_update_vpid(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long vpid; + struct kvm_context *context; + + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + vpid = context->vpid_cache + 1; + if (!(vpid & vpid_mask)) { + /* finish round of vpid loop */ + if (unlikely(!vpid)) + vpid = vpid_mask + 1; + + ++vpid; /* vpid 0 reserved for root */ + + /* start new vpid cycle */ + kvm_flush_tlb_all(); + } + + context->vpid_cache = vpid; + vcpu->arch.vpid = vpid; +} + +void kvm_check_vpid(struct kvm_vcpu 
*vcpu) +{ + int cpu; + bool migrated; + unsigned long ver, old, vpid; + struct kvm_context *context; + + cpu = smp_processor_id(); + /* + * Are we entering guest context on a different CPU to last time? + * If so, the vCPU's guest TLB state on this CPU may be stale. + */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + migrated = (vcpu->cpu != cpu); + + /* + * Check if our vpid is of an older version + * + * We also discard the stored vpid if we've executed on + * another CPU, as the guest mappings may have changed without + * hypervisor knowledge. + */ + ver = vcpu->arch.vpid & ~vpid_mask; + old = context->vpid_cache & ~vpid_mask; + if (migrated || (ver != old)) { + kvm_update_vpid(vcpu, cpu); + trace_kvm_vpid_change(vcpu, vcpu->arch.vpid); + vcpu->cpu = cpu; + } + + /* Restore GSTAT(0x50).vpid */ + vpid = (vcpu->arch.vpid & vpid_mask) << CSR_GSTAT_GID_SHIFT; + change_csr_gstat(vpid_mask << CSR_GSTAT_GID_SHIFT, vpid); +} + +void kvm_init_vmcs(struct kvm *kvm) +{ + kvm->arch.vmcs = vmcs; +} + +long kvm_arch_dev_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_hardware_enable(void) +{ + unsigned long env, gcfg = 0; + + env = read_csr_gcfg(); + + /* First init gcfg, gstat, gintc, gtlbc. All guests use the same config */ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* + * Enable virtualization features granting guest direct control of + * certain features: + * GCI=2: Trap on init or unimplemented cache instructions. + * TORU=0: Trap on Root Unimplemented. + * CACTRL=1: Root control cache. + * TOP=0: Trap on Privilege. + * TOE=0: Trap on Exception. + * TIT=0: Trap on Timer. + */ + if (env & CSR_GCFG_GCIP_ALL) + gcfg |= CSR_GCFG_GCI_SECURE; + if (env & CSR_GCFG_MATC_ROOT) + gcfg |= CSR_GCFG_MATC_ROOT; + + write_csr_gcfg(gcfg); + + kvm_flush_tlb_all(); + + /* Enable using TGID */ + set_csr_gtlbc(CSR_GTLBC_USETGID); + kvm_debug("GCFG:%lx GSTAT:%lx GINTC:%lx GTLBC:%lx", + read_csr_gcfg(), read_csr_gstat(), read_csr_gintc(), read_csr_gtlbc()); + + return 0; +} + +void kvm_arch_hardware_disable(void) +{ + write_csr_gcfg(0); + write_csr_gstat(0); + write_csr_gintc(0); + clear_csr_gtlbc(CSR_GTLBC_USETGID | CSR_GTLBC_TOTI); + + /* Flush any remaining guest TLB entries */ + kvm_flush_tlb_all(); +} + +static int kvm_loongarch_env_init(void) +{ + int cpu, order; + void *addr; + struct kvm_context *context; + + vmcs = alloc_percpu(struct kvm_context); + if (!vmcs) { + pr_err("kvm: failed to allocate percpu kvm_context\n"); + return -ENOMEM; + } + + kvm_loongarch_ops = kzalloc(sizeof(*kvm_loongarch_ops), GFP_KERNEL); + if (!kvm_loongarch_ops) { + free_percpu(vmcs); + vmcs = NULL; + return -ENOMEM; + } + + /* + * PGD register is shared between root kernel and kvm hypervisor. + * So world switch entry should be in DMW area rather than TLB area + * to avoid page fault reentry. + * + * In the future, if hardware page table walking is supported, we won't + * need to copy the world switch code to the DMW area. 
+ */ + order = get_order(kvm_exception_size + kvm_enter_guest_size); + addr = (void *)__get_free_pages(GFP_KERNEL, order); + if (!addr) { + free_percpu(vmcs); + vmcs = NULL; + kfree(kvm_loongarch_ops); + kvm_loongarch_ops = NULL; + return -ENOMEM; + } + + memcpy(addr, kvm_exc_entry, kvm_exception_size); + memcpy(addr + kvm_exception_size, kvm_enter_guest, kvm_enter_guest_size); + flush_icache_range((unsigned long)addr, (unsigned long)addr + kvm_exception_size + kvm_enter_guest_size); + kvm_loongarch_ops->exc_entry = addr; + kvm_loongarch_ops->enter_guest = addr + kvm_exception_size; + kvm_loongarch_ops->page_order = order; + + vpid_mask = read_csr_gstat(); + vpid_mask = (vpid_mask & CSR_GSTAT_GIDBIT) >> CSR_GSTAT_GIDBIT_SHIFT; + if (vpid_mask) + vpid_mask = GENMASK(vpid_mask - 1, 0); + + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vmcs, cpu); + context->vpid_cache = vpid_mask + 1; + context->last_vcpu = NULL; + } + + kvm_init_gcsr_flag(); + + return 0; +} + +static void kvm_loongarch_env_exit(void) +{ + unsigned long addr; + + if (vmcs) + free_percpu(vmcs); + + if (kvm_loongarch_ops) { + if (kvm_loongarch_ops->exc_entry) { + addr = (unsigned long)kvm_loongarch_ops->exc_entry; + free_pages(addr, kvm_loongarch_ops->page_order); + } + kfree(kvm_loongarch_ops); + } +} + +static int kvm_loongarch_init(void) +{ + int r; + + if (!cpu_has_lvz) { + kvm_info("Hardware virtualization not available\n"); + return -ENODEV; + } + r = kvm_loongarch_env_init(); + if (r) + return r; + + return kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); +} + +static void kvm_loongarch_exit(void) +{ + kvm_exit(); + kvm_loongarch_env_exit(); +} + +module_init(kvm_loongarch_init); +module_exit(kvm_loongarch_exit); + +#ifdef MODULE +static const struct cpu_feature kvm_feature[] = { + { .feature = cpu_feature(LOONGARCH_LVZ) }, + {}, +}; +MODULE_DEVICE_TABLE(cpu, kvm_feature); +#endif diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c new file mode 100644 index 0000000000000000000000000000000000000000..915f175278931f26164c1b970663542cf0661a12 --- /dev/null +++ b/arch/loongarch/kvm/mmu.c @@ -0,0 +1,956 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static inline bool kvm_hugepage_capable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_CAPABLE; +} + +static inline bool kvm_hugepage_incapable(struct kvm_memory_slot *slot) +{ + return slot->arch.flags & KVM_MEM_HUGEPAGE_INCAPABLE; +} + +static inline void kvm_ptw_prepare(struct kvm *kvm, kvm_ptw_ctx *ctx) +{ + ctx->level = kvm->arch.root_level; + /* pte table */ + ctx->invalid_ptes = kvm->arch.invalid_ptes; + ctx->pte_shifts = kvm->arch.pte_shifts; + ctx->pgtable_shift = ctx->pte_shifts[ctx->level]; + ctx->invalid_entry = ctx->invalid_ptes[ctx->level]; + ctx->opaque = kvm; +} + +/* + * Mark a range of guest physical address space old (all accesses fault) in the + * VM's GPA page table to allow detection of commonly used pages. + */ +static int kvm_mkold_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + if (kvm_pte_young(*pte)) { + *pte = kvm_pte_mkold(*pte); + return 1; + } + + return 0; +} + +/* + * Mark a range of guest physical address space clean (writes fault) in the VM's + * GPA page table to allow dirty page tracking. 
+ */ +static int kvm_mkclean_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + gfn_t offset; + kvm_pte_t val; + + val = *pte; + /* + * For kvm_arch_mmu_enable_log_dirty_pt_masked() with a mask, start and + * end may cross a hugepage boundary: for the first huge page the addr + * parameter is equal to start, while for the second huge page addr is + * the base address of that huge page, rather than start or end + */ + if ((ctx->flag & _KVM_HAS_PGMASK) && !kvm_pte_huge(val)) { + offset = (addr >> PAGE_SHIFT) - ctx->gfn; + if (!(BIT(offset) & ctx->mask)) + return 0; + } + + /* + * No need to split the huge page now, just set the write-protect pte + * bit; the huge page is split on the next write fault + */ + if (kvm_pte_dirty(val)) { + *pte = kvm_pte_mkclean(val); + return 1; + } + + return 0; +} + +/* + * Clear pte entry + */ +static int kvm_flush_pte(kvm_pte_t *pte, phys_addr_t addr, kvm_ptw_ctx *ctx) +{ + struct kvm *kvm; + + kvm = ctx->opaque; + if (ctx->level) + kvm->stat.hugepages--; + else + kvm->stat.pages--; + + *pte = ctx->invalid_entry; + + return 1; +} + +/* + * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory. + * + * Allocate a blank KVM GPA page directory (PGD) for representing guest physical + * to host physical page mappings. + * + * Returns: Pointer to new KVM GPA page directory. + * NULL on allocation failure. + */ +kvm_pte_t *kvm_pgd_alloc(void) +{ + kvm_pte_t *pgd; + + pgd = (kvm_pte_t *)__get_free_pages(GFP_KERNEL, 0); + if (pgd) + pgd_init((void *)pgd); + + return pgd; +} + +static void _kvm_pte_init(void *addr, unsigned long val) +{ + unsigned long *p, *end; + + p = (unsigned long *)addr; + end = p + PTRS_PER_PTE; + do { + p[0] = val; + p[1] = val; + p[2] = val; + p[3] = val; + p[4] = val; + p += 8; + p[-3] = val; + p[-2] = val; + p[-1] = val; + } while (p != end); +} + +/* + * The caller must hold kvm->mmu_lock + * + * Walk the page tables of kvm to find the PTE corresponding to the + * address @addr. If page tables don't exist for @addr, they will be created + * from the MMU cache if @cache is not NULL. 
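+ * + * For example, kvm_populate_gpa(kvm, NULL, gpa, 0) is a pure lookup down to + * the last level and returns NULL as soon as an intermediate table is + * missing, while passing a memory cache allows the missing tables to be + * allocated on the way down.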
+ */ +static kvm_pte_t *kvm_populate_gpa(struct kvm *kvm, + struct kvm_mmu_memory_cache *cache, + unsigned long addr, int level) +{ + kvm_ptw_ctx ctx; + kvm_pte_t *entry, *child; + + kvm_ptw_prepare(kvm, &ctx); + child = kvm->arch.pgd; + while (ctx.level > level) { + entry = kvm_pgtable_offset(&ctx, child, addr); + if (kvm_pte_none(&ctx, entry)) { + if (!cache) + return NULL; + + child = kvm_mmu_memory_cache_alloc(cache); + _kvm_pte_init(child, ctx.invalid_ptes[ctx.level - 1]); + kvm_set_pte(entry, __pa(child)); + } else if (kvm_pte_huge(*entry)) { + return entry; + } else + child = (kvm_pte_t *)__va(PHYSADDR(*entry)); + kvm_ptw_enter(&ctx); + } + + entry = kvm_pgtable_offset(&ctx, child, addr); + + return entry; +} + +/* + * Page walker for VM shadow mmu at last level + * The last level is small pte page or huge pmd page + */ +static int kvm_ptw_leaf(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = addr + (0x1UL << ctx->pgtable_shift); + if (!kvm_pte_present(ctx, entry)) + continue; + + ret |= ctx->ops(entry, addr, ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page table dir level + */ +static int kvm_ptw_dir(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next, start, size; + struct list_head *list; + kvm_pte_t *entry, *child; + + ret = 0; + start = addr; + child = (kvm_pte_t *)__va(PHYSADDR(*dir)); + entry = kvm_pgtable_offset(ctx, child, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + if (kvm_pte_huge(*entry)) { + ret |= ctx->ops(entry, addr, ctx); + continue; + } + + kvm_ptw_enter(ctx); + if (ctx->level == 0) + ret |= kvm_ptw_leaf(entry, addr, next, ctx); + else + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + if (kvm_need_flush(ctx)) { + size = 0x1UL << (ctx->pgtable_shift + PAGE_SHIFT - 3); + if (start + size == end) { + list = (struct list_head *)child; + list_add_tail(list, &ctx->list); + *dir = ctx->invalid_ptes[ctx->level + 1]; + } + } + + return ret; +} + +/* + * Page walker for VM shadow mmu at page root table + */ +static int kvm_ptw_top(kvm_pte_t *dir, phys_addr_t addr, phys_addr_t end, kvm_ptw_ctx *ctx) +{ + int ret; + phys_addr_t next; + kvm_pte_t *entry; + + ret = 0; + entry = kvm_pgtable_offset(ctx, dir, addr); + do { + next = kvm_pgtable_addr_end(ctx, addr, end); + if (!kvm_pte_present(ctx, entry)) + continue; + + kvm_ptw_enter(ctx); + ret |= kvm_ptw_dir(entry, addr, next, ctx); + kvm_ptw_exit(ctx); + } while (entry++, addr = next, addr < end); + + return ret; +} + +/* + * kvm_flush_range() - Flush a range of guest physical addresses. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * @lock: Whether to hold mmu_lock or not + * + * Flushes a range of GPA mappings from the GPA page tables. 
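+ * + * For example, kvm_arch_flush_shadow_all() below flushes the entire GPA space + * with kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0), while + * kvm_arch_flush_shadow_memslot() flushes a single slot with @lock set so the + * walk runs under mmu_lock.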
+ */ +static void kvm_flush_range(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn, int lock) +{ + int ret; + kvm_ptw_ctx ctx; + struct list_head *pos, *temp; + + ctx.ops = kvm_flush_pte; + ctx.flag = _KVM_FLUSH_PGTABLE; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + if (lock) { + spin_lock(&kvm->mmu_lock); + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + spin_unlock(&kvm->mmu_lock); + } else + ret = kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, + end_gfn << PAGE_SHIFT, &ctx); + + /* Flush vpid for each vCPU individually */ + if (ret) + kvm_flush_remote_tlbs(kvm); + + /* + * Free the pte table pages after dropping mmu_lock; + * the pages are linked together via ctx.list + */ + list_for_each_safe(pos, temp, &ctx.list) { + list_del(pos); + free_page((unsigned long)pos); + } +} + +/* + * kvm_mkclean_gpa_pt() - Make a range of guest physical addresses clean. + * @kvm: KVM pointer. + * @start_gfn: Guest frame number of first page in GPA range to flush. + * @end_gfn: Guest frame number of last page in GPA range to flush. + * + * Make a range of GPA mappings clean so that guest writes will fault and + * trigger dirty page logging. + * + * The caller must hold the @kvm->mmu_lock spinlock. + * + * Returns: Whether any GPA mappings were modified, which would require + * derived mappings (GVA page tables & TLB entries) to be + * invalidated. + */ +static int kvm_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) +{ + kvm_ptw_ctx ctx; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = 0; + kvm_ptw_prepare(kvm, &ctx); + return kvm_ptw_top(kvm->arch.pgd, start_gfn << PAGE_SHIFT, end_gfn << PAGE_SHIFT, &ctx); +} + +/* + * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages + * @kvm: The KVM pointer + * @slot: The memory slot associated with mask + * @gfn_offset: The gfn offset in memory slot + * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory + * slot to be write protected + * + * Walks the bits set in @mask and write protects the associated PTEs. The + * caller must hold @kvm->mmu_lock. 
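+ * + * A worked example: with gfn_offset 0 and mask 0b1011, the pages at + * base_gfn + 0, base_gfn + 1 and base_gfn + 3 are write protected; the code + * below computes start = base_gfn + __ffs(mask) = base_gfn and + * end = base_gfn + __fls(mask) + 1 = base_gfn + 4, while kvm_mkclean_pte() + * skips base_gfn + 2 via ctx.mask.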
+ */ +void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, + struct kvm_memory_slot *slot, gfn_t gfn_offset, unsigned long mask) +{ + kvm_ptw_ctx ctx; + gfn_t base_gfn = slot->base_gfn + gfn_offset; + gfn_t start = base_gfn + __ffs(mask); + gfn_t end = base_gfn + __fls(mask) + 1; + + ctx.ops = kvm_mkclean_pte; + ctx.flag = _KVM_HAS_PGMASK; + ctx.mask = mask; + ctx.gfn = base_gfn; + kvm_ptw_prepare(kvm, &ctx); + + kvm_ptw_top(kvm->arch.pgd, start << PAGE_SHIFT, end << PAGE_SHIFT, &ctx); +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, const struct kvm_memory_slot *old, + struct kvm_memory_slot *new, enum kvm_mr_change change) +{ + gpa_t gpa_start; + hva_t hva_start; + size_t size, gpa_offset, hva_offset; + + if ((change != KVM_MR_MOVE) && (change != KVM_MR_CREATE)) + return 0; + /* + * Prevent userspace from creating a memory region outside of the + * VM GPA address space + */ + if ((new->base_gfn + new->npages) > (kvm->arch.gpa_size >> PAGE_SHIFT)) + return -ENOMEM; + + new->arch.flags = 0; + size = new->npages * PAGE_SIZE; + gpa_start = new->base_gfn << PAGE_SHIFT; + hva_start = new->userspace_addr; + if (IS_ALIGNED(size, PMD_SIZE) && IS_ALIGNED(gpa_start, PMD_SIZE) + && IS_ALIGNED(hva_start, PMD_SIZE)) + new->arch.flags |= KVM_MEM_HUGEPAGE_CAPABLE; + else { + /* + * Pages belonging to memslots that don't have the same + * alignment within a PMD for userspace and GPA cannot be + * mapped with PMD entries, because we'll end up mapping + * the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SIZE: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this + * incorrect mapping: + * d -> f + * e -> g + * f -> h + */ + gpa_offset = gpa_start & (PMD_SIZE - 1); + hva_offset = hva_start & (PMD_SIZE - 1); + if (gpa_offset != hva_offset) { + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } else { + if (gpa_offset == 0) + gpa_offset = PMD_SIZE; + if ((size + gpa_offset) < (PMD_SIZE * 2)) + new->arch.flags |= KVM_MEM_HUGEPAGE_INCAPABLE; + } + } + + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change) +{ + int needs_flush; + + /* + * If dirty page logging is enabled, write protect all pages in the slot + * ready for dirty logging. + * + * There is no need to do this in any of the following cases: + * CREATE: No dirty mappings will already exist. 
+ * MOVE/DELETE: The old mappings will already have been cleaned up by + * kvm_arch_flush_shadow_memslot() + */ + if (change == KVM_MR_FLAGS_ONLY && + (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && + new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { + spin_lock(&kvm->mmu_lock); + /* Write protect GPA page table entries */ + needs_flush = kvm_mkclean_gpa_pt(kvm, new->base_gfn, + new->base_gfn + new->npages); + spin_unlock(&kvm->mmu_lock); + if (needs_flush) + kvm_flush_remote_tlbs(kvm); + } +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_flush_range(kvm, 0, kvm->arch.gpa_size >> PAGE_SHIFT, 0); +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) +{ + /* + * The slot has been made invalid (ready for moving or deletion), so we + * need to ensure that it can no longer be accessed by any guest vCPUs. + */ + kvm_flush_range(kvm, slot->base_gfn, slot->base_gfn + slot->npages, 1); +} + +bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_flush_pte; + kvm_ptw_prepare(kvm, &ctx); + INIT_LIST_HEAD(&ctx.list); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + unsigned long prot_bits; + kvm_pte_t *ptep; + kvm_pfn_t pfn = pte_pfn(range->arg.pte); + gpa_t gpa = range->start << PAGE_SHIFT; + + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep) + return false; + + /* Replacing an absent or old page doesn't need flushes */ + if (!kvm_pte_present(NULL, ptep) || !kvm_pte_young(*ptep)) { + kvm_set_pte(ptep, 0); + return false; + } + + /* Fill new pte if write protected or page migrated */ + prot_bits = _PAGE_PRESENT | __READABLE; + prot_bits |= _CACHE_MASK & pte_val(range->arg.pte); + + /* + * Set _PAGE_WRITE or _PAGE_DIRTY iff old and new pte both support + * _PAGE_WRITE for map_page_fast if next page write fault + * _PAGE_DIRTY since gpa has already recorded as dirty page + */ + prot_bits |= __WRITEABLE & *ptep & pte_val(range->arg.pte); + kvm_set_pte(ptep, kvm_pfn_pte(pfn, __pgprot(prot_bits))); + + return true; +} + +bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_ptw_ctx ctx; + + ctx.flag = 0; + ctx.ops = kvm_mkold_pte; + kvm_ptw_prepare(kvm, &ctx); + + return kvm_ptw_top(kvm->arch.pgd, range->start << PAGE_SHIFT, + range->end << PAGE_SHIFT, &ctx); +} + +bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) +{ + gpa_t gpa = range->start << PAGE_SHIFT; + kvm_pte_t *ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + + if (ptep && kvm_pte_present(NULL, ptep) && kvm_pte_young(*ptep)) + return true; + + return false; +} + +/* + * kvm_map_page_fast() - Fast path GPA fault handler. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Perform fast path GPA fault handling, doing all that can be done without + * calling into KVM. This handles marking old pages young (for idle page + * tracking), and dirtying of clean pages (for dirty page logging). + * + * Returns: 0 on success, in which case we can update derived mappings and + * resume guest execution. + * -EFAULT on failure due to absent GPA mapping or write to + * read-only page, in which case KVM must be consulted. 
+ */ +static int kvm_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret = 0; + kvm_pfn_t pfn = 0; + kvm_pte_t *ptep, changed, new; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *slot; + + spin_lock(&kvm->mmu_lock); + + /* Fast path - just check GPA page table for an existing entry */ + ptep = kvm_populate_gpa(kvm, NULL, gpa, 0); + if (!ptep || !kvm_pte_present(NULL, ptep)) { + ret = -EFAULT; + goto out; + } + + /* Track access to pages marked old */ + new = *ptep; + if (!kvm_pte_young(new)) + new = kvm_pte_mkyoung(new); + /* call kvm_set_pfn_accessed() after unlock */ + + if (write && !kvm_pte_dirty(new)) { + if (!kvm_pte_write(new)) { + ret = -EFAULT; + goto out; + } + + if (kvm_pte_huge(new)) { + /* + * Do not set write permission when dirty logging is + * enabled for HugePages + */ + slot = gfn_to_memslot(kvm, gfn); + if (kvm_slot_dirty_track_enabled(slot)) { + ret = -EFAULT; + goto out; + } + } + + /* Track dirtying of writeable pages */ + new = kvm_pte_mkdirty(new); + } + + changed = new ^ (*ptep); + if (changed) { + kvm_set_pte(ptep, new); + pfn = kvm_pte_pfn(new); + } + spin_unlock(&kvm->mmu_lock); + + /* + * Fixme: pfn may be freed after mmu_lock + * kvm_try_get_pfn(pfn)/kvm_release_pfn pair to prevent this? + */ + if (kvm_pte_young(changed)) + kvm_set_pfn_accessed(pfn); + + if (kvm_pte_dirty(changed)) { + mark_page_dirty(kvm, gfn); + kvm_set_pfn_dirty(pfn); + } + return ret; +out: + spin_unlock(&kvm->mmu_lock); + return ret; +} + +static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, bool write) +{ + hva_t start, end; + + /* Disable dirty logging on HugePages */ + if (kvm_slot_dirty_track_enabled(memslot) && write) + return false; + + if (kvm_hugepage_capable(memslot)) + return true; + + if (kvm_hugepage_incapable(memslot)) + return false; + + start = memslot->userspace_addr; + end = start + memslot->npages * PAGE_SIZE; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva >= ALIGN(start, PMD_SIZE)) && (hva < ALIGN_DOWN(end, PMD_SIZE)); +} + +/* + * Lookup the mapping level for @gfn in the current mm. + * + * WARNING! Use of host_pfn_mapping_level() requires the caller and the end + * consumer to be tied into KVM's handlers for MMU notifier events! + * + * There are several ways to safely use this helper: + * + * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before + * consuming it. In this case, mmu_lock doesn't need to be held during the + * lookup, but it does need to be held while checking the MMU notifier. + * + * - Hold mmu_lock AND ensure there is no in-progress MMU notifier invalidation + * event for the hva. This can be done by explicit checking the MMU notifier + * or by ensuring that KVM already has a valid mapping that covers the hva. + * + * - Do not use the result to install new mappings, e.g. use the host mapping + * level only to decide whether or not to zap an entry. 
In this case, it's + * not required to hold mmu_lock (though it's highly likely the caller will + * want to hold mmu_lock anyways, e.g. to modify SPTEs). + * + * Note! The lookup can still race with modifications to host page tables, but + * the above "rules" ensure KVM will not _consume_ the result of the walk if a + * race with the primary MMU occurs. + */ +static int host_pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = 0; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeds, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (kvm_pte_huge(pmd_val(pmd))) + level = 1; + +out: + local_irq_restore(flags); + return level; +} + +/* + * Split huge page + */ +static kvm_pte_t *kvm_split_huge(struct kvm_vcpu *vcpu, kvm_pte_t *ptep, gfn_t gfn) +{ + int i; + kvm_pte_t val, *child; + struct kvm *kvm = vcpu->kvm; + struct kvm_mmu_memory_cache *memcache; + + memcache = &vcpu->arch.mmu_page_cache; + child = kvm_mmu_memory_cache_alloc(memcache); + val = kvm_pte_mksmall(*ptep); + for (i = 0; i < PTRS_PER_PTE; i++) { + kvm_set_pte(child + i, val); + val += PAGE_SIZE; + } + + /* The later kvm_flush_tlb_gpa() will flush hugepage tlb */ + kvm_set_pte(ptep, __pa(child)); + + kvm->stat.hugepages--; + kvm->stat.pages += PTRS_PER_PTE; + + return child + (gfn & (PTRS_PER_PTE - 1)); +} + +/* + * kvm_map_page() - Map a guest physical page. + * @vcpu: vCPU pointer. + * @gpa: Guest physical address of fault. + * @write: Whether the fault was due to a write. + * + * Handle GPA faults by creating a new GPA mapping (or updating an existing + * one). + * + * This takes care of marking pages young or dirty (idle/dirty page tracking), + * asking KVM for the corresponding PFN, and creating a mapping in the GPA page + * tables. Derived mappings (GVA page tables and TLBs) must be handled by the + * caller. + * + * Returns: 0 on success + * -EFAULT if there is no memory region at @gpa or a write was + * attempted to a read-only memory region. This is usually handled + * as an MMIO access. 
+ */ +static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + bool writeable; + int srcu_idx, err, retry_no = 0, level; + unsigned long hva, mmu_seq, prot_bits; + kvm_pfn_t pfn; + kvm_pte_t *ptep, new_pte; + gfn_t gfn = gpa >> PAGE_SHIFT; + struct kvm *kvm = vcpu->kvm; + struct kvm_memory_slot *memslot; + struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + + /* Try the fast path to handle old / clean pages */ + srcu_idx = srcu_read_lock(&kvm->srcu); + err = kvm_map_page_fast(vcpu, gpa, write); + if (!err) + goto out; + + memslot = gfn_to_memslot(kvm, gfn); + hva = gfn_to_hva_memslot_prot(memslot, gfn, &writeable); + if (kvm_is_error_hva(hva) || (write && !writeable)) { + err = -EFAULT; + goto out; + } + + /* We need a minimum of cached pages ready for page table creation */ + err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); + if (err) + goto out; + +retry: + /* + * Used to check for invalidations in progress, of the pfn that is + * returned by pfn_to_pfn_prot below. + */ + mmu_seq = kvm->mmu_invalidate_seq; + /* + * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads in + * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't + * risk the page we get a reference to getting unmapped before we have a + * chance to grab the mmu_lock without mmu_invalidate_retry() noticing. + * + * This smp_rmb() pairs with the effective smp_wmb() of the combination + * of the pte_unmap_unlock() after the PTE is zapped, and the + * spin_lock() in kvm_mmu_invalidate_invalidate_() before + * mmu_invalidate_seq is incremented. + */ + smp_rmb(); + + /* Slow path - ask KVM core whether we can access this GPA */ + pfn = gfn_to_pfn_prot(kvm, gfn, write, &writeable); + if (is_error_noslot_pfn(pfn)) { + err = -EFAULT; + goto out; + } + + /* Check if an invalidation has taken place since we got pfn */ + spin_lock(&kvm->mmu_lock); + if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) { + /* + * This can happen when mappings are changed asynchronously, but + * also synchronously if a COW is triggered by + * gfn_to_pfn_prot(). + */ + spin_unlock(&kvm->mmu_lock); + kvm_release_pfn_clean(pfn); + if (retry_no > 100) { + retry_no = 0; + schedule(); + } + retry_no++; + goto retry; + } + + /* + * For emulated devices such virtio device, actual cache attribute is + * determined by physical machine. 
+ * For pass through physical device, it should be uncachable + */ + prot_bits = _PAGE_PRESENT | __READABLE; + if (pfn_valid(pfn)) + prot_bits |= _CACHE_CC; + else + prot_bits |= _CACHE_SUC; + + if (writeable) { + prot_bits |= _PAGE_WRITE; + if (write) + prot_bits |= __WRITEABLE; + } + + /* Disable dirty logging on HugePages */ + level = 0; + if (!fault_supports_huge_mapping(memslot, hva, write)) { + level = 0; + } else { + level = host_pfn_mapping_level(kvm, gfn, memslot); + if (level == 1) { + gfn = gfn & ~(PTRS_PER_PTE - 1); + pfn = pfn & ~(PTRS_PER_PTE - 1); + } + } + + /* Ensure page tables are allocated */ + ptep = kvm_populate_gpa(kvm, memcache, gpa, level); + new_pte = kvm_pfn_pte(pfn, __pgprot(prot_bits)); + if (level == 1) { + new_pte = kvm_pte_mkhuge(new_pte); + /* + * previous pmd entry is invalid_pte_table + * there is invalid tlb with small page + * need flush these invalid tlbs for current vcpu + */ + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + ++kvm->stat.hugepages; + } else if (kvm_pte_huge(*ptep) && write) + ptep = kvm_split_huge(vcpu, ptep, gfn); + else + ++kvm->stat.pages; + kvm_set_pte(ptep, new_pte); + spin_unlock(&kvm->mmu_lock); + + if (prot_bits & _PAGE_DIRTY) { + mark_page_dirty_in_slot(kvm, memslot, gfn); + kvm_set_pfn_dirty(pfn); + } + + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return err; +} + +int kvm_handle_mm_fault(struct kvm_vcpu *vcpu, unsigned long gpa, bool write) +{ + int ret; + + ret = kvm_map_page(vcpu, gpa, write); + if (ret) + return ret; + + /* Invalidate this entry in the TLB */ + kvm_flush_tlb_gpa(vcpu, gpa); + + return 0; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, + const struct kvm_memory_slot *memslot) +{ + kvm_flush_remote_tlbs(kvm); +} diff --git a/arch/loongarch/kvm/switch.S b/arch/loongarch/kvm/switch.S new file mode 100644 index 0000000000000000000000000000000000000000..ba976509bfe819ec51fdaa08f2a1ba4a334755cd --- /dev/null +++ b/arch/loongarch/kvm/switch.S @@ -0,0 +1,281 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define HGPR_OFFSET(x) (PT_R0 + 8*x) +#define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x) + +.macro kvm_save_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_host_gpr base + .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, HGPR_OFFSET(\n) + .endr +.endm + +/* + * Save and restore all GPRs except base register, + * and default value of base register is a2. + */ +.macro kvm_save_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + st.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +.macro kvm_restore_guest_gprs base + .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + ld.d $r\n, \base, GGPR_OFFSET(\n) + .endr +.endm + +/* + * Prepare switch to guest, save host regs and restore guest regs. 
+ * a2: kvm_vcpu_arch, don't touch it until 'ertn' + * t0, t1: temp register + */ +.macro kvm_switch_to_guest + /* Set host ECFG.VS=0, all exceptions share one exception entry */ + csrrd t0, LOONGARCH_CSR_ECFG + bstrins.w t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT + csrwr t0, LOONGARCH_CSR_ECFG + + /* Load up the new EENTRY */ + ld.d t0, a2, KVM_ARCH_GEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Set Guest ERA */ + ld.d t0, a2, KVM_ARCH_GPC + csrwr t0, LOONGARCH_CSR_ERA + + /* Save host PGDL */ + csrrd t0, LOONGARCH_CSR_PGDL + st.d t0, a2, KVM_ARCH_HPGD + + /* Switch to kvm */ + ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH + + /* Load guest PGDL */ + li.w t0, KVM_GPGD + ldx.d t0, t1, t0 + csrwr t0, LOONGARCH_CSR_PGDL + + /* Mix GID and RID */ + csrrd t1, LOONGARCH_CSR_GSTAT + bstrpick.w t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + + /* + * Enable intr in root mode with future ertn so that host interrupt + * can be responsed during VM runs + * Guest CRMD comes from separate GCSR_CRMD register + */ + ori t0, zero, CSR_PRMD_PIE + csrxchg t0, t0, LOONGARCH_CSR_PRMD + + /* Set PVM bit to setup ertn to guest context */ + ori t0, zero, CSR_GSTAT_PVM + csrxchg t0, t0, LOONGARCH_CSR_GSTAT + + /* Load Guest GPRs */ + kvm_restore_guest_gprs a2 + /* Load KVM_ARCH register */ + ld.d a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */ +.endm + + /* + * Exception entry for general exception from guest mode + * - IRQ is disabled + * - kernel privilege in root mode + * - page mode keep unchanged from previous PRMD in root mode + * - Fixme: tlb exception cannot happen since registers relative with TLB + * - is still in guest mode, such as pgd table/vmid registers etc, + * - will fix with hw page walk enabled in future + * load kvm_vcpu from reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS + */ + .text + .cfi_sections .debug_frame +SYM_CODE_START(kvm_exc_entry) + csrwr a2, KVM_TEMP_KS + csrrd a2, KVM_VCPU_KS + addi.d a2, a2, KVM_VCPU_ARCH + + /* After save GPRs, free to use any GPR */ + kvm_save_guest_gprs a2 + /* Save guest A2 */ + csrrd t0, KVM_TEMP_KS + st.d t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2) + + /* A2 is kvm_vcpu_arch, A1 is free to use */ + csrrd s1, KVM_VCPU_KS + ld.d s0, s1, KVM_VCPU_RUN + + csrrd t0, LOONGARCH_CSR_ESTAT + st.d t0, a2, KVM_ARCH_HESTAT + csrrd t0, LOONGARCH_CSR_ERA + st.d t0, a2, KVM_ARCH_GPC + csrrd t0, LOONGARCH_CSR_BADV + st.d t0, a2, KVM_ARCH_HBADV + csrrd t0, LOONGARCH_CSR_BADI + st.d t0, a2, KVM_ARCH_HBADI + + /* Restore host ECFG.VS */ + csrrd t0, LOONGARCH_CSR_ECFG + ld.d t1, a2, KVM_ARCH_HECFG + or t0, t0, t1 + csrwr t0, LOONGARCH_CSR_ECFG + + /* Restore host EENTRY */ + ld.d t0, a2, KVM_ARCH_HEENTRY + csrwr t0, LOONGARCH_CSR_EENTRY + + /* Restore host pgd table */ + ld.d t0, a2, KVM_ARCH_HPGD + csrwr t0, LOONGARCH_CSR_PGDL + + /* + * Disable PGM bit to enter root mode by default with next ertn + */ + ori t0, zero, CSR_GSTAT_PVM + csrxchg zero, t0, LOONGARCH_CSR_GSTAT + + /* + * Clear GTLBC.TGID field + * 0: for root tlb update in future tlb instr + * others: for guest tlb update like gpa to hpa in future tlb instr + */ + csrrd t0, LOONGARCH_CSR_GTLBC + bstrins.w t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT + csrwr t0, LOONGARCH_CSR_GTLBC + ld.d tp, a2, KVM_ARCH_HTP + ld.d sp, a2, KVM_ARCH_HSP + /* restore per cpu register */ + ld.d u0, a2, 
KVM_ARCH_HPERCPU + addi.d sp, sp, -PT_SIZE + + /* Prepare handle exception */ + or a0, s0, zero + or a1, s1, zero + ld.d t8, a2, KVM_ARCH_HANDLE_EXIT + jirl ra, t8, 0 + + or a2, s1, zero + addi.d a2, a2, KVM_VCPU_ARCH + + /* Resume host when ret <= 0 */ + blez a0, ret_to_host + + /* + * Return to guest + * Save per cpu register again, maybe switched to another cpu + */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr s1, KVM_VCPU_KS + kvm_switch_to_guest + +ret_to_host: + ld.d a2, a2, KVM_ARCH_HSP + addi.d a2, a2, -PT_SIZE + kvm_restore_host_gpr a2 + jr ra + +SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL) +SYM_CODE_END(kvm_exc_entry) + +/* + * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu) + * + * @register_param: + * a0: kvm_run* run + * a1: kvm_vcpu* vcpu + */ +SYM_FUNC_START(kvm_enter_guest) + /* Allocate space in stack bottom */ + addi.d a2, sp, -PT_SIZE + /* Save host GPRs */ + kvm_save_host_gpr a2 + + /* Save host CRMD, PRMD to stack */ + csrrd a3, LOONGARCH_CSR_CRMD + st.d a3, a2, PT_CRMD + csrrd a3, LOONGARCH_CSR_PRMD + st.d a3, a2, PT_PRMD + + addi.d a2, a1, KVM_VCPU_ARCH + st.d sp, a2, KVM_ARCH_HSP + st.d tp, a2, KVM_ARCH_HTP + /* Save per cpu register */ + st.d u0, a2, KVM_ARCH_HPERCPU + + /* Save kvm_vcpu to kscratch */ + csrwr a1, KVM_VCPU_KS + kvm_switch_to_guest +SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL) +SYM_FUNC_END(kvm_enter_guest) + +SYM_FUNC_START(kvm_save_fpu) + fpu_save_csr a0 t1 + fpu_save_double a0 t1 + fpu_save_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_save_fpu) + +SYM_FUNC_START(kvm_restore_fpu) + fpu_restore_double a0 t1 + fpu_restore_csr a0 t1 t2 + fpu_restore_cc a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_fpu) + +#ifdef CONFIG_CPU_HAS_LSX +SYM_FUNC_START(kvm_save_lsx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lsx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lsx) + +SYM_FUNC_START(kvm_restore_lsx) + lsx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lsx) +#endif + +#ifdef CONFIG_CPU_HAS_LASX +SYM_FUNC_START(kvm_save_lasx) + fpu_save_csr a0 t1 + fpu_save_cc a0 t1 t2 + lasx_save_data a0 t1 + jr ra +SYM_FUNC_END(kvm_save_lasx) + +SYM_FUNC_START(kvm_restore_lasx) + lasx_restore_data a0 t1 + fpu_restore_cc a0 t1 t2 + fpu_restore_csr a0 t1 t2 + jr ra +SYM_FUNC_END(kvm_restore_lasx) +#endif + .section ".rodata" +SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry) +SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest) diff --git a/arch/loongarch/kvm/timer.c b/arch/loongarch/kvm/timer.c new file mode 100644 index 0000000000000000000000000000000000000000..bcc6b6d063d914dbf820b43f2c1308803646b395 --- /dev/null +++ b/arch/loongarch/kvm/timer.c @@ -0,0 +1,197 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * ktime_to_tick() - Scale ktime_t to timer tick value. 
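+ * + * Assuming MNSEC_PER_SEC is NSEC_PER_SEC >> 20 (matching the >> 20 scaling in + * kvm_init_timer() below), this effectively computes ns * timer_hz / NSEC_PER_SEC. + * E.g. with a 100 MHz stable timer, timer_mhz = 100000000 >> 20 = 95, so + * 1 ms (1000000 ns) scales to roughly 100000 ticks.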
+ */ +static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now) +{ + u64 delta; + + delta = ktime_to_ns(now); + return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC); +} + +static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick) +{ + return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz); +} + +/* Low level hrtimer wake routine */ +enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer); + kvm_queue_irq(vcpu, INT_TI); + rcuwait_wake_up(&vcpu->wait); + + return HRTIMER_NORESTART; +} + +/* + * Initialise the timer to the specified frequency and zero it + */ +void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz) +{ + vcpu->arch.timer_mhz = timer_hz >> 20; + + /* Starting at 0 */ + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0); +} + +/* + * Restore soft timer state from saved context. + */ +void kvm_restore_timer(struct kvm_vcpu *vcpu) +{ + unsigned long cfg, estat; + unsigned long ticks, delta, period; + ktime_t expire, now; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Set guest stable timer cfg csr + * Disable the timer before restoring the estat CSR register, to avoid + * getting a stale timer interrupt from the old timer cfg + */ + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + + write_gcsr_timercfg(0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + if (!(cfg & CSR_TCFG_EN)) { + /* Guest timer is disabled, just restore timer registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + return; + } + + /* + * Freeze the soft-timer and sync the guest stable timer with it. + */ + if (kvm_vcpu_is_blocking(vcpu)) + hrtimer_cancel(&vcpu->arch.swtimer); + + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If a oneshot timer has fired, CSR TVAL will be -1. There are two + * cases: + * 1) the timer fired while exiting to the host + * 2) the timer fired while the vm was handling the timer irq, and then + * exited to the host. The host should not inject the timer irq again, + * to avoid a spurious timer interrupt + */ + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) { + /* + * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq + * and set CSR TVAL to -1 + */ + write_gcsr_timertick(0); + + /* + * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear + * the timer interrupt, while CSR TVAL stays unchanged at -1, + * which avoids a spurious timer interrupt + */ + if (!(estat & CPU_TIMER)) + gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR); + return; + } + + /* + * Set the remaining tick value if the timer has not expired + */ + delta = 0; + now = ktime_get(); + expire = vcpu->arch.expire; + if (ktime_before(now, expire)) + delta = ktime_to_tick(vcpu, ktime_sub(expire, now)); + else if (cfg & CSR_TCFG_PERIOD) { + period = cfg & CSR_TCFG_VAL; + delta = ktime_to_tick(vcpu, ktime_sub(now, expire)); + delta = period - (delta % period); + + /* + * Inject the timer interrupt here even though the sw timer + * should already inject it asynchronously, since the sw timer + * may be cancelled while the async injection is in flight + */ + kvm_queue_irq(vcpu, INT_TI); + } + + write_gcsr_timertick(delta); +} + +/* + * Save guest timer state and switch to software emulation of guest + * timer. The hard timer must already be in use, so preemption should be + * disabled. 
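+ * + * For example, a one-shot timer with TCFG = 1000 that has already fired reads + * back TVAL as -1, which compares (unsigned) larger than TCFG, so delta stays + * 0 below and the soft timer is set to expire immediately.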
+ */ +static void _kvm_save_timer(struct kvm_vcpu *vcpu) +{ + unsigned long ticks, delta, cfg; + ktime_t expire; + struct loongarch_csrs *csr = vcpu->arch.csr; + + cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG); + ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL); + + /* + * From LoongArch Reference Manual Volume 1 Chapter 7.6.2 + * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG + * If oneshot timer is fired, CSR TVAL will be -1 + * Here judge one-shot timer fired by checking whether TVAL is larger + * than TCFG + */ + if (ticks < cfg) + delta = tick_to_ns(vcpu, ticks); + else + delta = 0; + + expire = ktime_add_ns(ktime_get(), delta); + vcpu->arch.expire = expire; + if (kvm_vcpu_is_blocking(vcpu)) { + + /* + * HRTIMER_MODE_PINNED is suggested since vcpu may run in + * the same physical cpu in next time + */ + hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED); + } +} + +/* + * Save guest timer state and switch to soft guest timer if hard timer was in + * use. + */ +void kvm_save_timer(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + preempt_disable(); + + /* Save hard timer state */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL); + if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN) + _kvm_save_timer(vcpu); + + /* Save timer-related state to vCPU context */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT); + preempt_enable(); +} + +void kvm_reset_timer(struct kvm_vcpu *vcpu) +{ + write_gcsr_timercfg(0); + kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TCFG, 0); + hrtimer_cancel(&vcpu->arch.swtimer); +} diff --git a/arch/loongarch/kvm/tlb.c b/arch/loongarch/kvm/tlb.c new file mode 100644 index 0000000000000000000000000000000000000000..02535df6b51fbebdf12c23373c36b6f44e2cd144 --- /dev/null +++ b/arch/loongarch/kvm/tlb.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include + +/* + * kvm_flush_tlb_all() - Flush all root TLB entries for guests. + * + * Invalidate all entries including GVA-->GPA and GPA-->HPA mappings. 
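_kvm_save_timer() above converts the remaining hardware ticks into an absolute deadline for the pinned hrtimer. A user-space sketch of that decision, assuming a 100 MHz constant timer: a oneshot timer that has already fired reads TVAL back as (u64)-1, which can never be below the programmed TCFG value, so a plain comparison separates "still counting down" from "already fired"::

  #include <stdint.h>
  #include <time.h>

  #define NSEC_PER_SEC 1000000000ULL

  /* Assumed constant-timer frequency for the sketch. */
  static const uint64_t timer_hz = 100000000ULL;

  static uint64_t tick_to_ns(uint64_t tick)
  {
  	return tick * NSEC_PER_SEC / timer_hz;
  }

  /*
   * Mirrors the save-side decision: "ticks < tcfg" means the timer is
   * still counting down; a fired oneshot reads back as (u64)-1.
   */
  static uint64_t soft_timer_deadline(uint64_t ticks, uint64_t tcfg)
  {
  	struct timespec ts;
  	uint64_t now, delta;

  	clock_gettime(CLOCK_MONOTONIC, &ts);
  	now = (uint64_t)ts.tv_sec * NSEC_PER_SEC + (uint64_t)ts.tv_nsec;
  	delta = (ticks < tcfg) ? tick_to_ns(ticks) : 0;

  	return now + delta;	/* absolute expiry for the emulated timer */
  }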
+ */ +void kvm_flush_tlb_all(void) +{ + unsigned long flags; + + local_irq_save(flags); + invtlb_all(INVTLB_ALLGID, 0, 0); + local_irq_restore(flags); +} + +void kvm_flush_tlb_gpa(struct kvm_vcpu *vcpu, unsigned long gpa) +{ + unsigned long flags; + + local_irq_save(flags); + gpa &= (PAGE_MASK << 1); + invtlb(INVTLB_GID_ADDR, read_csr_gstat() & CSR_GSTAT_GID, gpa); + local_irq_restore(flags); +} diff --git a/arch/loongarch/kvm/trace.h b/arch/loongarch/kvm/trace.h new file mode 100644 index 0000000000000000000000000000000000000000..c2484ad4cffa2102a61f2bd4d0a6f537a6501906 --- /dev/null +++ b/arch/loongarch/kvm/trace.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoints for VM enters + */ +DECLARE_EVENT_CLASS(kvm_transition, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu), + TP_STRUCT__entry( + __field(unsigned long, pc) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + ), + + TP_printk("PC: 0x%08lx", __entry->pc) +); + +DEFINE_EVENT(kvm_transition, kvm_enter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_reenter, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +DEFINE_EVENT(kvm_transition, kvm_out, + TP_PROTO(struct kvm_vcpu *vcpu), + TP_ARGS(vcpu)); + +/* Further exit reasons */ +#define KVM_TRACE_EXIT_IDLE 64 +#define KVM_TRACE_EXIT_CACHE 65 + +/* Tracepoints for VM exits */ +#define kvm_trace_symbol_exit_types \ + { KVM_TRACE_EXIT_IDLE, "IDLE" }, \ + { KVM_TRACE_EXIT_CACHE, "CACHE" } + +DECLARE_EVENT_CLASS(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(unsigned long, pc) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + __print_symbolic(__entry->reason, + kvm_trace_symbol_exit_types), + __entry->pc) +); + +DEFINE_EVENT(kvm_exit, kvm_exit_idle, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit_cache, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +DEFINE_EVENT(kvm_exit, kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason)); + +TRACE_EVENT(kvm_exit_gspr, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int inst_word), + TP_ARGS(vcpu, inst_word), + TP_STRUCT__entry( + __field(unsigned int, inst_word) + ), + + TP_fast_assign( + __entry->inst_word = inst_word; + ), + + TP_printk("Inst word: 0x%08x", __entry->inst_word) +); + +#define KVM_TRACE_AUX_SAVE 0 +#define KVM_TRACE_AUX_RESTORE 1 +#define KVM_TRACE_AUX_ENABLE 2 +#define KVM_TRACE_AUX_DISABLE 3 +#define KVM_TRACE_AUX_DISCARD 4 + +#define KVM_TRACE_AUX_FPU 1 +#define KVM_TRACE_AUX_LSX 2 +#define KVM_TRACE_AUX_LASX 3 + +#define kvm_trace_symbol_aux_op \ + { KVM_TRACE_AUX_SAVE, "save" }, \ + { KVM_TRACE_AUX_RESTORE, "restore" }, \ + { KVM_TRACE_AUX_ENABLE, "enable" }, \ + { KVM_TRACE_AUX_DISABLE, "disable" }, \ + { KVM_TRACE_AUX_DISCARD, "discard" } + +#define kvm_trace_symbol_aux_state \ + { KVM_TRACE_AUX_FPU, "FPU" }, \ + { KVM_TRACE_AUX_LSX, "LSX" }, \ + { KVM_TRACE_AUX_LASX, "LASX" } + +TRACE_EVENT(kvm_aux, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, + unsigned int state), + TP_ARGS(vcpu, op, state), 
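The transition and exit tracepoints declared above land under the kvm trace system once a translation unit defines CREATE_TRACE_POINTS before including this header, as vcpu.c does below. A small sketch of enabling them from user space through tracefs (paths assume tracefs is mounted at /sys/kernel/tracing)::

  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  static int trace_on(const char *event)
  {
  	char path[256];
  	int fd;

  	snprintf(path, sizeof(path),
  		 "/sys/kernel/tracing/events/kvm/%s/enable", event);
  	fd = open(path, O_WRONLY);
  	if (fd < 0)
  		return -1;
  	if (write(fd, "1", 1) != 1) {
  		close(fd);
  		return -1;
  	}
  	return close(fd);
  }

  int main(void)
  {
  	/* Event names come from the DEFINE_EVENT/TRACE_EVENT blocks above */
  	return trace_on("kvm_enter") || trace_on("kvm_exit") || trace_on("kvm_aux");
  }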
+ TP_STRUCT__entry( + __field(unsigned long, pc) + __field(u8, op) + __field(u8, state) + ), + + TP_fast_assign( + __entry->pc = vcpu->arch.pc; + __entry->op = op; + __entry->state = state; + ), + + TP_printk("%s %s PC: 0x%08lx", + __print_symbolic(__entry->op, + kvm_trace_symbol_aux_op), + __print_symbolic(__entry->state, + kvm_trace_symbol_aux_state), + __entry->pc) +); + +TRACE_EVENT(kvm_vpid_change, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned long vpid), + TP_ARGS(vcpu, vpid), + TP_STRUCT__entry( + __field(unsigned long, vpid) + ), + + TP_fast_assign( + __entry->vpid = vpid; + ), + + TP_printk("VPID: 0x%08lx", __entry->vpid) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../arch/loongarch/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c new file mode 100644 index 0000000000000000000000000000000000000000..685f2826d022f1d1e3148e530219db6e4a240f3b --- /dev/null +++ b/arch/loongarch/kvm/vcpu.c @@ -0,0 +1,1514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include +#include +#include +#include +#include + +#define CREATE_TRACE_POINTS +#include "trace.h" + +const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = { + KVM_GENERIC_VCPU_STATS(), + STATS_DESC_COUNTER(VCPU, int_exits), + STATS_DESC_COUNTER(VCPU, idle_exits), + STATS_DESC_COUNTER(VCPU, cpucfg_exits), + STATS_DESC_COUNTER(VCPU, signal_exits), + STATS_DESC_COUNTER(VCPU, hypercall_exits) +}; + +const struct kvm_stats_header kvm_vcpu_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vcpu_stats_desc), +}; + +static void kvm_update_stolen_time(struct kvm_vcpu *vcpu) +{ + struct kvm_steal_time __user *st; + struct gfn_to_hva_cache *ghc; + struct kvm_memslots *slots; + gpa_t gpa; + u64 steal; + u32 version; + + ghc = &vcpu->arch.st.cache; + gpa = vcpu->arch.st.guest_addr; + if (!(gpa & KVM_STEAL_PHYS_VALID)) + return; + + gpa &= KVM_STEAL_PHYS_MASK; + slots = kvm_memslots(vcpu->kvm); + if (slots->generation != ghc->generation || gpa != ghc->gpa) { + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, + sizeof(*st))) { + ghc->gpa = INVALID_GPA; + return; + } + } + + st = (struct kvm_steal_time __user *)ghc->hva; + unsafe_get_user(version, &st->version, out); + if (version & 1) + version += 1; + version += 1; + unsafe_put_user(version, &st->version, out); + /* Make sure st->version is written first */ + smp_wmb(); + + unsafe_get_user(steal, &st->steal, out); + steal += current->sched_info.run_delay - + vcpu->arch.st.last_steal; + vcpu->arch.st.last_steal = current->sched_info.run_delay; + unsafe_put_user(steal, &st->steal, out); + + /* Make sure st->steal is written first */ + smp_wmb(); + version += 1; + unsafe_put_user(version, &st->version, out); +out: + mark_page_dirty_in_slot(vcpu->kvm, ghc->memslot, gpa_to_gfn(ghc->gpa)); +} + +static bool kvm_pvtime_supported(void) +{ + return !!sched_info_on(); +} + +static int kvm_loongarch_pvtime_set_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + struct kvm *kvm = vcpu->kvm; + u64 gpa; + int ret = 0; + int idx; + + if 
(!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + if (get_user(gpa, user)) + return -EFAULT; + + /* Check the address is in a valid memslot */ + idx = srcu_read_lock(&kvm->srcu); + if (kvm_is_error_hva(gfn_to_hva(kvm, gpa >> PAGE_SHIFT))) + ret = -EINVAL; + srcu_read_unlock(&kvm->srcu, idx); + + if (!ret) + vcpu->arch.st.guest_addr = gpa; + + return ret; +} + +static int kvm_loongarch_pvtime_get_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + u64 __user *user = (u64 __user *)attr->addr; + u64 gpa; + + if (!kvm_pvtime_supported() || + attr->attr != KVM_LOONGARCH_VCPU_PVTIME_GPA) + return -ENXIO; + + gpa = vcpu->arch.st.guest_addr; + if (put_user(gpa, user)) + return -EFAULT; + + return 0; +} + +static int kvm_loongarch_pvtime_has_attr(struct kvm_vcpu *vcpu, + struct kvm_device_attr *attr) +{ + switch (attr->attr) { + case KVM_LOONGARCH_VCPU_PVTIME_GPA: + if (kvm_pvtime_supported()) + return 0; + } + + return -ENXIO; +} + +/* + * kvm_check_requests - check and handle pending vCPU requests + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + */ +static int kvm_check_requests(struct kvm_vcpu *vcpu) +{ + if (!kvm_request_pending(vcpu)) + return RESUME_GUEST; + + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) + vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ + + if (kvm_dirty_ring_check_request(vcpu)) + return RESUME_HOST; + + if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu)) + kvm_update_stolen_time(vcpu); + + return RESUME_GUEST; +} + +/* + * Check and handle pending signal and vCPU requests etc + * Run with irq enabled and preempt enabled + * + * Return: RESUME_GUEST if we should enter the guest + * RESUME_HOST if we should exit to userspace + * < 0 if we should exit to userspace, where the return value + * indicates an error + */ +static int kvm_enter_guest_check(struct kvm_vcpu *vcpu) +{ + int ret; + + /* + * Check conditions before entering the guest + */ + ret = xfer_to_guest_mode_handle_work(vcpu); + if (ret < 0) + return ret; + + ret = kvm_check_requests(vcpu); + + return ret; +} + +/* + * Called with irq enabled + * + * Return: RESUME_GUEST if we should enter the guest, and irq disabled + * Others if we should exit to userspace + */ +static int kvm_pre_enter_guest(struct kvm_vcpu *vcpu) +{ + int ret; + + do { + ret = kvm_enter_guest_check(vcpu); + if (ret != RESUME_GUEST) + break; + + /* + * Handle vcpu timer, interrupts, check requests and + * check vmid before vcpu enter guest + */ + local_irq_disable(); + kvm_deliver_intr(vcpu); + kvm_deliver_exception(vcpu); + /* Make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, IN_GUEST_MODE); + kvm_check_vpid(vcpu); + vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); + /* Clear KVM_LARCH_SWCSR_LATEST as CSR will change when enter guest */ + vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; + + if (kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { + /* make sure the vcpu mode has been written */ + smp_store_mb(vcpu->mode, OUTSIDE_GUEST_MODE); + local_irq_enable(); + ret = -EAGAIN; + } + } while (ret != RESUME_GUEST); + + return ret; +} + +/* + * Return 1 for resume guest and "<= 0" for resume host. 
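kvm_update_stolen_time() above follows the usual even/odd version protocol: version is bumped to an odd value before steal is updated and back to an even value afterwards, with smp_wmb() ordering the stores. A guest-side reader sketch of that protocol (the struct layout is abbreviated; the real kvm_steal_time record also carries flags and padding)::

  #include <stdint.h>

  /* Abbreviated view of the shared steal-time record. */
  struct steal_time {
  	uint64_t steal;
  	uint32_t version;
  };

  /* Retry while an update is in flight (odd version) or raced past us. */
  static uint64_t read_steal(const volatile struct steal_time *st)
  {
  	uint32_t v;
  	uint64_t steal;

  	do {
  		v = st->version;
  		__sync_synchronize();	/* pairs with the host's smp_wmb() */
  		steal = st->steal;
  		__sync_synchronize();
  	} while ((v & 1) || v != st->version);

  	return steal;
  }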
+ */ +static int kvm_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + int ret = RESUME_GUEST; + unsigned long estat = vcpu->arch.host_estat; + u32 intr = estat & 0x1fff; /* Ignore NMI */ + u32 ecode = (estat & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT; + + vcpu->mode = OUTSIDE_GUEST_MODE; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + + guest_timing_exit_irqoff(); + guest_state_exit_irqoff(); + local_irq_enable(); + + trace_kvm_exit(vcpu, ecode); + if (ecode) { + ret = kvm_handle_fault(vcpu, ecode); + } else { + WARN(!intr, "vm exiting with suspicious irq\n"); + ++vcpu->stat.int_exits; + } + + if (ret == RESUME_GUEST) + ret = kvm_pre_enter_guest(vcpu); + + if (ret != RESUME_GUEST) { + local_irq_disable(); + return ret; + } + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_reenter(vcpu); + + return RESUME_GUEST; +} + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.irq_pending) && + vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) +{ + return false; +} + +vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + int ret; + + /* Protect from TOD sync and vcpu_load/put() */ + preempt_disable(); + ret = kvm_pending_timer(vcpu) || + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT) & (1 << INT_TI); + preempt_enable(); + + return ret; +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + + kvm_debug("vCPU Register Dump:\n"); + kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); + kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); + + for (i = 0; i < 32; i += 4) { + kvm_debug("\tGPR%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + + kvm_debug("\tCRMD: 0x%08lx, ESTAT: 0x%08lx\n", + kvm_read_hw_gcsr(LOONGARCH_CSR_CRMD), + kvm_read_hw_gcsr(LOONGARCH_CSR_ESTAT)); + + kvm_debug("\tERA: 0x%08lx\n", kvm_read_hw_gcsr(LOONGARCH_CSR_ERA)); + + return 0; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + *mp_state = vcpu->arch.mp_state; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + int ret = 0; + + switch (mp_state->mp_state) { + case KVM_MP_STATE_RUNNABLE: + vcpu->arch.mp_state = *mp_state; + break; + default: + ret = -EINVAL; + } + + return ret; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + if (dbg->control & ~KVM_GUESTDBG_VALID_MASK) + return -EINVAL; + + if (dbg->control & KVM_GUESTDBG_ENABLE) + vcpu->guest_debug = dbg->control; + else + vcpu->guest_debug = 0; + + return 0; +} + +static int _kvm_getcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 *val) +{ + unsigned long gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 get from GINTC */ + gintc = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_GINTC) & 0xff; + *val = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT) | (gintc << 2); + return 0; + } + + /* + * Get software CSR state since 
software state is consistent + * with hardware for synchronous ioctl + */ + *val = kvm_read_sw_gcsr(csr, id); + + return 0; +} + +static inline int kvm_set_cpuid(struct kvm_vcpu *vcpu, u64 val) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + if (val >= KVM_MAX_PHYID) + return -EINVAL; + + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + map = vcpu->kvm->arch.phyid_map; + spin_lock(&vcpu->kvm->arch.phyid_map_lock); + if (map->phys_map[cpuid].enabled) { + /* + * Cpuid is already set before + * Forbid changing different cpuid at runtime + */ + if (cpuid != val) { + /* + * Cpuid 0 is initial value for vcpu, maybe invalid + * unset value for vcpu + */ + if (cpuid) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + } else { + /* Discard duplicated cpuid set */ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + } + + if (map->phys_map[val].enabled) { + /* + * New cpuid is already set with other vcpu + * Forbid sharing the same cpuid between different vcpus + */ + if (map->phys_map[val].vcpu != vcpu) { + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return -EINVAL; + } + + /* Discard duplicated cpuid set operation*/ + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; + } + + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, val); + map->phys_map[val].enabled = true; + map->phys_map[val].vcpu = vcpu; + if (map->max_phyid < val) + map->max_phyid = val; + spin_unlock(&vcpu->kvm->arch.phyid_map_lock); + return 0; +} + +struct kvm_vcpu *kvm_get_vcpu_by_cpuid(struct kvm *kvm, int cpuid) +{ + struct kvm_phyid_map *map; + + if (cpuid >= KVM_MAX_PHYID) + return NULL; + + map = kvm->arch.phyid_map; + if (map->phys_map[cpuid].enabled) + return map->phys_map[cpuid].vcpu; + + return NULL; +} + +static inline void kvm_drop_cpuid(struct kvm_vcpu *vcpu) +{ + int cpuid; + struct loongarch_csrs *csr = vcpu->arch.csr; + struct kvm_phyid_map *map; + + map = vcpu->kvm->arch.phyid_map; + cpuid = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT); + if (cpuid >= KVM_MAX_PHYID) + return; + + if (map->phys_map[cpuid].enabled) { + map->phys_map[cpuid].vcpu = NULL; + map->phys_map[cpuid].enabled = false; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CPUID, 0); + } +} + +static int _kvm_setcsr(struct kvm_vcpu *vcpu, unsigned int id, u64 val) +{ + int ret = 0, gintc; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (get_gcsr_flag(id) & INVALID_GCSR) + return -EINVAL; + + if (id == LOONGARCH_CSR_ESTAT) { + /* ESTAT IP0~IP7 inject through GINTC */ + gintc = (val >> 2) & 0xff; + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_GINTC, gintc); + + gintc = val & ~(0xffUL << 2); + kvm_set_sw_gcsr(csr, LOONGARCH_CSR_ESTAT, gintc); + + return ret; + } else if (id == LOONGARCH_CSR_CPUID) + return kvm_set_cpuid(vcpu, val); + + kvm_write_sw_gcsr(csr, id, val); + + return ret; +} + +static int _kvm_get_cpucfg_mask(int id, u64 *v) +{ + if (id < 0 || id >= KVM_MAX_CPUCFG_REGS) + return -EINVAL; + + switch (id) { + case LOONGARCH_CPUCFG0: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG1: + /* CPUCFG1_MSGINT is not supported by KVM */ + *v = GENMASK(25, 0); + return 0; + case LOONGARCH_CPUCFG2: + /* CPUCFG2 features unconditionally supported by KVM */ + *v = CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | + CPUCFG2_FPVERS | CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | + CPUCFG2_LSPW | CPUCFG2_LAM; + /* + * For the ISA extensions listed below, if one is supported + * by the host, then it is also supported by KVM. 
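In _kvm_getcsr()/_kvm_setcsr() above, the ESTAT IP0~IP7 bits (bits 2..9) are not kept in the soft ESTAT image directly; their writable shadow lives in GINTC, so a read recombines the two halves and a write splits them. A compact sketch of that packing::

  #include <stdint.h>

  /* Read side: soft ESTAT plus the IP0..IP7 shadow from GINTC. */
  static uint64_t estat_read(uint64_t sw_estat, uint64_t gintc)
  {
  	return sw_estat | ((gintc & 0xff) << 2);
  }

  /* Write side: route IP0..IP7 into GINTC, keep the rest in ESTAT. */
  static void estat_write(uint64_t val, uint64_t *sw_estat, uint64_t *gintc)
  {
  	*gintc = (val >> 2) & 0xff;
  	*sw_estat = val & ~(0xffULL << 2);
  }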
+ */ + if (cpu_has_lsx) + *v |= CPUCFG2_LSX; + if (cpu_has_lasx) + *v |= CPUCFG2_LASX; + + return 0; + case LOONGARCH_CPUCFG3: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG4: + case LOONGARCH_CPUCFG5: + *v = GENMASK(31, 0); + return 0; + case LOONGARCH_CPUCFG6: + if (cpu_has_pmp) + *v = GENMASK(14, 0); + else + *v = 0; + return 0; + case LOONGARCH_CPUCFG16: + *v = GENMASK(16, 0); + return 0; + case LOONGARCH_CPUCFG17 ... LOONGARCH_CPUCFG20: + *v = GENMASK(30, 0); + return 0; + default: + /* + * CPUCFG bits should be zero if reserved by HW or not + * supported by KVM. + */ + *v = 0; + return 0; + } +} + +static int kvm_check_cpucfg(int id, u64 val) +{ + int ret, host; + u64 mask = 0; + + ret = _kvm_get_cpucfg_mask(id, &mask); + if (ret) + return ret; + + if (val & ~mask) + /* Unsupported features and/or the higher 32 bits should not be set */ + return -EINVAL; + + switch (id) { + case LOONGARCH_CPUCFG2: + if (!(val & CPUCFG2_LLFTP)) + /* Guests must have a constant timer */ + return -EINVAL; + if ((val & CPUCFG2_FP) && (!(val & CPUCFG2_FPSP) || !(val & CPUCFG2_FPDP))) + /* Single and double float point must both be set when FP is enabled */ + return -EINVAL; + if ((val & CPUCFG2_LSX) && !(val & CPUCFG2_FP)) + /* LSX architecturally implies FP but val does not satisfy that */ + return -EINVAL; + if ((val & CPUCFG2_LASX) && !(val & CPUCFG2_LSX)) + /* LASX architecturally implies LSX and FP but val does not satisfy that */ + return -EINVAL; + return 0; + case LOONGARCH_CPUCFG6: + if (val & CPUCFG6_PMP) { + host = read_cpucfg(6); + if ((val & CPUCFG6_PMBITS) != (host & CPUCFG6_PMBITS)) + /* Guest pmbits must be the same with host */ + return -EINVAL; + if ((val & CPUCFG6_PMNUM) > (host & CPUCFG6_PMNUM)) + return -EINVAL; + if ((val & CPUCFG6_UPM) && !(host & CPUCFG6_UPM)) + return -EINVAL; + } + return 0; + default: + /* + * Values for the other CPUCFG IDs are not being further validated + * besides the mask check above. 
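The CPUCFG2 rules in kvm_check_cpucfg() encode a dependency chain: LASX requires LSX, LSX requires FP, and FP requires both single- and double-precision support. A stand-alone sketch of the same validation (the bit positions below are illustrative, not the real CPUCFG2_* values)::

  #include <stdbool.h>
  #include <stdint.h>

  #define FP    (1u << 0)	/* illustrative stand-ins for CPUCFG2_* */
  #define FPSP  (1u << 1)
  #define FPDP  (1u << 2)
  #define LSX   (1u << 6)
  #define LASX  (1u << 7)

  /* Dependency chain: LASX -> LSX -> FP -> (FPSP && FPDP) */
  static bool cpucfg2_valid(uint32_t v)
  {
  	if ((v & FP) && (!(v & FPSP) || !(v & FPDP)))
  		return false;
  	if ((v & LSX) && !(v & FP))
  		return false;
  	if ((v & LASX) && !(v & LSX))
  		return false;
  	return true;
  }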
+ */ + return 0; + } +} + +static int kvm_get_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 *v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_getcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + if (id >= 0 && id < KVM_MAX_CPUCFG_REGS) + *v = vcpu->arch.cpucfg[id]; + else + ret = -EINVAL; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + *v = drdtime() + vcpu->kvm->arch.time_offset; + break; + case KVM_REG_LOONGARCH_DEBUG_INST: + *v = INSN_HVCL + KVM_HCALL_SWDBG; + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = kvm_get_one_reg(vcpu, reg, &v); + if (ret) + return ret; + ret = put_user(v, (u64 __user *)(long)reg->addr); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_one_reg(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, u64 v) +{ + int id, ret = 0; + u64 type = reg->id & KVM_REG_LOONGARCH_MASK; + + switch (type) { + case KVM_REG_LOONGARCH_CSR: + id = KVM_GET_IOC_CSR_IDX(reg->id); + ret = _kvm_setcsr(vcpu, id, v); + break; + case KVM_REG_LOONGARCH_CPUCFG: + id = KVM_GET_IOC_CPUCFG_IDX(reg->id); + ret = kvm_check_cpucfg(id, v); + if (ret) + break; + vcpu->arch.cpucfg[id] = (u32)v; + break; + case KVM_REG_LOONGARCH_KVM: + switch (reg->id) { + case KVM_REG_LOONGARCH_COUNTER: + /* + * gftoffset is relative with board, not vcpu + * only set for the first time for smp system + */ + if (vcpu->vcpu_id == 0) + vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); + break; + case KVM_REG_LOONGARCH_VCPU_RESET: + kvm_reset_timer(vcpu); + memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); + memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); + break; + default: + ret = -EINVAL; + break; + } + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static int kvm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ + int ret = 0; + u64 v, size = reg->id & KVM_REG_SIZE_MASK; + + switch (size) { + case KVM_REG_SIZE_U64: + ret = get_user(v, (u64 __user *)(long)reg->addr); + if (ret) + return ret; + break; + default: + return -EINVAL; + } + + return kvm_set_one_reg(vcpu, reg, v); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + regs->gpr[i] = vcpu->arch.gprs[i]; + + regs->pc = vcpu->arch.pc; + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) + vcpu->arch.gprs[i] = regs->gpr[i]; + + vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ + vcpu->arch.pc = regs->pc; + + return 0; +} + +static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, + struct kvm_enable_cap *cap) +{ + /* FPU is enabled by default, will support LSX/LASX later. 
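kvm_get_one_reg() above services KVM_GET_ONE_REG; for KVM_REG_LOONGARCH_COUNTER it returns the guest view of the stable counter, i.e. drdtime() plus the per-VM offset. A user-space sketch, assuming the LoongArch uapi headers define KVM_REG_LOONGARCH_COUNTER with the KVM_REG_SIZE_U64 encoding that kvm_get_reg() insists on::

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <sys/ioctl.h>

  /* vcpu_fd is an open KVM vCPU file descriptor. */
  static int read_guest_counter(int vcpu_fd, uint64_t *counter)
  {
  	struct kvm_one_reg reg = {
  		.id   = KVM_REG_LOONGARCH_COUNTER,
  		.addr = (uint64_t)(uintptr_t)counter,
  	};

  	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }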
 */
+	return -EINVAL;
+}
+
+static int kvm_loongarch_cpucfg_has_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	/* Only CPUCFG2 and CPUCFG6 have a queryable mask */
+	switch (attr->attr) {
+	case LOONGARCH_CPUCFG2:
+	case LOONGARCH_CPUCFG6:
+		return 0;
+	default:
+		return -ENXIO;
+	}
+
+	return -ENXIO;
+}
+
+static int kvm_loongarch_vcpu_has_attr(struct kvm_vcpu *vcpu,
+				       struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_LOONGARCH_VCPU_CPUCFG:
+		ret = kvm_loongarch_cpucfg_has_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_has_attr(vcpu, attr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_loongarch_get_cpucfg_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	int ret = 0;
+	uint64_t val;
+	uint64_t __user *uaddr = (uint64_t __user *)attr->addr;
+
+	ret = _kvm_get_cpucfg_mask(attr->attr, &val);
+	if (ret)
+		return ret;
+
+	ret = put_user(val, uaddr);
+
+	return ret;
+}
+
+static int kvm_loongarch_vcpu_get_attr(struct kvm_vcpu *vcpu,
+				       struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_LOONGARCH_VCPU_CPUCFG:
+		ret = kvm_loongarch_get_cpucfg_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_get_attr(vcpu, attr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+static int kvm_loongarch_cpucfg_set_attr(struct kvm_vcpu *vcpu,
+					 struct kvm_device_attr *attr)
+{
+	return -ENXIO;
+}
+
+static int kvm_loongarch_vcpu_set_attr(struct kvm_vcpu *vcpu,
+				       struct kvm_device_attr *attr)
+{
+	int ret = -ENXIO;
+
+	switch (attr->group) {
+	case KVM_LOONGARCH_VCPU_CPUCFG:
+		ret = kvm_loongarch_cpucfg_set_attr(vcpu, attr);
+		break;
+	case KVM_LOONGARCH_VCPU_PVTIME_CTRL:
+		ret = kvm_loongarch_pvtime_set_attr(vcpu, attr);
+		break;
+	default:
+		break;
+	}
+
+	return ret;
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	long r;
+	struct kvm_device_attr attr;
+	void __user *argp = (void __user *)arg;
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	/*
+	 * Only software CSR state should be modified here.
+	 *
+	 * If any hardware CSR register is modified, a vcpu_load/vcpu_put
+	 * pair should be used. The CSR registers are owned by this vcpu,
+	 * so other vcpus must reload them when they are switched in.
+	 *
+	 * If software CSR state is modified, the KVM_LARCH_HWCSR_USABLE
+	 * bit should be cleared in vcpu->arch.aux_inuse, so that vcpu_load
+	 * sees the flag and reloads the CSR registers from software state.
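The has/get/set dispatchers above hang vCPU attributes off the generic device-attr ioctls. A user-space sketch that probes for steal-time support and then registers the guest physical address of the shared record (both constants come from the LoongArch uapi headers)::

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <sys/ioctl.h>

  static int set_pvtime_gpa(int vcpu_fd, uint64_t gpa)
  {
  	struct kvm_device_attr attr = {
  		.group = KVM_LOONGARCH_VCPU_PVTIME_CTRL,
  		.attr  = KVM_LOONGARCH_VCPU_PVTIME_GPA,
  		.addr  = (uint64_t)(uintptr_t)&gpa,
  	};

  	/* Probe first: -ENXIO means no sched_info/steal accounting support */
  	if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
  		return -1;

  	return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
  }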
+ */ + + switch (ioctl) { + case KVM_SET_ONE_REG: + case KVM_GET_ONE_REG: { + struct kvm_one_reg reg; + + r = -EFAULT; + if (copy_from_user(®, argp, sizeof(reg))) + break; + if (ioctl == KVM_SET_ONE_REG) { + r = kvm_set_reg(vcpu, ®); + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + } else + r = kvm_get_reg(vcpu, ®); + break; + } + case KVM_ENABLE_CAP: { + struct kvm_enable_cap cap; + + r = -EFAULT; + if (copy_from_user(&cap, argp, sizeof(cap))) + break; + r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); + break; + } + case KVM_HAS_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_has_attr(vcpu, &attr); + break; + } + case KVM_GET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_get_attr(vcpu, &attr); + break; + } + case KVM_SET_DEVICE_ATTR: { + r = -EFAULT; + if (copy_from_user(&attr, argp, sizeof(attr))) + break; + r = kvm_loongarch_vcpu_set_attr(vcpu, &attr); + break; + } + default: + r = -ENOIOCTLCMD; + break; + } + + return r; +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + fpu->fcc = vcpu->arch.fpu.fcc; + fpu->fcsr = vcpu->arch.fpu.fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + int i = 0; + + vcpu->arch.fpu.fcc = fpu->fcc; + vcpu->arch.fpu.fcsr = fpu->fcsr; + for (i = 0; i < NUM_FPU_REGS; i++) + memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); + + return 0; +} + +/* Enable FPU and restore context */ +void kvm_own_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + /* Enable FPU */ + set_csr_euen(CSR_EUEN_FPEN); + + kvm_restore_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse |= KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); + + preempt_enable(); +} + +#ifdef CONFIG_CPU_HAS_LSX +/* Enable LSX and restore context */ +int kvm_own_lsx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + /* Enable LSX for guest */ + set_csr_euen(CSR_EUEN_LSXEN | CSR_EUEN_FPEN); + switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + case KVM_LARCH_FPU: + /* + * Guest FPU state already loaded, + * only restore upper LSX state + */ + _restore_lsx_upper(&vcpu->arch.fpu); + break; + default: + /* Neither FP or LSX already active, + * restore full LSX state + */ + kvm_restore_lsx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LSX); + vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + +#ifdef CONFIG_CPU_HAS_LASX +/* Enable LASX and restore context */ +int kvm_own_lasx(struct kvm_vcpu *vcpu) +{ + if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + + set_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { + case KVM_LARCH_LSX: + case KVM_LARCH_LSX | KVM_LARCH_FPU: + /* Guest LSX state already loaded, only restore upper LASX state */ + _restore_lasx_upper(&vcpu->arch.fpu); + break; + case KVM_LARCH_FPU: + /* Guest FP state already loaded, only restore upper LSX & LASX state */ + _restore_lsx_upper(&vcpu->arch.fpu); + _restore_lasx_upper(&vcpu->arch.fpu); + break; + default: + 
/* Neither FP or LSX already active, restore full LASX state */ + kvm_restore_lasx(&vcpu->arch.fpu); + break; + } + + trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_LASX); + vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; + preempt_enable(); + + return 0; +} +#endif + +/* Save context and disable FPU */ +void kvm_lose_fpu(struct kvm_vcpu *vcpu) +{ + preempt_disable(); + + if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { + kvm_save_lasx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LASX); + + /* Disable LASX & LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN | CSR_EUEN_LASXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { + kvm_save_lsx(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_LSX); + + /* Disable LSX & FPU */ + clear_csr_euen(CSR_EUEN_FPEN | CSR_EUEN_LSXEN); + } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { + kvm_save_fpu(&vcpu->arch.fpu); + vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; + trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); + + /* Disable FPU */ + clear_csr_euen(CSR_EUEN_FPEN); + } + + preempt_enable(); +} + +int kvm_own_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + + if (!kvm_guest_has_pmu(&vcpu->arch)) + return -EINVAL; + + preempt_disable(); + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + + vcpu->arch.aux_inuse |= KVM_LARCH_PERF; + preempt_enable(); + return 0; +} + +static void kvm_lose_pmu(struct kvm_vcpu *vcpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* save guest pmu csr */ + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL0, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL1, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL2, 0); + kvm_write_hw_gcsr(LOONGARCH_CSR_PERFCTRL3, 0); + /* Disable pmu access from guest */ + write_csr_gcfg(read_csr_gcfg() & ~CSR_GCFG_GPERF); + + if (((kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2) | + kvm_read_sw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3)) + & KVM_PMU_PLV_ENABLE) == 0) + vcpu->arch.aux_inuse &= ~KVM_LARCH_PERF; +} + +static void kvm_restore_pmu(struct kvm_vcpu *vcpu) +{ + unsigned long val; + struct loongarch_csrs *csr = vcpu->arch.csr; + + if (!(vcpu->arch.aux_inuse & KVM_LARCH_PERF)) + return; + + /* Set PM0-PM(num) to Guest */ + val = read_csr_gcfg() & ~CSR_GCFG_GPERF; + val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; + write_csr_gcfg(val); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCTRL3); + 
kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PERFCNTR3); +} + + +int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) +{ + int intr = (int)irq->irq; + + if (intr > 0) + kvm_queue_irq(vcpu, intr); + else if (intr < 0) + kvm_dequeue_irq(vcpu, -intr); + else { + kvm_err("%s: invalid interrupt ioctl %d\n", __func__, irq->irq); + return -EINVAL; + } + + kvm_vcpu_kick(vcpu); + + return 0; +} + +long kvm_arch_vcpu_async_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct kvm_vcpu *vcpu = filp->private_data; + + if (ioctl == KVM_INTERRUPT) { + struct kvm_interrupt irq; + + if (copy_from_user(&irq, argp, sizeof(irq))) + return -EFAULT; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, irq.irq); + + return kvm_vcpu_ioctl_interrupt(vcpu, &irq); + } + + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + unsigned long timer_hz; + struct loongarch_csrs *csr; + + vcpu->arch.vpid = 0; + + hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + vcpu->arch.swtimer.function = kvm_swtimer_wakeup; + + vcpu->arch.handle_exit = kvm_handle_exit; + vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; + vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); + if (!vcpu->arch.csr) + return -ENOMEM; + + /* + * All kvm exceptions share one exception entry, and host <-> guest + * switch also switch ECFG.VS field, keep host ECFG.VS info here. + */ + vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* + * Initialize guest register state to valid architectural reset state. + */ + timer_hz = calc_const_freq(); + kvm_init_timer(vcpu, timer_hz); + + /* Set Initialize mode for guest */ + csr = vcpu->arch.csr; + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_CRMD, CSR_CRMD_DA); + + /* Set cpuid */ + kvm_write_sw_gcsr(csr, LOONGARCH_CSR_TMID, vcpu->vcpu_id); + + /* Start with no pending virtual guest interrupts */ + csr->csrs[LOONGARCH_CSR_GINTC] = 0; + + return 0; +} + +void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + int cpu; + struct kvm_context *context; + + hrtimer_cancel(&vcpu->arch.swtimer); + kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); + kfree(vcpu->arch.csr); + kvm_drop_cpuid(vcpu); + + /* + * If the vCPU is freed and reused as another vCPU, we don't want the + * matching pointer wrongly hanging around in last_vcpu. + */ + for_each_possible_cpu(cpu) { + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (context->last_vcpu == vcpu) + context->last_vcpu = NULL; + } +} + +static int _kvm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + bool migrated; + struct kvm_context *context; + struct loongarch_csrs *csr = vcpu->arch.csr; + + /* + * Have we migrated to a different CPU? + * If so, any old guest TLB state may be stale. + */ + migrated = (vcpu->arch.last_sched_cpu != cpu); + + /* + * Was this the last vCPU to run on this CPU? + * If not, any old guest state from this vCPU will have been clobbered. 
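kvm_vcpu_ioctl_interrupt() above uses the sign of the 32-bit irq value as the operation: a positive number queues that interrupt line, its negative counterpart dequeues it, and 0 is rejected. A user-space sketch of driving it through KVM_INTERRUPT::

  #include <linux/kvm.h>
  #include <stdint.h>
  #include <sys/ioctl.h>

  /* assert != 0 raises the line, assert == 0 lowers it again */
  static int set_irq_line(int vcpu_fd, int line, int assert)
  {
  	struct kvm_interrupt irq = {
  		.irq = (uint32_t)(assert ? line : -line),
  	};

  	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
  }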
+ */ + context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); + if (migrated || (context->last_vcpu != vcpu)) + vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; + context->last_vcpu = vcpu; + + /* Restore timer state regardless */ + kvm_restore_timer(vcpu); + + /* Control guest page CCA attribute */ + change_csr_gcfg(CSR_GCFG_MATC_MASK, CSR_GCFG_MATC_ROOT); + + /* Restore hardware perf csr */ + kvm_restore_pmu(vcpu); + + kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu); + + /* Don't bother restoring registers multiple times unless necessary */ + if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) + return 0; + + write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); + + /* Restore guest CSR registers */ + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + + /* Restore Root.GINTC from unused Guest.GINTC register */ + write_csr_gintc(csr->csrs[LOONGARCH_CSR_GINTC]); + + /* + * We should clear linked load bit to break interrupted atomics. This + * prevents a SC on the next vCPU from succeeding by matching a LL on + * the previous vCPU. 
+ */ + if (vcpu->kvm->created_vcpus > 1) + set_gcsr_llbctl(CSR_LLBCTL_WCLLB); + + vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; + + return 0; +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + + local_irq_save(flags); + /* Restore guest state to registers */ + _kvm_vcpu_load(vcpu, cpu); + local_irq_restore(flags); +} + +static int _kvm_vcpu_put(struct kvm_vcpu *vcpu, int cpu) +{ + struct loongarch_csrs *csr = vcpu->arch.csr; + + kvm_lose_fpu(vcpu); + kvm_lose_pmu(vcpu); + + /* + * Update CSR state from hardware if software CSR state is stale, + * most CSR registers are kept unchanged during process context + * switch except CSR registers like remaining timer tick value and + * injected interrupt state. + */ + if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) + goto out; + + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EUEN); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_MISC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ECFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_BADI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_EENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBIDX); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBEHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ASID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PGDH); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PWCTL1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_STLBPGSIZE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_RVACFG); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CPUID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_PRCFG3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS3); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS4); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS5); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS6); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_KS7); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TMID); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_CNTC); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_LLBCTL); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRENTRY); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRBADV); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRERA); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRSAVE); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRELO1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBREHI); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TLBRPRMD); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN0); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN1); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN2); + kvm_save_hw_gcsr(csr, LOONGARCH_CSR_DMWIN3); + + vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; + +out: + kvm_save_timer(vcpu); + /* Save Root.GINTC into unused Guest.GINTC register */ + csr->csrs[LOONGARCH_CSR_GINTC] = read_csr_gintc(); + + return 0; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + int cpu; + unsigned long flags; + + local_irq_save(flags); + cpu = smp_processor_id(); + vcpu->arch.last_sched_cpu = cpu; + + /* Save guest state in registers */ + _kvm_vcpu_put(vcpu, cpu); + local_irq_restore(flags); +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + int r 
= -EINTR; + struct kvm_run *run = vcpu->run; + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_complete_mmio_read(vcpu, run); + vcpu->mmio_needed = 0; + } + + if (run->exit_reason == KVM_EXIT_LOONGARCH_IOCSR) { + if (!run->iocsr_io.is_write) + kvm_complete_iocsr_read(vcpu, run); + } + + if (run->immediate_exit) + return r; + + /* Clear exit_reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + lose_fpu(1); + vcpu_load(vcpu); + kvm_sigset_activate(vcpu); + r = kvm_pre_enter_guest(vcpu); + if (r != RESUME_GUEST) + goto out; + + guest_timing_enter_irqoff(); + guest_state_enter_irqoff(); + trace_kvm_enter(vcpu); + r = kvm_loongarch_ops->enter_guest(run, vcpu); + + trace_kvm_out(vcpu); + /* + * Guest exit is already recorded at kvm_handle_exit() + * return value must not be RESUME_GUEST + */ + local_irq_enable(); +out: + kvm_sigset_deactivate(vcpu); + vcpu_put(vcpu); + + return r; +} diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c new file mode 100644 index 0000000000000000000000000000000000000000..06fd746b03b6193b9560a0fce85ef9db5fbcb8a1 --- /dev/null +++ b/arch/loongarch/kvm/vm.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2023 Loongson Technology Corporation Limited + */ + +#include +#include + +const struct _kvm_stats_desc kvm_vm_stats_desc[] = { + KVM_GENERIC_VM_STATS(), + STATS_DESC_ICOUNTER(VM, pages), + STATS_DESC_ICOUNTER(VM, hugepages), +}; + +const struct kvm_stats_header kvm_vm_stats_header = { + .name_size = KVM_STATS_NAME_SIZE, + .num_desc = ARRAY_SIZE(kvm_vm_stats_desc), + .id_offset = sizeof(struct kvm_stats_header), + .desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE, + .data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE + + sizeof(kvm_vm_stats_desc), +}; + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + int i; + + /* Allocate page table to map GPA -> RPA */ + kvm->arch.pgd = kvm_pgd_alloc(); + if (!kvm->arch.pgd) + return -ENOMEM; + + kvm->arch.phyid_map = kvzalloc(sizeof(struct kvm_phyid_map), + GFP_KERNEL_ACCOUNT); + if (!kvm->arch.phyid_map) { + free_page((unsigned long)kvm->arch.pgd); + kvm->arch.pgd = NULL; + return -ENOMEM; + } + + kvm_init_vmcs(kvm); + kvm->arch.gpa_size = BIT(cpu_vabits - 1); + kvm->arch.root_level = CONFIG_PGTABLE_LEVELS - 1; + kvm->arch.invalid_ptes[0] = 0; + kvm->arch.invalid_ptes[1] = (unsigned long)invalid_pte_table; +#if CONFIG_PGTABLE_LEVELS > 2 + kvm->arch.invalid_ptes[2] = (unsigned long)invalid_pmd_table; +#endif +#if CONFIG_PGTABLE_LEVELS > 3 + kvm->arch.invalid_ptes[3] = (unsigned long)invalid_pud_table; +#endif + for (i = 0; i <= kvm->arch.root_level; i++) + kvm->arch.pte_shifts[i] = PAGE_SHIFT + i * (PAGE_SHIFT - 3); + + spin_lock_init(&kvm->arch.phyid_map_lock); + return 0; +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + kvm_destroy_vcpus(kvm); + free_page((unsigned long)kvm->arch.pgd); + kvfree(kvm->arch.phyid_map); + kvm->arch.pgd = NULL; + kvm->arch.phyid_map = NULL; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_ONE_REG: + case KVM_CAP_ENABLE_CAP: + case KVM_CAP_READONLY_MEM: + case KVM_CAP_SYNC_MMU: + case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_MP_STATE: + case KVM_CAP_SET_GUEST_DEBUG: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + r = num_online_cpus(); + break; + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + case KVM_CAP_MAX_VCPU_ID: + r = KVM_MAX_VCPU_IDS; + break; + case KVM_CAP_NR_MEMSLOTS: + r = 
KVM_USER_MEM_SLOTS; + break; + default: + r = 0; + break; + } + + return r; +} + +int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -ENOIOCTLCMD; +} diff --git a/arch/loongarch/pci/acpi.c b/arch/loongarch/pci/acpi.c index 365f7de771cbb9e6c72d1d8d0133af476510c2a0..7dabf8d304eb79923b85c82c7a67289ce067bc77 100644 --- a/arch/loongarch/pci/acpi.c +++ b/arch/loongarch/pci/acpi.c @@ -26,16 +26,17 @@ void pcibios_add_bus(struct pci_bus *bus) int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge) { - struct acpi_device *adev = NULL; - struct device *bus_dev = &bridge->bus->dev; - struct pci_config_window *cfg = bridge->bus->sysdata; - if (!acpi_disabled) - adev = to_acpi_device(cfg->parent); + if (!acpi_disabled) { + struct acpi_device *adev = NULL; + struct device *bus_dev = &bridge->bus->dev; + struct pci_config_window *cfg = bridge->bus->sysdata; - ACPI_COMPANION_SET(&bridge->dev, adev); - set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + adev = to_acpi_device(cfg->parent); + ACPI_COMPANION_SET(&bridge->dev, adev); + set_dev_node(bus_dev, pa_to_nid(cfg->res.start)); + } return 0; } @@ -58,19 +59,167 @@ static void acpi_release_root_info(struct acpi_pci_root_info *ci) kfree(info); } +static void arch_pci_root_validate_resources(struct device *dev, + struct list_head *resources, + unsigned long type) +{ + LIST_HEAD(list); + struct resource *res1, *res2, *root = NULL; + struct resource_entry *tmp, *entry, *entry2; + + WARN_ON((type & (IORESOURCE_MEM | IORESOURCE_IO)) == 0); + root = (type & IORESOURCE_MEM) ? &iomem_resource : &ioport_resource; + + list_splice_init(resources, &list); + resource_list_for_each_entry_safe(entry, tmp, &list) { + bool free = false; + resource_size_t end; + + res1 = entry->res; + if (!(res1->flags & type)) + goto next; + + /* Exclude non-addressable range or non-addressable portion */ + end = min(res1->end, root->end); + if (end <= res1->start) { + dev_info(dev, "host bridge window %pR (ignored, not CPU addressable)\n", + res1); + free = true; + goto next; + } else if (res1->end != end) { + dev_info(dev, "host bridge window %pR ([%#llx-%#llx] ignored, not CPU addressable)\n", + res1, (unsigned long long)end + 1, + (unsigned long long)res1->end); + res1->end = end; + } + + resource_list_for_each_entry(entry2, resources) { + res2 = entry2->res; + if (!(res2->flags & type)) + continue; + + /* + * I don't like throwing away windows because then + * our resources no longer match the ACPI _CRS, but + * the kernel resource tree doesn't allow overlaps. 
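arch_pci_root_validate_resources() above resolves overlapping _CRS windows by growing the surviving window to the union of the two and dropping the other, since the kernel resource tree cannot hold overlapping siblings. A reduced sketch of that merge rule::

  #include <stdbool.h>
  #include <stdint.h>

  struct range { uint64_t start, end; };

  /* Returns true when 'drop' was folded into 'keep' and should be freed. */
  static bool merge_if_overlap(struct range *keep, const struct range *drop)
  {
  	if (drop->start > keep->end || keep->start > drop->end)
  		return false;		/* disjoint: nothing to do */
  	if (drop->start < keep->start)
  		keep->start = drop->start;
  	if (drop->end > keep->end)
  		keep->end = drop->end;
  	return true;
  }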
+ */ + if (resource_overlaps(res1, res2)) { + res2->start = min(res1->start, res2->start); + res2->end = max(res1->end, res2->end); + dev_info(dev, "host bridge window expanded to %pR; %pR ignored\n", + res2, res1); + free = true; + goto next; + } + } + +next: + resource_list_del(entry); + if (free) + resource_list_free_entry(entry); + else + resource_list_add_tail(entry, resources); + } +} +static void arch_pci_root_remap_iospace(struct fwnode_handle *fwnode, + struct resource_entry *entry) +{ + struct resource *res = entry->res; + resource_size_t cpu_addr = res->start; + resource_size_t pci_addr = cpu_addr - entry->offset; + resource_size_t length = resource_size(res); + unsigned long port; + + if (pci_register_io_range(fwnode, cpu_addr, length)) { + res->start += ISA_IOSIZE; + cpu_addr = res->start; + pci_addr = cpu_addr - entry->offset; + length = resource_size(res); + if (pci_register_io_range(fwnode, cpu_addr, length)) + goto err; + } + + port = pci_address_to_pio(cpu_addr); + if (port == (unsigned long)-1) + goto err; + + res->start = port; + res->end = port + length - 1; + entry->offset = port - pci_addr; + + if (pci_remap_iospace(res, cpu_addr) < 0) + goto err; + + pr_info("Remapped I/O %pa to %pR\n", &cpu_addr, res); + return; +err: + res->flags |= IORESOURCE_DISABLED; +} + +static int arch_pci_probe_root_resources(struct acpi_pci_root_info *info) +{ + int ret; + struct list_head *list = &info->resources; + struct acpi_device *device = info->bridge; + struct resource_entry *entry, *tmp; + unsigned long flags; + struct resource *res; + + flags = IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT; + ret = acpi_dev_get_resources(device, list, + acpi_dev_filter_resource_type_cb, + (void *)flags); + if (ret < 0) + dev_warn(&device->dev, + "failed to parse _CRS method, error code %d\n", ret); + else if (ret == 0) + dev_dbg(&device->dev, + "no IO and memory resources present in _CRS\n"); + else { + resource_list_for_each_entry_safe(entry, tmp, list) { + if (entry->res->flags & IORESOURCE_IO) { + res = entry->res; + res->start = PFN_ALIGN(res->start); + res->end += 1; + res->end = PFN_ALIGN(res->end); + res->end -= 1; + if (!entry->offset) { + entry->offset = LOONGSON_LIO_BASE; + res->start |= LOONGSON_LIO_BASE; + res->end |= LOONGSON_LIO_BASE; + } + arch_pci_root_remap_iospace(&device->fwnode, + entry); + } + if (entry->res->flags & IORESOURCE_DISABLED) + resource_list_destroy_entry(entry); + else + entry->res->name = info->name; + } + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_MEM); + arch_pci_root_validate_resources(&device->dev, list, + IORESOURCE_IO); + } + + return ret; +} + static int acpi_prepare_root_resources(struct acpi_pci_root_info *ci) { int status; struct resource_entry *entry, *tmp; struct acpi_device *device = ci->bridge; - status = acpi_pci_probe_root_resources(ci); + status = arch_pci_probe_root_resources(ci); if (status > 0) { resource_list_for_each_entry_safe(entry, tmp, &ci->resources) { if (entry->res->flags & IORESOURCE_MEM) { - entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); - entry->res->start |= entry->offset; - entry->res->end |= entry->offset; + if (!entry->offset) { + entry->offset = ci->root->mcfg_addr & GENMASK_ULL(63, 40); + entry->res->start |= entry->offset; + entry->res->end |= entry->offset; + } } } return status; diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 10b946e9c6e756e3d4623495a5613cdcd425e628..b7ff680cde9649a36350743d2ffe771564f4225b 100644 --- 
a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2312,7 +2312,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val, struct cpu_hw_events *cpuhw; cpuhw = this_cpu_ptr(&cpu_hw_events); power_pmu_bhrb_read(event, cpuhw); - perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack); + perf_sample_save_brstack(&data, event, &cpuhw->bhrb_stack, NULL); } if (event->attr.sample_type & PERF_SAMPLE_DATA_SRC && diff --git a/arch/sw_64/Kbuild b/arch/sw_64/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..aa0bf0507406c9790b10d0b0c0b6dd57d22286ae --- /dev/null +++ b/arch/sw_64/Kbuild @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += kernel/ mm/ platform/ +obj-$(CONFIG_NET) += net/ +obj-$(CONFIG_KVM) += kvm/ +obj-$(CONFIG_MATHEMU) += math-emu/ + +obj-$(CONFIG_BUILTIN_DTB) += boot/dts/ diff --git a/arch/sw_64/Kconfig b/arch/sw_64/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..62f655ceae3f709b0f151c89ed7241add7714d90 --- /dev/null +++ b/arch/sw_64/Kconfig @@ -0,0 +1,670 @@ +# SPDX-License-Identifier: GPL-2.0 +config SW64 + bool + default y + select ACPI + select ACPI_MCFG if (ACPI && PCI) + select ACPI_REDUCED_HARDWARE_ONLY + select ARCH_ATOMIC + select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_PHYS_TO_DMA + select ARCH_HAS_PMEM_API + select ARCH_HAS_PTE_DEVMAP + select ARCH_HAS_PTE_SPECIAL + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_UACCESS_FLUSHCACHE + select ARCH_HAS_VM_GET_PAGE_PROT + select ARCH_HAS_ZONE_DEVICE + select ARCH_HAVE_NMI_SAFE_CMPXCHG + select ARCH_INLINE_READ_LOCK + select ARCH_INLINE_READ_LOCK_BH + select ARCH_INLINE_READ_LOCK_IRQ + select ARCH_INLINE_READ_LOCK_IRQSAVE + select ARCH_INLINE_READ_UNLOCK + select ARCH_INLINE_READ_UNLOCK_BH + select ARCH_INLINE_READ_UNLOCK_IRQ + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE + select ARCH_INLINE_SPIN_LOCK + select ARCH_INLINE_SPIN_LOCK_BH + select ARCH_INLINE_SPIN_LOCK_IRQ + select ARCH_INLINE_SPIN_LOCK_IRQSAVE + select ARCH_INLINE_SPIN_TRYLOCK + select ARCH_INLINE_SPIN_TRYLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK + select ARCH_INLINE_SPIN_UNLOCK_BH + select ARCH_INLINE_SPIN_UNLOCK_IRQ + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + select ARCH_INLINE_WRITE_LOCK + select ARCH_INLINE_WRITE_LOCK_BH + select ARCH_INLINE_WRITE_LOCK_IRQ + select ARCH_INLINE_WRITE_LOCK_IRQSAVE + select ARCH_INLINE_WRITE_UNLOCK + select ARCH_INLINE_WRITE_UNLOCK_BH + select ARCH_INLINE_WRITE_UNLOCK_IRQ + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + select ARCH_NO_PREEMPT + select ARCH_SUPPORTS_ACPI + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_UPROBES + select ARCH_USE_CMPXCHG_LOCKREF + select ARCH_USE_QUEUED_RWLOCKS + select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_WANT_DEFAULT_BPF_JIT + select ARCH_WANT_FRAME_POINTERS + select ARCH_WANT_IPC_PARSE_VERSION + select AUDIT_ARCH + select COMMON_CLK + select DMA_OPS if PCI + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_LEGACY + select GENERIC_IRQ_MIGRATION if SMP + select GENERIC_IRQ_PROBE + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP if PCI + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL + select HANDLE_DOMAIN_IRQ + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_PREL32_RELOCATIONS + select HAVE_ARCH_SECCOMP_FILTER + select 
HAVE_ARCH_TRACEHOOK + select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select HAVE_ASM_MODVERSIONS + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_BUGVERBOSE + select HAVE_DYNAMIC_FTRACE + select HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_EBPF_JIT + select HAVE_FAST_GUP + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER + select HAVE_IDE + select HAVE_KPROBES + select HAVE_KPROBES_ON_FTRACE + select HAVE_KRETPROBES + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PCI + select HAVE_PCSPKR_PLATFORM + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_RELIABLE_STACKTRACE if STACKTRACE + select HAVE_RSEQ + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_FORCED_THREADING + select LOCK_MM_AND_FIND_VMA + select MEMORY_HOTPLUG_SPARSE if MEMORY_HOTPLUG + select MODULES_USE_ELF_RELA + select NO_BOOTMEM + select OF_EARLY_FLATTREE if OF + select OLD_SIGSUSPEND + select PCI_DOMAINS_GENERIC if PCI + select PCI_ECAM if (ACPI && PCI) + select PCI_MSI_ARCH_FALLBACKS if PCI_MSI + select PCI_SW64 if PCI + select SET_FS + select SPARSEMEM_EXTREME if SPARSEMEM + select SW64_IRQ_CPU + select SW64_IRQ_MSI if PCI_MSI + select SW64_IRQ_MSI_VT if PCI_MSI + select SW64_TIMER + select SWIOTLB + select THREAD_INFO_IN_TASK + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select IOMMU_DMA if IOMMU_SUPPORT + select ARCH_SUPPORTS_MEMORY_FAILURE + select HAVE_CONTEXT_TRACKING + select HAVE_NMI + select HAVE_DMA_CONTIGUOUS + +config LOCKDEP_SUPPORT + def_bool y + +config 64BIT + def_bool y + +config MMU + bool + default y + +config PGTABLE_LEVELS + int + default 4 + +config ARCH_SUPPORTS_HUGETLBFS + def_bool y + +config ARCH_ENABLE_MEMORY_HOTPLUG + bool + default y + +config ARCH_ENABLE_MEMORY_HOTREMOVE + bool + default y + +config ARCH_HAS_ILOG2_U32 + bool + default n + +config ARCH_HAS_ILOG2_U64 + bool + default n + +config GENERIC_GPIO + bool + +config GENERIC_CALIBRATE_DELAY + bool + default y + +config ZONE_DMA + bool "Support DMA zone" if EXPERT + default y + +config ZONE_DMA32 + bool + default y + +config NEED_DMA_MAP_STATE + def_bool y + +config NEED_SG_DMA_LENGTH + def_bool y + +config ARCH_WANT_HUGE_PMD_SHARE + def_bool y + +config GENERIC_ISA_DMA + bool + default y + +config NONCACHE_PAGE + bool + depends on SW64 + default y + +config AUDIT_ARCH + bool + +config SYS_HAS_EARLY_PRINTK + bool + +config HAVE_CSRRW + bool + +config ILLEGAL_POINTER_VALUE + hex + default 0xdead000000000000 + +menu "System setup" + +menu "Machine Configuration" + +choice + prompt "Subarchitecture Configuration" + +config SUBARCH_C3B + bool "C3B" + +config SUBARCH_C4 + bool "C4" + select HAVE_CSRRW + select GENERIC_SCHED_CLOCK +endchoice + +choice + prompt "Uncore Configuration" + +config UNCORE_XUELANG + bool "Uncore for C3B" + depends on SUBARCH_C3B + help + Sunway cpu uncore for C3B + +config UNCORE_JUNZHANG + bool "Uncore for C4" + depends on SUBARCH_C4 + help + Sunway cpu uncore for C4 +endchoice + +choice + prompt "Platform Type" + +config PLATFORM_XUELANG + bool "Xuelang" + depends on UNCORE_XUELANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + select SW64_INTC_V2 + select I2C_SUNWAY if I2C + help + Sunway board chipset for C3B + +config PLATFORM_JUNZHANG + bool "JunZhang" + depends on UNCORE_JUNZHANG + select SPARSE_IRQ + select SYS_HAS_EARLY_PRINTK + help + Sunway board chipset for C4 + +endchoice + 
+config MIGHT_HAVE_PC_SERIO
+	bool "Use PC serio device i8042"
+	select ARCH_MIGHT_HAVE_PC_SERIO
+	default n
+
+endmenu
+
+menu "CPU Power Management"
+source "drivers/cpufreq/Kconfig"
+
+config SW64_CPUAUTOPLUG
+	bool "sw64 CPU Autoplug interface"
+	depends on SW64_CPUFREQ
+	default y
+	help
+	  Turns on the CPU autoplug interface for SW64.
+
+endmenu
+# clear all implied options (don't want default values for those):
+# Most of these machines have ISA slots; not exactly sure which don't,
+# and this doesn't activate hordes of code, so do it always.
+config ISA
+	bool
+	default y
+	help
+	  Find out whether you have ISA slots on your motherboard. ISA is the
+	  name of a bus system, i.e. the way the CPU talks to the other stuff
+	  inside your box. Other bus systems are PCI, EISA, MicroChannel
+	  (MCA) or VESA. ISA is an older system, now being displaced by PCI;
+	  newer boards don't support it. If you have ISA, say Y, otherwise N.
+
+config ISA_DMA_API
+	bool
+	default y
+
+config PCI_DOMAINS
+	def_bool PCI
+
+config PCI_DOMAINS_GENERIC
+	def_bool PCI
+
+config PCI_SYSCALL
+	def_bool PCI
+
+config IOMMU_HELPER
+	def_bool PCI
+
+config PHYSICAL_START
+	hex "Physical address where the kernel starts"
+	default "0x900000"
+	help
+	  This gives the physical address where the kernel starts, and it
+	  is 0x10000 before _text. If you plan to use the kernel for capturing
+	  a crash dump, change this value to the start of the reserved region
+	  (the "X" value as specified in the "crashkernel=YM@XM" command
+	  line boot parameter passed to the panic-ed kernel).
+
+config KEXEC
+	bool "Kexec system call (EXPERIMENTAL)"
+	select KEXEC_CORE
+	help
+	  kexec is a system call that implements the ability to shutdown your
+	  current kernel, and to start another kernel. It is like a reboot
+	  but it is independent of the system firmware. And like a reboot
+	  you can start any kernel with it, not just Linux.
+
+	  The name comes from the similarity to the exec system call.
+
+	  It is an ongoing process to be certain the hardware in a machine
+	  is properly shutdown, so do not be surprised if this code does not
+	  initially work for you. As of this writing the exact hardware
+	  interface is strongly in flux, so no good recommendation can be
+	  made.
+
+config CRASH_DUMP
+	bool "Kernel crash dumps (EXPERIMENTAL)"
+	help
+	  Generate crash dump after being started by kexec.
+	  This should normally be set only in special crash dump kernels
+	  which are loaded in the main kernel with kexec-tools into
+	  a specially reserved region and then later executed after
+	  a crash by kdump/kexec. The crash dump kernel must be compiled
+	  to a memory address not used by the main kernel or firmware using
+	  PHYSICAL_START.
+
+config SECCOMP
+	def_bool y
+	prompt "Enable seccomp to safely compute untrusted bytecode"
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl(PR_SET_SECCOMP), it cannot be disabled
+	  and the task is only allowed to execute a few safe syscalls
+	  defined by each seccomp mode.
+
+	  If unsure, say Y. Only embedded should say N here.
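The strict mode referenced in the SECCOMP help text above can be exercised from user space with a single prctl() call. A minimal sketch for illustration (not part of this patch; prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) is the generic kernel ABI, the message written here is arbitrary)::

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/prctl.h>
	#include <sys/syscall.h>
	#include <linux/seccomp.h>

	int main(void)
	{
		/* Enter strict seccomp mode; this cannot be undone for the task. */
		if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
			perror("prctl(PR_SET_SECCOMP)");
			return 1;
		}

		/* Only read(), write(), exit() and sigreturn() remain allowed. */
		write(STDOUT_FILENO, "sandboxed\n", 10);

		/* Exit via the raw exit syscall; exit_group() would be killed. */
		syscall(SYS_exit, 0);
	}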
+
+config GENERIC_HWEIGHT
+	bool
+	default y
+
+config SMP
+	bool "Symmetric multi-processing support"
+	depends on SW64
+	select USE_GENERIC_SMP_HELPERS
+	help
+	  This enables support for systems with more than one CPU. If you have
+	  a system with only one CPU, like most personal computers, say N. If
+	  you have a system with more than one CPU, say Y.
+
+	  If you say N here, the kernel will run on single and multiprocessor
+	  machines, but will use only one CPU of a multiprocessor machine. If
+	  you say Y here, the kernel will run on many, but not all,
+	  singleprocessor machines. On a singleprocessor machine, the kernel
+	  will run faster if you say N here.
+
+	  See also the SMP-HOWTO available at
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  If you don't know what to do here, say N.
+
+config ARCH_PROC_KCORE_TEXT
+	def_bool y
+
+config HAVE_DEC_LOCK
+	bool "Use arch-specified dec_and_lock"
+	depends on SMP && !NUMA
+	default y
+
+config TRACE_IRQFLAGS_SUPPORT
+	def_bool y
+
+config ARCH_SUPPORTS_UPROBES
+	def_bool y
+
+config SCHED_SMT
+	bool "SMT scheduler support"
+	depends on SMP && SUBARCH_C4
+	help
+	  Improves the CPU scheduler's decision making when dealing with
+	  MultiThreading at a cost of slightly increased overhead in some
+	  places. If unsure say N here.
+
+config SCHED_MC
+	bool "Multi-core scheduler support"
+	help
+	  Multi-core scheduler support improves the CPU scheduler's decision
+	  making when dealing with multi-core CPU chips at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
+config NR_CPUS
+	int "Maximum number of CPUs (2-256)"
+	range 2 256
+	depends on SMP
+	default "64" if UNCORE_XUELANG
+	help
+	  SW64 can handle a maximum of 256 CPUs.
+
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs"
+	depends on SMP
+	help
+	  Say Y here to allow turning CPUs off and on. CPUs can be
+	  controlled through /sys/devices/system/cpu.
+	  ( Note: power management support will enable this option
+	    automatically on SMP systems. )
+	  Say N if you want to disable CPU hotplug.
+
+config ARCH_SPARSEMEM_ENABLE
+	bool "Sparse Memory Support"
+	depends on SMP
+	select SPARSEMEM_VMEMMAP_ENABLE
+
+source "kernel/livepatch/Kconfig"
+
+config NUMA
+	bool "NUMA Support"
+	depends on SMP && !FLATMEM
+	select ACPI_NUMA if ACPI
+	help
+	  Say Y to compile the kernel to support NUMA (Non-Uniform Memory
+	  Access). This option is for configuring high-end multiprocessor
+	  server machines. If in doubt, say N.
+
+config USE_PERCPU_NUMA_NODE_ID
+	def_bool y
+	depends on NUMA
+
+config NODES_SHIFT
+	int
+	default "7"
+	depends on NUMA
+
+config RELOCATABLE
+	bool "Relocatable kernel"
+	help
+	  This builds a kernel image that retains relocation information
+	  so it can be loaded someplace besides the default location.
+	  The relocations make the kernel binary about 15% larger,
+	  but are discarded at runtime.
+
+config RELOCATION_TABLE_SIZE
+	hex "Relocation table size"
+	depends on RELOCATABLE
+	range 0x0 0x01000000
+	default "0x80000"
+	help
+	  A table of relocation data will be appended to the kernel binary
+	  and parsed at boot to fix up the relocated kernel.
+
+	  This option allows the amount of space reserved for the table to be
+	  adjusted, although the default of 512KB should be ok in most cases.
+
+	  The build will fail and a valid size suggested if this is too small.
+
+	  If unsure, leave at the default value.
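The two options above split the relocation work between build and boot: the relocs tool appends a table to the image, and early boot code walks that table to shift link-time addresses by the load offset. The record format produced by arch/sw_64/tools/relocs is not visible in this hunk, so the following is only a rough sketch under an assumed format (a flat array of 64-bit image offsets, each pointing at a 64-bit slot holding a link-time address)::

	#include <stdint.h>

	/*
	 * Hypothetical sketch of the boot-time fixup pass; the real entry
	 * encoding is defined by the arch's relocs tool and not shown here.
	 */
	static void apply_relocations(uint8_t *image, const uint64_t *table,
				      uint64_t nr_entries, uint64_t load_offset)
	{
		for (uint64_t i = 0; i < nr_entries; i++) {
			/* table[i] is the image offset of a 64-bit address slot. */
			uint64_t *slot = (uint64_t *)(image + table[i]);

			/* Shift the link-time address to its runtime location. */
			*slot += load_offset;
		}
	}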
+ +config RANDOMIZE_BASE + bool "Randomize the address of the kernel image" + depends on RELOCATABLE + help + Randomizes the physical and virtual address at which the + kernel image is loaded, as a security feature that + deters exploit attempts relying on knowledge of the location + of kernel internals. + + Entropy is generated using any coprocessor 0 registers available. + + The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET. + + If unsure, say N. + +config RANDOMIZE_BASE_MAX_OFFSET + hex "Maximum kASLR offset" if EXPERT + depends on RANDOMIZE_BASE + range 0x0 0x20000000 + default "0x10000000" + help + When kASLR is active, this provides the maximum offset that will + be applied to the kernel image. It should be set according to the + amount of physical RAM available in the target system minus + PHYSICAL_START and must be a power of 2. + + This is limited by the size of KTEXT space, 512Mb. The default is 256MB. + +config HZ + int "HZ of the short timer" + default 500 + +source "drivers/eisa/Kconfig" + +source "drivers/pcmcia/Kconfig" + +source "fs/Kconfig.binfmt" + +source "arch/sw_64/lib/Kconfig" + +endmenu + +menu "Boot options" + +config USE_OF + bool "Flattened Device Tree support" + select OF + select IRQ_DOMAIN + help + Include support for flattened device tree machine descriptions. + +config BUILTIN_DTB + bool "Embed DTB in kernel image" + depends on OF + default n + help + Embeds a device tree binary in the kernel image. + +config BUILTIN_DTB_NAME + string "Built in DTB" + depends on BUILTIN_DTB + help + Set the name of the DTB to embed, leave blank to pick one + automatically based on kernel configuration. + +config EFI + bool "UEFI runtime support" + select UCS2_STRING + select EFI_RUNTIME_WRAPPERS + default y + help + This option provides support for runtime services provided + by UEFI firmware (such as non-volatile variables, realtime + clock, and platform reset). A UEFI stub is also provided to + allow the kernel to be booted as an EFI application. This + is only useful on systems that have UEFI firmware. + +config DMI + bool "Enable support for SMBIOS (DMI) tables" + depends on EFI + default y + help + This enables SMBIOS/DMI feature for systems. + + This option is only useful on systems that have UEFI firmware. + However, even with this option, the resultant kernel should + continue to boot on existing non-UEFI platforms. + + NOTE: This does *NOT* enable or encourage the use of DMI quirks, + i.e., the practice of identifying the platform via DMI to + decide whether certain workarounds for buggy hardware and/or + firmware need to be enabled. This would require the DMI subsystem + to be enabled much earlier than we do on ARM, which is non-trivial. + +config CMDLINE_BOOL + bool "Built-in kernel command line" + help + Allow for specifying boot arguments to the kernel at + build time. On some systems (e.g. embedded ones), it is + necessary or convenient to provide some or all of the + kernel boot arguments with the kernel itself (that is, + to not rely on the boot loader to provide them.) + + To compile command line arguments into the kernel, + set this option to 'Y', then fill in the + boot arguments in CONFIG_CMDLINE. + + Systems with fully functional boot loaders (i.e. non-embedded) + should leave this option set to 'N'. + +config CMDLINE + string "Built-in kernel command string" + depends on CMDLINE_BOOL + default "" + help + Enter arguments here that should be compiled into the kernel + image and used at boot time. 
If the boot loader provides a
+	  command line at boot time, it is appended to this string to
+	  form the full kernel command line, when the system boots.
+
+	  However, you can use the CONFIG_CMDLINE_OVERRIDE option to
+	  change this behavior.
+
+	  In most cases, the command line (whether built-in or provided
+	  by the boot loader) should specify the device for the root
+	  file system.
+
+config CMDLINE_OVERRIDE
+	bool "Built-in command line overrides boot loader arguments"
+	depends on CMDLINE_BOOL
+	help
+	  Set this option to 'Y' to have the kernel ignore the boot loader
+	  command line, and use ONLY the built-in command line.
+
+	  This is used to work around broken boot loaders. This should
+	  be set to 'N' under normal conditions.
+
+config FORCE_MAX_ZONEORDER
+	int
+	default "16" if (HUGETLB_PAGE)
+	default "11"
+	help
+	  The kernel memory allocator divides physically contiguous memory
+	  blocks into "zones", where each zone is a power of two number of
+	  pages. This option selects the largest power of two that the kernel
+	  keeps in the memory allocator. If you need to allocate very large
+	  blocks of physically contiguous memory, then you may need to
+	  increase this value.
+
+	  This config option is actually maximum order plus one. For example,
+	  a value of 11 means that the largest free memory block is 2^10 pages.
+
+	  We make sure that we can allocate up to a HugePage size for each configuration.
+	  Hence we have:
+		MAX_ORDER = (PMD_SHIFT - PAGE_SHIFT) + 1 => PAGE_SHIFT - 2
+
+endmenu
+
+source "drivers/firmware/Kconfig"
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/acpi/Kconfig"
+
+config ARCH_SUSPEND_POSSIBLE
+	depends on SW64
+	def_bool y
+
+config ARCH_HIBERNATION_POSSIBLE
+	depends on SW64
+	def_bool y
+
+config ARCH_SELECT_MEMORY_MODEL
+	def_bool ARCH_SPARSEMEM_ENABLE
+
+source "drivers/cpuidle/Kconfig"
+
+source "drivers/idle/Kconfig"
+
+endmenu
+
+source "arch/sw_64/kvm/Kconfig"
diff --git a/arch/sw_64/Kconfig.debug b/arch/sw_64/Kconfig.debug
new file mode 100644
index 0000000000000000000000000000000000000000..6cb3c2488b368e3acc38cd32b33fe658036ac8d6
--- /dev/null
+++ b/arch/sw_64/Kconfig.debug
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: GPL-2.0
+config EARLY_PRINTK
+	bool "Early printk" if EXPERT
+	depends on SYS_HAS_EARLY_PRINTK
+	default y
+	help
+	  This option enables special console drivers which allow the kernel
+	  to print messages very early in the bootup process.
+
+	  This is useful for kernel debugging when your machine crashes very
+	  early before the console code is initialized. For normal operation,
+	  it is not recommended because it looks ugly on some machines and
+	  doesn't cooperate with an X server. You should normally say N here,
+	  unless you want to debug such a crash.
+
+config UNA_PRINT
+	bool "Show debug info about user unaligned memory access"
+	default n
+
+config MATHEMU
+	tristate "Kernel FP software completion" if DEBUG_KERNEL && !SMP
+	default y if !DEBUG_KERNEL || SMP
+	help
+	  This option is required for IEEE compliant floating point arithmetic
+	  on SW64. The only time you would ever not say Y is to say M in
+	  order to debug the code. Say Y unless you know what you are doing.
+
+config STACKTRACE_SUPPORT
+	bool
+	default y
+
+config SW64_RRU
+	bool "Enable RRU(Remote Read User)"
+	depends on SW64
+	default n
+	help
+	  Duplicate user stdout and stderr to a dedicated buffer.
+	  Do not enable it in a production kernel.
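As background for the EARLY_PRINTK option above: an early printk driver normally registers a boot console that writes straight to a UART and is replaced automatically once the real console comes up. register_console(), early_param() and the CON_BOOT flag are the generic kernel mechanism; the rest of this sketch, in particular sunway_uart_putc(), is a hypothetical placeholder rather than code from this patch::

	#include <linux/console.h>
	#include <linux/init.h>

	/* Hypothetical helper: busy-wait write of one byte to the UART. */
	extern void sunway_uart_putc(char c);

	static void early_console_write(struct console *con, const char *s,
					unsigned int n)
	{
		while (n--)
			sunway_uart_putc(*s++);
	}

	static struct console early_console_dev = {
		.name	= "early",
		.write	= early_console_write,
		.flags	= CON_PRINTBUFFER | CON_BOOT,	/* dropped at switchover */
		.index	= -1,
	};

	static int __init setup_early_printk(char *buf)
	{
		register_console(&early_console_dev);
		return 0;
	}
	early_param("earlyprintk", setup_early_printk);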
+
+config SW64_RRK
+	bool "Enable RRK(Remote Read Kernel)"
+	depends on SW64
+	default y
+	help
+	  Duplicate the kernel log to a dedicated buffer.
+	  Do not enable it in a production kernel.
+
+config DEBUG_MATCH
+	bool "instruction-flow and data-flow match debugfs interface"
+	depends on DEBUG_FS
+	default n
+	help
+	  Turns on the DebugFS interface for instruction-flow and data-flow match.
diff --git a/arch/sw_64/Makefile b/arch/sw_64/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..84f0dca5e9f710596d5b36dbff995570c6ef304d
--- /dev/null
+++ b/arch/sw_64/Makefile
@@ -0,0 +1,68 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# arch/sw_64/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1994 by Linus Torvalds
+#
+
+
+archscripts: scripts_basic
+	$(Q)$(MAKE) $(build)=arch/sw_64/tools relocs
+
+archheaders:
+	$(Q)$(MAKE) $(build)=arch/sw_64/kernel/syscalls all
+
+NM := $(NM) -B
+CCVERSION := $(shell $(CC) -dumpversion)
+LDFLAGS_vmlinux := -static -N #-relax
+CHECKFLAGS += -D__sw__
+
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux += --emit-relocs
+endif
+
+cflags-y := -pipe -ffixed-8 -mno-fp-regs #-msmall-data
+ifeq ($(CONFIG_SUBARCH_C4),y)
+	cflags-y += -fsw-rev
+endif
+cflags-y += $(call cc-option, -fno-jump-tables)
+
+cflags-y += $(cpuflags-y)
+
+KBUILD_CFLAGS += $(cflags-y)
+KBUILD_DEFCONFIG = xuelang_defconfig
+
+head-y := arch/sw_64/kernel/head.o
+
+core-y += arch/sw_64/
+drivers-$(CONFIG_PCI) += arch/sw_64/pci/
+libs-y += arch/sw_64/lib/
+
+# export what is needed by arch/sw_64/boot/Makefile
+LIBS_Y := $(patsubst %/, %/lib.a, $(libs-y))
+export LIBS_Y
+
+boot := arch/sw_64/boot
+
+# Default target when executing make with no arguments
+all: $(boot)/vmlinux.bin.gz
+
+$(boot)/vmlinux.bin.gz: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $@
+
+bootimage bootpfile bootpzfile: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+archclean:
+	$(Q)$(MAKE) $(clean)=$(boot)
+	$(Q)$(MAKE) $(clean)=arch/sw_64/tools
+
+KBUILD_IMAGE := $(boot)/vmlinux.bin
+
+define archhelp
+	echo '* boot	- Compressed kernel image (arch/sw_64/boot/vmlinux.bin.gz)'
+endef
diff --git a/arch/sw_64/Makefile.postlink b/arch/sw_64/Makefile.postlink
new file mode 100644
index 0000000000000000000000000000000000000000..248844d141dd23098d874d219adc362ee21e99e9
--- /dev/null
+++ b/arch/sw_64/Makefile.postlink
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+# ===========================================================================
+# Post-link SW64 pass
+# ===========================================================================
+#
+# 1. 
Insert relocations into vmlinux + +PHONY := __archpost +__archpost: + +-include include/config/auto.conf +include scripts/Kbuild.include + +CMD_RELOCS = arch/sw_64/tools/relocs +quiet_cmd_relocs = RELOCS $@ + cmd_relocs = $(CMD_RELOCS) $@ + +# `@true` prevents complaint when there is nothing to be done + +vmlinux: FORCE + @true +ifeq ($(CONFIG_RELOCATABLE),y) + $(call if_changed,relocs) +endif + +%.ko: FORCE + @true + +clean: + @true + +PHONY += FORCE clean + +FORCE: + +.PHONY: $(PHONY) diff --git a/arch/loongarch/include/uapi/asm/Kbuild b/arch/sw_64/boot/.gitignore similarity index 59% rename from arch/loongarch/include/uapi/asm/Kbuild rename to arch/sw_64/boot/.gitignore index 4aa680ca2e5fdf6407f8692264599da101d46ab3..8a90e24c76ab83a8c475375c2ecd5f070c5bbad5 100644 --- a/arch/loongarch/include/uapi/asm/Kbuild +++ b/arch/sw_64/boot/.gitignore @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 -generic-y += kvm_para.h +vmlinux diff --git a/arch/sw_64/boot/Makefile b/arch/sw_64/boot/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..dd09764846493072121e61e8630291d80c318232 --- /dev/null +++ b/arch/sw_64/boot/Makefile @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# arch/sw_64/boot/Makefile +# +# +# This file is subject to the terms and conditions of the GNU General Public +# License. See the file "COPYING" in the main directory of this archive +# for more details. +# +# Based on arch/arm64/boot/Makefile. +# + +OBJCOPYFLAGS_vmlinux.bin := -O binary + +targets := vmlinux vmlinux.bin vmlinux.bin.gz + +quiet_cmd_strip = STRIP $@ + cmd_strip = $(STRIP) -o $@ $< + +# Compressed kernel image +$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE + $(call if_changed,gzip) + @echo ' Kernel $@ is ready' + +$(obj)/vmlinux: vmlinux FORCE + $(call if_changed,strip) + +$(obj)/vmlinux.bin: $(obj)/vmlinux FORCE + $(call if_changed,objcopy) diff --git a/arch/sw_64/boot/dts/Makefile b/arch/sw_64/boot/dts/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e32c159cab641fb82d09f0e9eaeb428c7770f006 --- /dev/null +++ b/arch/sw_64/boot/dts/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# Built-in dtb + +ifeq ($(CONFIG_PLATFORM_XUELANG),y) +builtindtb-y := chip3 +endif + +ifeq ($(CONFIG_PLATFORM_JUNZHANG),y) +builtindtb-y := empty +endif + +ifeq ($(CONFIG_BUILTIN_DTB), y) +ifneq ($(CONFIG_BUILTIN_DTB_NAME),"") + builtindtb-y := $(patsubst "%",%,$(CONFIG_BUILTIN_DTB_NAME)) +endif + +obj-y += $(builtindtb-y).dtb.o +dtb-y := $(builtindtb-y).dtb + +# for CONFIG_OF_ALL_DTBS test +dtstree := $(srctree)/$(src) +dtb- := $(patsubst $(dtstree)/%.dts,%.dtb, $(wildcard $(dtstree)/*.dts)) +else +dtb-y := $(builtindtb-y).dtb +endif + +clean-files := *.dtb *.dtb.S diff --git a/arch/sw_64/boot/dts/chip3.dts b/arch/sw_64/boot/dts/chip3.dts new file mode 100644 index 0000000000000000000000000000000000000000..082506393ac98ffb1219d124029cafe8608ce4dd --- /dev/null +++ b/arch/sw_64/boot/dts/chip3.dts @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Default device tree; + */ + +/dts-v1/; +/ { + compatible = "sunway,chip3"; + model = "chip3"; + #address-cells = <2>; + #size-cells = <2>; + + soc { + compatible = "simple-bus"; + #address-cells = <2>; + #size-cells = <2>; + ranges; + + clocks { + i2cclk: i2cclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + clock-output-names = "i2cclk_25mhz"; + }; + spiclk: spiclk { + compatible = "fixed-clock"; + clock-frequency = <25000000>; + #clock-cells = <0>; + 
clock-output-names = "spiclk_25mhz"; + }; + + }; + + intc: interrupt-controller { + compatible = "sw64,sw6_irq_controller"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + lpc_intc: interrupt-controller@0x8037 { + compatible = "sw64,lpc_intc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&intc>; + interrupts = <2>; + }; + + uart: serial0@8033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x8033 0x0 0x0 0x1000>; + interrupt-parent=<&intc>; + interrupts = <3>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + serial1@9033 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw6,sunway-apb-uart"; + reg = <0x9033 0x0 0x0 0x1000>; + reg-shift = <9>; + reg-io-width = <4>; + clock-frequency = <24000000>; + status = "okay"; + }; + + + i2c0@0x8031 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "snps,designware-i2c"; + reg = <0x8031 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <5>; + status = "okay"; + }; + + i2c1@0x8034 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8034 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <6>; + status = "okay"; + }; + + i2c2@0x8035 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0x8035 0x0 0x0 0x8000>; + clock-frequency = <100000>; + clocks = <&i2cclk>; + interrupt-parent=<&intc>; + interrupts = <7>; + status = "okay"; + + rtc: pcf8523@68 { + compatible = "nxp,pcf8523"; + reg = <0x68>; + }; + + lm75: at30tse752a@48 { + compatible = "microchip,tcn75"; + reg = <0x48>; + }; + }; + + pvt: pvt@0x8030 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sw64,pvt-vol"; + reg = <0x8030 0x0 0x0 0x7c00>; + status = "okay"; + }; + + spi: spi@0x8032 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3-spi"; + reg = <0x8032 0x0 0x0 0x8000>; + clocks = <&spiclk>; + interrupt-parent=<&intc>; + interrupts = <4>; + status = "okay"; + + flash@0 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <0 0 0 0 >; /* 0: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares0"; + reg = <0 0x400000>; + }; + }; + }; + + flash@1 { + compatible = "winbond,w25q32dw", "jedec,spi-flash"; + spi-max-frequency = <25000000>; + m25p,fast-read; + spi-cpha; + spi-cpol; + poll_mode = <1>; /* poll_mode:1 interrupt mode: 0 */ + reg-io-width = <2>; + reg = <1 0 0 0 >; /* 1: flash chip selected bit */ + + partitions { + compatible = "fixed-partitions"; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "spares1"; + reg = <0 0x400000>; + }; + }; + }; + }; + + lpc: lpc@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + compatible = "sunway,chip3_lpc"; + reg = <0x8037 0x40000000 0x0 0x8000>; + status = "okay"; + + }; + + ipmi-kcs@0x8037 { + #address-cells = <2>; + #size-cells = <2>; + device_type = "ipmi"; + compatible = "ipmi-kcs"; + reg = <0x8037 0x10000ca2 0x0 0x10>; + reg-size = <1>; + reg-spacing = <1>; + reg-shift = <0>; + status = "disabled"; + }; + + 
ipmi-bt@0x8037 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			device_type = "ipmi";
+			compatible = "ipmi-bt";
+			reg = <0x8037 0x100000e4 0x0 0x10>;
+			interrupt-parent=<&lpc_intc>;
+			interrupts = <10>;
+			reg-size = <1>;
+			reg-spacing = <1>;
+			reg-shift = <0>;
+			status = "disabled";
+		};
+
+		gpio: gpio@8036 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "snps,sw-gpio";
+			reg = <0x8036 0x0 0x0 0x8000>;
+			status = "okay";
+
+			porta: gpio-controller@0 {
+				compatible = "snps,dw-apb-gpio-port";
+				gpio-controller;
+				#gpio-cells = <2>;
+				snps,nr-gpios = <8>;
+				reg = <0 0 0 0>;
+				interrupt-controller;
+				#interrupt-cells = <2>;
+				interrupt-parent=<&intc>;
+				interrupts = <0>;
+			};
+		};
+
+	};
+};
diff --git a/arch/sw_64/boot/dts/chip_vt.dts b/arch/sw_64/boot/dts/chip_vt.dts
new file mode 100644
index 0000000000000000000000000000000000000000..f26285367f98c6715f3ff88eacb12496d12e7644
--- /dev/null
+++ b/arch/sw_64/boot/dts/chip_vt.dts
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Default device tree;
+ */
+
+/dts-v1/;
+/ {
+	compatible = "sunway,chip3";
+	model = "chip3";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	soc {
+		compatible = "simple-bus";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		intc: interrupt-controller {
+			compatible = "sw64,sw6_irq_vt_controller";
+			interrupt-controller;
+			#interrupt-cells = <1>;
+		};
+
+		uart: serial0@8801 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "ns16550a";
+			reg = <0x8801 0x3f8 0x0 0x10>;
+			interrupt-parent=<&intc>;
+			interrupts = <12>;
+			reg-shift = <0>;
+			reg-io-width = <1>;
+			clock-frequency = <24000000>;
+			status = "okay";
+		};
+		misc: misc0@8036 {
+			#address-cells = <2>;
+			#size-cells = <2>;
+			compatible = "sw6,sunway-ged";
+			reg = <0x8036 0x0 0x0 0x20>;
+			interrupt-parent=<&intc>;
+			interrupts = <13>;
+			reg-shift = <0>;
+			reg-io-width = <8>;
+			clock-frequency = <24000000>;
+			status = "okay";
+		};
+		fw_cfg: fw_cfg@8049 {
+			dma-coherent;
+			reg = <0x8049 0x20000000 0x0 0x18>;
+			compatible = "qemu,fw-cfg-mmio";
+		};
+	};
+};
diff --git a/arch/sw_64/boot/dts/empty.dts b/arch/sw_64/boot/dts/empty.dts
new file mode 100644
index 0000000000000000000000000000000000000000..f8fe34e29641f78dfdc1cc163b63d9becfcaeacd
--- /dev/null
+++ b/arch/sw_64/boot/dts/empty.dts
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Default device tree;
+ */
+
+/dts-v1/;
+/ {
+	compatible = "sunway,chip3";
+	model = "chip3";
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+	soc {
+	};
+};
diff --git a/arch/sw_64/configs/anolis_xuelang_defconfig b/arch/sw_64/configs/anolis_xuelang_defconfig
new file mode 100644
index 0000000000000000000000000000000000000000..8c9c4dda69edd6dcf3ddf825baa5cf3c77f30982
--- /dev/null
+++ b/arch/sw_64/configs/anolis_xuelang_defconfig
@@ -0,0 +1,1104 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+CONFIG_BPF_LSM=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_PSI_DEFAULT_DISABLED=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_NUMA_BALANCING=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_CGROUP_PIDS=y
+CONFIG_CGROUP_RDMA=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_HUGETLB=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +CONFIG_USER_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_KALLSYMS_ALL=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_PROFILING=y +CONFIG_KEXEC=y +CONFIG_CRASH_DUMP=y +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_SMP=y +CONFIG_SCHED_MC=y +CONFIG_NR_CPUS=256 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_LIVEPATCH=y +CONFIG_NUMA=y +CONFIG_HZ=100 +CONFIG_BINFMT_MISC=m +CONFIG_USE_OF=y +CONFIG_DMI_SYSFS=y +CONFIG_FW_CFG_SYSFS=y +CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y +CONFIG_HIBERNATION=y +CONFIG_PM_DEBUG=y +CONFIG_ACPI_SPCR_TABLE=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HED=y +CONFIG_ACPI_NFIT=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_JUMP_LABEL=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_ALL is not set +CONFIG_MODULE_SIG_SHA256=y +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_BLK_CGROUP_IOCOST=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_IOSCHED_BFQ=y +CONFIG_ZSWAP=y +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_COMPAT_BRK is not set +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_KSM=y +CONFIG_MEMORY_FAILURE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_USERFAULTFD=y +CONFIG_LRU_GEN=y +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +CONFIG_DAMON_DBGFS=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_BBR=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m 
+CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XTABLES=y +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m 
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_PE_SIP=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NFT_BRIDGE_REJECT=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_ATM=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_X25=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_CHOKE=m 
+CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_DEFAULT_FQ_CODEL=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +CONFIG_NET_TC_SKB_EXT=y +CONFIG_DCB=y +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +CONFIG_NET_SWITCHDEV=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# CONFIG_WIRELESS is not set +CONFIG_RFKILL=m +CONFIG_NET_IFE=m +CONFIG_LWTUNNEL=y +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIE_DPC=y +CONFIG_PCIE_EDR=y +CONFIG_PCI_MSI=y +CONFIG_PCI_STUB=y +CONFIG_PCI_IOV=y +CONFIG_PCI_PRI=y +CONFIG_PCI_PASID=y +CONFIG_VGA_ARB_MAX_GPUS=64 +CONFIG_HOTPLUG_PCI=y +CONFIG_HOTPLUG_PCI_ACPI=y +CONFIG_HOTPLUG_PCI_CPCI=y +CONFIG_HOTPLUG_PCI_SHPC=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_CONNECTOR=y +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_NULL_BLK=m +CONFIG_ZRAM=m +CONFIG_ZRAM_WRITEBACK=y +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_LOOP_MIN_COUNT=0 +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=m +CONFIG_BLK_DEV_RAM_SIZE=16384 +CONFIG_CDROM_PKTCDVD=m +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_RBD=m +CONFIG_BLK_DEV_UBLK=m +CONFIG_BLK_DEV_NVME=m +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=m +CONFIG_NVME_TCP=m +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=m +CONFIG_ENCLOSURE_SERVICES=m +CONFIG_PVPANIC=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=m +CONFIG_BLK_DEV_SR=m +CONFIG_CHR_DEV_SG=m +CONFIG_CHR_DEV_SCH=m +CONFIG_SCSI_ENCLOSURE=m +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_ATA=y +CONFIG_ISCSI_TCP=m +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_BE2ISCSI=m +CONFIG_SCSI_HPSA=m +CONFIG_SCSI_AHA152X=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_SMARTPQI=m +CONFIG_LIBFC=m +CONFIG_LIBFCOE=m +CONFIG_SCSI_FDOMAIN_ISA=m +CONFIG_SCSI_STEX=m 
+CONFIG_SCSI_SYM53C8XX_2=m +CONFIG_SCSI_QLOGIC_FAS=m +CONFIG_SCSI_QLOGIC_1280=m +CONFIG_SCSI_QLA_ISCSI=m +CONFIG_SCSI_LPFC=m +CONFIG_SCSI_DEBUG=m +CONFIG_SCSI_PMCRAID=m +CONFIG_SCSI_PM8001=m +CONFIG_SCSI_BFA_FC=m +CONFIG_SCSI_VIRTIO=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +CONFIG_SATA_AHCI_PLATFORM=m +CONFIG_SATA_SIL24=m +CONFIG_ATA_GENERIC=m +CONFIG_MD=y +CONFIG_BLK_DEV_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_FAULTY=m +CONFIG_MD_CLUSTER=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_WIREGUARD=m +CONFIG_NET_FC=y +CONFIG_IFB=m +CONFIG_NET_TEAM=m +CONFIG_NET_TEAM_MODE_BROADCAST=m +CONFIG_NET_TEAM_MODE_ROUNDROBIN=m +CONFIG_NET_TEAM_MODE_RANDOM=m +CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m +CONFIG_NET_TEAM_MODE_LOADBALANCE=m +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_IPVLAN=m +CONFIG_IPVTAP=m +CONFIG_VXLAN=m +CONFIG_GENEVE=m +CONFIG_MACSEC=m +CONFIG_NETCONSOLE=m +CONFIG_NETCONSOLE_DYNAMIC=y +CONFIG_VETH=m +CONFIG_VIRTIO_NET=m +CONFIG_NLMON=m +CONFIG_NET_VRF=m +CONFIG_VSOCKMON=m +# CONFIG_ATM_DRIVERS is not set +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_ET131X=m +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_TIGON3=m +CONFIG_BNX2X=m +CONFIG_BNXT=m +CONFIG_BNXT_DCB=y +CONFIG_THUNDER_NIC_PF=m +CONFIG_THUNDER_NIC_VF=m +CONFIG_CAVIUM_PTP=y +CONFIG_CHELSIO_T1=m +CONFIG_CHELSIO_T1_1G=y +# CONFIG_NET_VENDOR_CIRRUS is not set +CONFIG_DNET=m +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBE_DCB=y +CONFIG_IXGBEVF=m +CONFIG_I40E=m +CONFIG_I40EVF=m +CONFIG_ICE=m +CONFIG_FM10K=m +CONFIG_IGC=m +CONFIG_JME=m +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=m +# CONFIG_MLX4_CORE_GEN2 is not set +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLX5_CORE_IPOIB=y +CONFIG_MLXSW_CORE=m +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +CONFIG_QCA7000_SPI=m +CONFIG_QCOM_EMAC=m +CONFIG_RMNET=m +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_SFC=m +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_PHYLIB=y +CONFIG_MDIO_BUS_MUX_MULTIPLEXER=m +CONFIG_MDIO_BUS_MUX_MMIOREG=m +CONFIG_PPP=m +CONFIG_PPPOE=m +CONFIG_SLIP=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +CONFIG_SLIP_MODE_SLIP6=y +# CONFIG_WLAN is not set +CONFIG_WAN=y +CONFIG_NETDEVSIM=m +CONFIG_ISDN=y +CONFIG_INPUT_LEDS=m +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +CONFIG_SERIO_RAW=m +CONFIG_SERIO_ALTERA_PS2=m +CONFIG_SERIO_ARC_PS2=m 
+CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_PCI=m +CONFIG_SERIAL_8250_EXTENDED=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=m +CONFIG_IPMI_HANDLER=m +CONFIG_IPMI_PANIC_EVENT=y +CONFIG_IPMI_PANIC_STRING=y +CONFIG_IPMI_DEVICE_INTERFACE=m +CONFIG_IPMI_SI=m +CONFIG_IPMI_SSIF=m +CONFIG_IPMI_WATCHDOG=m +CONFIG_IPMI_POWEROFF=m +CONFIG_HW_RANDOM=y +CONFIG_TCG_TIS=y +CONFIG_TCG_ATMEL=m +CONFIG_I2C_CHARDEV=m +CONFIG_I2C_MUX=m +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_I2C_SMBUS=m +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_GPIO_GENERIC_PLATFORM=m +CONFIG_POWER_RESET=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=m +CONFIG_PMBUS=m +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_USER_SPACE=y +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +CONFIG_WATCHDOG_SYSFS=y +CONFIG_SOFT_WATCHDOG=m +CONFIG_I6300ESB_WDT=m +CONFIG_SSB=y +CONFIG_RC_CORE=m +CONFIG_DRM=m +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +# CONFIG_DRM_I2C_SIL164 is not set +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +CONFIG_DRM_NOUVEAU=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +CONFIG_FB=y +CONFIG_FB_EFI=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MODE_HELPERS=y +CONFIG_FB_TILEBLITTING=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +# CONFIG_HID_ITE is not set +CONFIG_HID_LOGITECH=m +# CONFIG_HID_REDRAGON is not set +# CONFIG_I2C_HID is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_ERDMA=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG_DATA=y +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_SYSTOHC is not set +CONFIG_RTC_DRV_DS1307=m +CONFIG_RTC_DRV_PCF8523=m +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=m +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +CONFIG_UIO_DMEM_GENIRQ=m +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_DAX=y +CONFIG_STM=m +CONFIG_EXT3_FS=y +CONFIG_EXT3_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_REISERFS_FS=m +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=y +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_F2FS_FS=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QFMT_V2=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +CONFIG_CACHEFILES=m +CONFIG_CACHEFILES_ONDEMAND=y +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y 
+CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +CONFIG_CRAMFS=m +CONFIG_SQUASHFS=m +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_LZ4=y +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +CONFIG_PSTORE=y +CONFIG_PSTORE_CONSOLE=y +CONFIG_PSTORE_RAM=y +CONFIG_EROFS_FS=m +CONFIG_EROFS_FS_ZIP_LZMA=y +CONFIG_EROFS_FS_ZIP_DEFLATE=y +CONFIG_EROFS_FS_ONDEMAND=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_SUNRPC_DEBUG=y +CONFIG_CEPH_FS=m +CONFIG_CEPH_FS_POSIX_ACL=y +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_DLM=m +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_BIG_KEYS=y +CONFIG_TRUSTED_KEYS=m +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +CONFIG_IMA=y +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_HASH_SHA256=y +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_APPRAISE=y +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_EVM=y +CONFIG_EVM_LOAD_X509=y +CONFIG_LSM="lockdown,yama,loadpin,safesetid,integrity,bpf" +CONFIG_BUG_ON_DATA_CORRUPTION=y +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_DH=m +CONFIG_CRYPTO_SM2=y +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m 
+CONFIG_CRYPTO_SM4_GENERIC=y +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SM3_GENERIC=y +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_DEV_VIRTIO=m +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y +CONFIG_CRC_ITU_T=y +CONFIG_CRC7=m +CONFIG_CRC8=m +CONFIG_DMA_CMA=y +CONFIG_PRINTK_TIME=y +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_INFO_DWARF4=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_SECTION_MISMATCH=y +# CONFIG_FRAME_POINTER is not set +CONFIG_KGDB=y +CONFIG_KGDB_TESTS=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_KEYBOARD=y +CONFIG_DEBUG_SHIRQ=y +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_TIMEOUT=1 +CONFIG_SOFTLOCKUP_DETECTOR=y +CONFIG_SCHEDSTATS=y +CONFIG_DEBUG_LIST=y +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +# CONFIG_RCU_TRACE is not set +CONFIG_STACK_TRACER=y +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_HIST_TRIGGERS=y +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +CONFIG_TEST_BPF=m diff --git a/arch/sw_64/configs/junzhang_defconfig b/arch/sw_64/configs/junzhang_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..4f25770ca19310643bf90898a478eb9d0922d1da --- /dev/null +++ b/arch/sw_64/configs/junzhang_defconfig @@ -0,0 +1,667 @@ +CONFIG_LOCALVERSION="-junzhang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SUBARCH_C4=y +CONFIG_SMP=y +CONFIG_SCHED_SMT=y +CONFIG_NR_CPUS=64 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y 
+CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m 
+CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set 
+# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y +CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m 
+CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/kata_guest_defconfig b/arch/sw_64/configs/kata_guest_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..8122155c127659ffdf6b0e0b0df3a2ce5f677cdf --- /dev/null +++ b/arch/sw_64/configs/kata_guest_defconfig @@ -0,0 +1,633 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_BPF_SYSCALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_BINFMT_MISC=y +CONFIG_USE_OF=y +CONFIG_SW64_BUILTIN_DTB=y +CONFIG_SW64_BUILTIN_DTB_NAME="chip_vt" +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_GOOGLE_FIRMWARE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_NONBOOT_CORE=y +CONFIG_SW64_SUSPEND_DEEPSLEEP_BOOTCORE=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=y 
+CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m 
+CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=y +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m 
+CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_VIRTIO_VSOCKETS=y +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +# CONFIG_WIRELESS is not set +CONFIG_NET_9P=y +CONFIG_NET_9P_VIRTIO=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_OF_OVERLAY=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +CONFIG_HW_RANDOM=y +CONFIG_HW_RANDOM_VIRTIO=y +# CONFIG_DEVPORT is not 
set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_SUNWAY_SUPERIO_AST2400=y +CONFIG_DRM=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_INDEX=y +CONFIG_OVERLAY_FS_XINO_AUTO=y +CONFIG_OVERLAY_FS_METACOPY=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_SQUASHFS=y +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_9P_FS=y +CONFIG_9P_FSCACHE=y +CONFIG_9P_FS_POSIX_ACL=y +CONFIG_9P_FS_SECURITY=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git 
a/arch/sw_64/configs/kata_xuelang_defconfig b/arch/sw_64/configs/kata_xuelang_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..f553f0e71dbf865ff8a49109e0843291b52a98f1 --- /dev/null +++ b/arch/sw_64/configs/kata_xuelang_defconfig @@ -0,0 +1,616 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_NAMESPACES=y +CONFIG_USER_NS=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +# CONFIG_COMPAT_BRK is not set +CONFIG_CPUFREQ_DEBUGFS=y +# CONFIG_LOCK_MEMB is not set +CONFIG_SMP=y +CONFIG_HOTPLUG_CPU=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +# CONFIG_SUSPEND is not set +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_VSOCK=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_BLK_DEV_THROTTLING=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_NETDEV=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m 
+CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_COUNTER=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_OBJREF=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_NFCT=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_FLOW_TABLE_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m 
+CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_NF_LOG_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_VLAN_FILTERING=y +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=y +CONFIG_VSOCKETS_DIAG=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +CONFIG_BPF_JIT=y +CONFIG_NET_DROP_MONITOR=m +# CONFIG_WIRELESS is not set +CONFIG_CAIF=m +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y 
+CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_POLLDEV=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_SUNWAY=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_SPIDEV=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_FIRMWARE_EDID=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +CONFIG_LOGO=y +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_STAGING=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_BTRFS_FS=m +CONFIG_BTRFS_FS_POSIX_ACL=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_AUTOFS4_FS=y +CONFIG_FUSE_FS=y +CONFIG_OVERLAY_FS=m +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y 
+CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/configs/xuelang_defconfig b/arch/sw_64/configs/xuelang_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..b1c0101d00897d0044498ff84411fff456fbed84 --- /dev/null +++ b/arch/sw_64/configs/xuelang_defconfig @@ -0,0 +1,668 @@ +CONFIG_LOCALVERSION="-xuelang" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_USELIB=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_IKHEADERS=y +CONFIG_LOG_BUF_SHIFT=18 +CONFIG_MEMCG=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CPUSETS=y +CONFIG_CGROUP_DEVICE=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_NAMESPACES=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +CONFIG_KALLSYMS_ALL=y +CONFIG_PERF_EVENTS=y +CONFIG_DEBUG_PERF_USE_VMALLOC=y +CONFIG_SMP=y +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_NUMA=y +CONFIG_HZ=100 +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_USE_OF=y +CONFIG_FIRMWARE_MEMMAP=y +CONFIG_DMI_SYSFS=m +CONFIG_ACPI_TAD=y +# CONFIG_CPU_IDLE is not set +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +CONFIG_VHOST_CROSS_ENDIAN_LEGACY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODULE_FORCE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_PARTITION_ADVANCED=y +CONFIG_OSF_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +CONFIG_LDM_PARTITION=y +CONFIG_SGI_PARTITION=y +CONFIG_ULTRIX_PARTITION=y +# CONFIG_COMPAT_BRK is not set +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_CMA_AREAS=7 +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_TLS=m 
+CONFIG_TLS_DEVICE=y +CONFIG_XFRM_USER=m +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_STATISTICS=y +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_IP_MROUTE=y +CONFIG_NET_IPVTI=m +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_UDP_DIAG=m +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=m +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +CONFIG_IPV6_ILA=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_GRE=m +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +CONFIG_IPV6_SEG6_HMAC=y +CONFIG_NETFILTER=y +CONFIG_BRIDGE_NETFILTER=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +CONFIG_NETFILTER_XT_SET=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPCOMP=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m 
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_SET=m +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_LOG_ARP=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_TABLES_BRIDGE=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_CBS=m +CONFIG_NET_SCH_ETF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +CONFIG_NET_SCH_SKBPRIO=m +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=m +CONFIG_NET_SCH_CAKE=m +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +CONFIG_NET_SCH_DEFAULT=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=m +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_CMP=m 
+CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +CONFIG_NET_ACT_SKBMOD=m +CONFIG_NET_ACT_IFE=m +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_IFE_SKBMARK=m +CONFIG_NET_IFE_SKBPRIO=m +CONFIG_NET_IFE_SKBTCINDEX=m +CONFIG_OPENVSWITCH=m +CONFIG_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_CGROUP_NET_PRIO=y +# CONFIG_WIRELESS is not set +CONFIG_PCI=y +CONFIG_PCIEPORTBUS=y +CONFIG_PCIEAER=y +# CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y +CONFIG_PCI_IOV=y +CONFIG_UEVENT_HELPER=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +CONFIG_MTD=y +CONFIG_MTD_CMDLINE_PARTS=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_JEDECPROBE=y +CONFIG_MTD_CFI_ADV_OPTIONS=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_ROM=y +CONFIG_MTD_ABSENT=y +CONFIG_MTD_COMPLEX_MAPPINGS=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_PLATRAM=y +CONFIG_MTD_SPI_NOR=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_BLK_DEV_RAM_SIZE=5000000 +CONFIG_VIRTIO_BLK=y +CONFIG_BLK_DEV_NVME=y +CONFIG_NVME_MULTIPATH=y +CONFIG_NVME_RDMA=m +CONFIG_NVME_FC=y +CONFIG_NVME_TARGET=y +CONFIG_NVME_TARGET_LOOP=y +CONFIG_NVME_TARGET_RDMA=m +CONFIG_NVME_TARGET_FC=y +CONFIG_NVME_TARGET_FCLOOP=y +CONFIG_RAID_ATTRS=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_ST=y +CONFIG_BLK_DEV_SR=y +CONFIG_CHR_DEV_SG=y +CONFIG_CHR_DEV_SCH=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_SPI_ATTRS=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_SCSI_SAS_LIBSAS=y +CONFIG_SCSI_SAS_ATA=y +CONFIG_SCSI_SRP_ATTRS=y +CONFIG_ISCSI_TCP=m +CONFIG_ISCSI_BOOT_SYSFS=y +CONFIG_SCSI_CXGB3_ISCSI=m +CONFIG_SCSI_CXGB4_ISCSI=m +CONFIG_SCSI_BNX2_ISCSI=m +CONFIG_MEGARAID_SAS=m +CONFIG_SCSI_MPT3SAS=m +CONFIG_SCSI_DH=y +CONFIG_SCSI_DH_RDAC=y +CONFIG_SCSI_DH_HP_SW=y +CONFIG_SCSI_DH_EMC=y +CONFIG_SCSI_DH_ALUA=y +CONFIG_ATA=y +CONFIG_SATA_AHCI=y +# CONFIG_ATA_SFF is not set +CONFIG_MD=y +CONFIG_MD_LINEAR=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BCACHE=m +CONFIG_BCACHE_DEBUG=y +CONFIG_BCACHE_CLOSURES_DEBUG=y +CONFIG_BLK_DEV_DM=m +CONFIG_DM_DEBUG=y +CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING=y +CONFIG_DM_DEBUG_BLOCK_STACK_TRACING=y +CONFIG_DM_UNSTRIPED=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_THIN_PROVISIONING=m +CONFIG_DM_CACHE=m +CONFIG_DM_WRITECACHE=m +CONFIG_DM_ERA=m +CONFIG_DM_MIRROR=m +CONFIG_DM_LOG_USERSPACE=m +CONFIG_DM_RAID=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_DM_MULTIPATH_QL=m +CONFIG_DM_MULTIPATH_ST=m +CONFIG_DM_DELAY=m +CONFIG_DM_UEVENT=y +CONFIG_DM_FLAKEY=m +CONFIG_DM_VERITY=m +CONFIG_DM_VERITY_FEC=y +CONFIG_DM_SWITCH=m +CONFIG_DM_LOG_WRITES=m +CONFIG_DM_INTEGRITY=m +CONFIG_TARGET_CORE=m +CONFIG_TCM_IBLOCK=m +CONFIG_TCM_FILEIO=m +CONFIG_TCM_PSCSI=m +CONFIG_TCM_USER2=m +CONFIG_LOOPBACK_TARGET=m +CONFIG_ISCSI_TARGET=m +CONFIG_NET_FC=y +CONFIG_MACVLAN=m +CONFIG_MACVTAP=m +CONFIG_VIRTIO_NET=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_ARC is not set +CONFIG_CAVIUM_PTP=y +# CONFIG_NET_VENDOR_CIRRUS is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +CONFIG_E100=y +CONFIG_E1000=y 
+CONFIG_E1000E=y +CONFIG_IGB=y +CONFIG_IGBVF=m +CONFIG_IXGBE=m +CONFIG_IXGBEVF=m +CONFIG_I40E=y +CONFIG_I40EVF=y +# CONFIG_NET_VENDOR_MARVELL is not set +CONFIG_MLX4_EN=y +CONFIG_MLX5_CORE=m +CONFIG_MLX5_FPGA=y +CONFIG_MLX5_CORE_EN=y +CONFIG_MLXSW_CORE=y +CONFIG_MLXSW_PCI=y +CONFIG_MLXSW_I2C=y +CONFIG_MLXSW_MINIMAL=y +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_ROCKER is not set +# CONFIG_NET_VENDOR_SAMSUNG is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +CONFIG_INPUT_FF_MEMLESS=y +CONFIG_INPUT_MOUSEDEV=y +CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_LIBPS2=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_SERIAL_8250_PCI is not set +CONFIG_SERIAL_8250_SUNWAY=y +CONFIG_SERIAL_OF_PLATFORM=y +CONFIG_VIRTIO_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MUX=y +CONFIG_SPI=y +CONFIG_SPI_CHIP3=y +CONFIG_SPI_SPIDEV=y +CONFIG_SENSORS_PVT=y +CONFIG_SENSORS_LM75=y +CONFIG_SSB=y +CONFIG_DRM=y +CONFIG_DRM_RADEON=y +CONFIG_DRM_AST=y +CONFIG_DRM_VIRTIO_GPU=y +CONFIG_LCD_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +CONFIG_INFINIBAND_MTHCA=m +# CONFIG_INFINIBAND_MTHCA_DEBUG is not set +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_RTC_CLASS=y +# CONFIG_RTC_NVMEM is not set +# CONFIG_RTC_INTF_PROC is not set +CONFIG_RTC_DRV_PCF8523=y +CONFIG_UIO=y +CONFIG_UIO_PCI_GENERIC=m +CONFIG_VIRTIO_PCI=y +# CONFIG_VIRTIO_PCI_LEGACY is not set +CONFIG_VIRTIO_MMIO=y +CONFIG_STAGING=y +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_SUNWAY_IOMMU=y +CONFIG_SW64_LPC_INTC=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_XFS_FS=y +CONFIG_GFS2_FS=y +CONFIG_FANOTIFY=y +CONFIG_QUOTA=y +CONFIG_FUSE_FS=y +CONFIG_FSCACHE=y +CONFIG_ISO9660_FS=y +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=y +CONFIG_MSDOS_FS=y +CONFIG_VFAT_FS=y +CONFIG_FAT_DEFAULT_UTF8=y +CONFIG_NTFS_FS=y +CONFIG_NTFS_RW=y +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_HUGETLBFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=y +CONFIG_NFS_SWAP=y +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_NFS_V4_1_MIGRATION=y +CONFIG_ROOT_NFS=y +CONFIG_NFS_FSCACHE=y +CONFIG_NFS_USE_LEGACY_DNS=y +CONFIG_NFSD=m +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_SCSILAYOUT=y +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=y +CONFIG_NLS_CODEPAGE_950=y 
+CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=y +CONFIG_SECURITY=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_PATH=y +CONFIG_CRYPTO_AUTHENC=y +CONFIG_CRYPTO_AES=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_SEQIV=y +CONFIG_CRYPTO_ECHAINIV=y +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_HW is not set +CONFIG_CONSOLE_LOGLEVEL_QUIET=7 +# CONFIG_FRAME_POINTER is not set +CONFIG_SCHEDSTATS=y +# CONFIG_RCU_TRACE is not set diff --git a/arch/sw_64/include/asm/Kbuild b/arch/sw_64/include/asm/Kbuild new file mode 100644 index 0000000000000000000000000000000000000000..0dd0a704d8f157b5aebd4e9c4715e28c7227d006 --- /dev/null +++ b/arch/sw_64/include/asm/Kbuild @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +generic-y += clkdev.h +generic-y += export.h +generic-y += kvm_types.h +generic-y += mcs_spinlock.h +generic-y += param.h +generic-y += qrwlock.h +generic-y += qspinlock.h +generic-y += rwsem.h +generic-y += seccomp.h +generic-y += segment.h +generic-y += types.h +generic-y += user.h + +generated-y += syscall_table.h diff --git a/arch/sw_64/include/asm/acenv.h b/arch/sw_64/include/asm/acenv.h new file mode 100644 index 0000000000000000000000000000000000000000..53b2898718fe9d08ad95facb8f8b58cd36aa674d --- /dev/null +++ b/arch/sw_64/include/asm/acenv.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_ACENV_H +#define _ASM_SW64_ACENV_H + +#define COMPILER_DEPENDENT_INT64 long +#define COMPILER_DEPENDENT_UINT64 unsigned long + +/* + * Calling conventions: + * + * ACPI_SYSTEM_XFACE - Interfaces to host OS (handlers, threads) + * ACPI_EXTERNAL_XFACE - External ACPI interfaces + * ACPI_INTERNAL_XFACE - Internal ACPI interfaces + * ACPI_INTERNAL_VAR_XFACE - Internal variable-parameter list interfaces + */ +#define ACPI_SYSTEM_XFACE +#define ACPI_EXTERNAL_XFACE +#define ACPI_INTERNAL_XFACE +#define ACPI_INTERNAL_VAR_XFACE + +/* Asm macros */ +#define ACPI_FLUSH_CPU_CACHE() + +int __acpi_acquire_global_lock(unsigned int *lock); +int __acpi_release_global_lock(unsigned int *lock); + +#define ACPI_ACQUIRE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_acquire_global_lock(&facs->global_lock)) + +#define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ + ((Acq) = __acpi_release_global_lock(&facs->global_lock)) + +/* + * Math helper asm macros + */ +#define ACPI_DIV_64_BY_32(n_hi, n_lo, d32, q32, r32) + +#define ACPI_SHIFT_RIGHT_64(n_hi, n_lo) +#endif /* _ASM_SW64_ACENV_H */ diff --git a/arch/sw_64/include/asm/acpi.h b/arch/sw_64/include/asm/acpi.h new file mode 100644 index 0000000000000000000000000000000000000000..ef46f481e1fdffa89929a0857e85b3d5055c5bad --- /dev/null +++ b/arch/sw_64/include/asm/acpi.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0 */ 
+
+#ifndef _ASM_SW64_ACPI_H
+#define _ASM_SW64_ACPI_H
+
+#include
+#include
+#include
+#include
+
+#ifdef CONFIG_ACPI
+extern int acpi_noirq;
+extern int acpi_strict;
+extern int acpi_disabled;
+extern int acpi_pci_disabled;
+
+/* _ASM_SW64_PDC_H */
+#define ACPI_PDC_P_FFH			(0x0001)
+#define ACPI_PDC_C_C1_HALT		(0x0002)
+#define ACPI_PDC_T_FFH			(0x0004)
+#define ACPI_PDC_SMP_C1PT		(0x0008)
+#define ACPI_PDC_SMP_C2C3		(0x0010)
+#define ACPI_PDC_SMP_P_SWCOORD		(0x0020)
+#define ACPI_PDC_SMP_C_SWCOORD		(0x0040)
+#define ACPI_PDC_SMP_T_SWCOORD		(0x0080)
+#define ACPI_PDC_C_C1_FFH		(0x0100)
+#define ACPI_PDC_C_C2C3_FFH		(0x0200)
+#define ACPI_PDC_SMP_P_HWCOORD		(0x0800)
+
+#define ACPI_PDC_EST_CAPABILITY_SMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_EST_CAPABILITY_SWSMP	(ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_SMP_P_SWCOORD | \
+					 ACPI_PDC_SMP_P_HWCOORD | \
+					 ACPI_PDC_P_FFH)
+
+#define ACPI_PDC_C_CAPABILITY_SMP	(ACPI_PDC_SMP_C2C3 | \
+					 ACPI_PDC_SMP_C1PT | \
+					 ACPI_PDC_C_C1_HALT | \
+					 ACPI_PDC_C_C1_FFH | \
+					 ACPI_PDC_C_C2C3_FFH)
+
+#define ACPI_TABLE_UPGRADE_MAX_PHYS MEMBLOCK_ALLOC_ACCESSIBLE
+
+/*
+ * The value 64 is used only because it is the most common choice on
+ * other architectures. SW64 itself has no fixmap area in its memory
+ * layout.
+ */
+#define NR_FIX_BTMAPS 64
+
+static inline void disable_acpi(void)
+{
+	acpi_disabled = 1;
+	acpi_pci_disabled = 1;
+	acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+	acpi_disabled = 0;
+	acpi_pci_disabled = 0;
+	acpi_noirq = 0;
+}
+
+static inline void acpi_noirq_set(void)
+{
+	acpi_noirq = 1;
+}
+
+static inline void acpi_disable_pci(void)
+{
+	acpi_pci_disabled = 1;
+	acpi_noirq_set();
+}
+
+static inline bool acpi_has_cpu_in_madt(void)
+{
+	return true;
+}
+
+/* Low-level suspend routine.
*/ +extern int (*acpi_suspend_lowlevel)(void); +extern unsigned long long arch_acpi_wakeup_start; + +/* Physical address to resume after wakeup */ +#define acpi_wakeup_address arch_acpi_wakeup_start + +/* + * Check if the CPU can handle C2 and deeper + */ +static inline unsigned int acpi_processor_cstate_check(unsigned int max_cstate) +{ + return max_cstate; +} + +static inline bool arch_has_acpi_pdc(void) +{ + return false; +} + +static inline void arch_acpi_set_pdc_bits(u32 *buf) +{ +} +#else /* !CONFIG_ACPI */ + +static inline void acpi_noirq_set(void) { } +static inline void acpi_disable_pci(void) { } +static inline void disable_acpi(void) { } + +#endif /* !CONFIG_ACPI */ + +#define acpi_unlazy_tlb(x) +#endif /* _ASM_SW64_ACPI_H */ diff --git a/arch/sw_64/include/asm/asm-offsets.h b/arch/sw_64/include/asm/asm-offsets.h new file mode 100644 index 0000000000000000000000000000000000000000..d370ee36a182ba510c28459f856b17f321bd57fc --- /dev/null +++ b/arch/sw_64/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include diff --git a/arch/sw_64/include/asm/asm-prototypes.h b/arch/sw_64/include/asm/asm-prototypes.h new file mode 100644 index 0000000000000000000000000000000000000000..67746d6bffb725b41d5997f983cded406e63573a --- /dev/null +++ b/arch/sw_64/include/asm/asm-prototypes.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ASM_PROTOTYPES_H +#define _ASM_SW64_ASM_PROTOTYPES_H + +#include +#include +#include +#include +#include + +#include + +extern void __divl(void); +extern void __reml(void); +extern void __divw(void); +extern void __remw(void); +extern void __divlu(void); +extern void __remlu(void); +extern void __divwu(void); +extern void __remwu(void); + +#endif /* _ASM_SW64_ASM_PROTOTYPES_H */ diff --git a/arch/sw_64/include/asm/ast2400.h b/arch/sw_64/include/asm/ast2400.h new file mode 100644 index 0000000000000000000000000000000000000000..5f4cc84ff3a8cb729ce2d59c796274b548ccd7c9 --- /dev/null +++ b/arch/sw_64/include/asm/ast2400.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015 Weiqiang Su + * + * Both AST2400D and AST2400F package variants are supported. + */ + +#ifndef _ASM_SW64_AST2400_H +#define _ASM_SW64_AST2400_H + +#include + +/* Logical Device Numbers (LDN). */ +#define AST2400_FDC 0x00 /* Floppy */ +#define AST2400_PP 0x01 /* Parallel port */ +#define AST2400_SP1 0x02 /* Com1 */ +#define AST2400_SP2 0x03 /* Com2 & IR */ +#define AST2400_KBC 0x05 /* PS/2 keyboard and mouse */ +#define AST2400_CIR 0x06 +#define AST2400_GPIO6789_V 0x07 +#define AST2400_WDT1_GPIO01A_V 0x08 +#define AST2400_GPIO1234567_V 0x09 +#define AST2400_ACPI 0x0A +#define AST2400_HWM_FPLED 0x0B /* Hardware monitor & front LED */ +#define AST2400_VID 0x0D +#define AST2400_CIRWKUP 0x0E /* CIR wakeup */ +#define AST2400_GPIO_PP_OD 0x0F /* GPIO Push-Pull/Open drain select */ +#define AST2400_SVID 0x14 +#define AST2400_DSLP 0x16 /* Deep sleep */ +#define AST2400_GPIOA_LDN 0x17 + +/* virtual LDN for GPIO and WDT */ +#define AST2400_WDT1 ((0 << 8) | AST2400_WDT1_GPIO01A_V) + +#define AST2400_GPIOBASE ((0 << 8) | AST2400_WDT1_GPIO01A_V) //? 
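+
+/*
+ * A "virtual" LDN packs a register-bank index into the byte above the
+ * hardware logical device number: AST2400_GPIO2 below, for instance,
+ * selects bank 2 of LDN 0x09.  pnp_set_logical_device() later in this
+ * header writes only the low byte (dev->device & 0xff) to the LDN
+ * select register 0x07.
+ */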
+ +#define AST2400_GPIO0 ((1 << 8) | AST2400_WDT1_GPIO01A_V) +#define AST2400_GPIO1 ((1 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO2 ((2 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO3 ((3 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO4 ((4 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO5 ((5 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO6 ((6 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO7 ((7 << 8) | AST2400_GPIO1234567_V) +#define AST2400_GPIO8 ((0 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIO9 ((1 << 8) | AST2400_GPIO6789_V) +#define AST2400_GPIOA ((2 << 8) | AST2400_WDT1_GPIO01A_V) + +#define SUPERIO_PNP_PORT 0x2E +#define SUPERIO_CHIPID 0xC333 + +struct device_operations; +typedef struct pnp_device { + unsigned int port; + unsigned int device; + + struct device_operations *ops; +} *device_t; + +struct pnp_mode_ops { + void (*enter_conf_mode)(device_t dev); + void (*exit_conf_mode)(device_t dev); +}; + + +struct device_operations { + void (*read_resources)(device_t dev); + void (*set_resources)(device_t dev); + void (*enable_resources)(device_t dev); + void (*init)(device_t dev); + void (*final)(device_t dev); + void (*enable)(device_t dev); + void (*disable)(device_t dev); + + const struct pnp_mode_ops *ops_pnp_mode; +}; + +/* PNP helper operations */ +struct io_info { + unsigned int mask, set; +}; + +struct pnp_info { + bool enabled; /* set if we should enable the device */ + struct pnp_device pnp_device; + unsigned int function; /* Must be at least 16 bits (virtual LDNs)! */ +}; + +/* Chip operations */ +struct chip_operations { + void (*enable_dev)(struct device *dev); + void (*init)(void *chip_info); + void (*final)(void *chip_info); + unsigned int initialized : 1; + unsigned int finalized : 1; + const char *name; +}; + +typedef struct superio_ast2400_device { + struct device *dev; + const char *name; + unsigned int enabled : 1; /* set if we should enable the device */ + unsigned int superio_ast2400_efir; /* extended function index register */ + unsigned int superio_ast2400_efdr; /* extended function data register */ + struct chip_operations *chip_ops; + const void *chip_info; +} *superio_device_t; + + +static inline void pnp_enter_conf_mode_a5a5(device_t dev) +{ + outb(0xa5, dev->port); + outb(0xa5, dev->port); +} + +static inline void pnp_exit_conf_mode_aa(device_t dev) +{ + outb(0xaa, dev->port); +} + +/* PNP config mode wrappers */ + +static inline void pnp_enter_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->enter_conf_mode(dev); +} + +static inline void pnp_exit_conf_mode(device_t dev) +{ + if (dev->ops->ops_pnp_mode) + dev->ops->ops_pnp_mode->exit_conf_mode(dev); +} + +/* PNP device operations */ +static inline u8 pnp_read_config(device_t dev, u8 reg) +{ + outb(reg, dev->port); + return inb(dev->port + 1); +} + +static inline void pnp_write_config(device_t dev, u8 reg, u8 value) +{ + outb(reg, dev->port); + outb(value, dev->port + 1); +} + +static inline void pnp_set_logical_device(device_t dev) +{ + pnp_write_config(dev, 0x07, dev->device & 0xff); +// pnp_write_config(dev, 0x07, 0x3); +} + +static inline void pnp_set_enable(device_t dev, int enable) +{ + u8 tmp; + + tmp = pnp_read_config(dev, 0x30); + + if (enable) + tmp |= 1; + else + tmp &= ~1; + + pnp_write_config(dev, 0x30, tmp); +} + +#endif /* _ASM_SW64_AST2400_H */ diff --git a/arch/sw_64/include/asm/atomic.h b/arch/sw_64/include/asm/atomic.h new file mode 100644 index 
0000000000000000000000000000000000000000..4a68da09722c352f136ec591df023413522fcfcc --- /dev/null +++ b/arch/sw_64/include/asm/atomic.h @@ -0,0 +1,547 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ATOMIC_H +#define _ASM_SW64_ATOMIC_H + +#include +#include +#include + +/* + * Atomic operations that C can't guarantee us. Useful for + * resource counting etc... + * + * But use these as seldom as possible since they are much slower + * than regular operations. + */ + +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } + +#define arch_atomic_read(v) READ_ONCE((v)->counter) +#define arch_atomic64_read(v) READ_ONCE((v)->counter) + +#define arch_atomic_set(v, i) WRITE_ONCE((v)->counter, (i)) +#define arch_atomic64_set(v, i) WRITE_ONCE((v)->counter, (i)) + +/* + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + */ +#define arch_atomic64_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new)) + +#define arch_atomic_cmpxchg(v, old, new) (arch_cmpxchg(&((v)->counter), old, new)) +#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new)) + + +#ifdef CONFIG_SUBARCH_C3B +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " seleq %4, 1, $31, %4\n" + " wr_f %4\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %4, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
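+ *
+ * On C3B the conditional store is predicated through the wr_f/rd_f
+ * flag pair: the flag is set only when the old value is positive, so
+ * the lstl is attempted only in that case, and the sequence branches
+ * back to the lldl when the conditional store fails.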
+ */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " seleq %0, 1, $31, %0\n" + " wr_f %0\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %0, 2f\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + + + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, temp2, addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %0, 0(%3)\n" \ + " rd_f %0\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " ldi %1, 1\n" \ + " wr_f %1\n" \ + " " 
#asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: br 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#else /* !CONFIG_SUBARCH_C3B */ + +/** + * arch_atomic_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addw %0, %6, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/** + * arch_atomic64_fetch_add_unless - add unless the number is a given value + * @v: pointer of type atomic64_t + * @a: the amount to add to v... + * @u: ...unless v is equal to u. + * + * Atomically adds @a to @v, so long as it was not @u. + * Returns the old value of @v. + */ +static inline long arch_atomic64_fetch_add_unless(atomic64_t *v, long a, long u) +{ + long old, new, c; + unsigned long addr; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %4\n" + " bne %4, 2f\n" + " addl %0, %6, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (old), "=&r" (new), "=m" (v->counter), "=&r" (addr), "=&r" (c) + : "Ir" (u), "Ir" (a), "m" (v->counter)); + return old; +} + +/* + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive + * @v: pointer of type atomic_t + * + * The function returns the old value of *v minus 1, even if + * the atomic variable, v, was not decremented. 
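+ *
+ * The C4 variant needs no wr_f/rd_f predication: it branches around
+ * the store when the old value is not positive, and lbr restarts the
+ * lldl/lstl sequence if the conditional store fails.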
+ */ +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + unsigned long old, temp1, addr, temp2; + + __asm__ __volatile__( + " ldi %3, %2\n" + "1: lldl %4, 0(%3)\n" + " cmple %4, 0, %0\n" + " bne %0, 2f\n" + " subl %4, 1, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr), "=&r" (old) + : "m" (v->counter)); + return old - 1; +} + +#define ATOMIC_OP(op, asm_op) \ +static inline void arch_atomic_##op(int i, atomic_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldw %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstw %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC_OP_RETURN(op, asm_op) \ +static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#define ATOMIC_FETCH_OP(op, asm_op) \ +static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \ +{ \ + int temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldw %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstw %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + + +#define ATOMIC64_OP(op, asm_op) \ +static inline void arch_atomic64_##op(long i, atomic64_t *v) \ +{ \ + unsigned long temp1, addr; \ + __asm__ __volatile__( \ + " ldi %2, %1\n" \ + "1: lldl %0, 0(%2)\n" \ + " " #asm_op " %0, %3, %0\n" \ + " lstl %0, 0(%2)\n" \ + " beq %0, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ +} \ + + +#define ATOMIC64_OP_RETURN(op, asm_op) \ +static inline long arch_atomic64_##op##_return_relaxed(long i, atomic64_t *v)\ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " " #asm_op " %0, %4, %0\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} + +#define ATOMIC64_FETCH_OP(op, asm_op) \ +static inline long arch_atomic64_fetch_##op##_relaxed(long i, atomic64_t *v) \ +{ \ + long temp1, temp2; \ + unsigned long addr; \ + __asm__ __volatile__( \ + " ldi %3, %2\n" \ + "1: lldl %0, 0(%3)\n" \ + " " #asm_op " %0, %4, %1\n" \ + " lstl %1, 0(%3)\n" \ + " beq %1, 2f\n" \ + ".subsection 2\n" \ + "2: lbr 1b\n" \ + ".previous" \ + : "=&r" (temp1), "=&r" (temp2), "=m" (v->counter), "=&r" (addr) \ + : "Ir" (i), "m" (v->counter)); \ + return temp1; \ +} \ + +#endif /* CONFIG_SUBARCH_C3B */ + +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless +#define arch_atomic64_fetch_add_unless 
arch_atomic64_fetch_add_unless +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive + +#define ATOMIC_OPS(op) \ + ATOMIC_OP(op, op##w) \ + ATOMIC_OP_RETURN(op, op##w) \ + ATOMIC_FETCH_OP(op, op##w) \ + ATOMIC64_OP(op, op##l) \ + ATOMIC64_OP_RETURN(op, op##l) \ + ATOMIC64_FETCH_OP(op, op##l) \ + +ATOMIC_OPS(add) +ATOMIC_OPS(sub) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed + +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed + + + + +#undef ATOMIC_OPS + +#define ATOMIC_OPS(op, asm) \ + ATOMIC_OP(op, asm) \ + ATOMIC_FETCH_OP(op, asm) \ + ATOMIC64_OP(op, asm) \ + ATOMIC64_FETCH_OP(op, asm) \ + + +ATOMIC_OPS(and, and) +ATOMIC_OPS(andnot, bic) +ATOMIC_OPS(or, bis) +ATOMIC_OPS(xor, xor) + + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed + +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed + + +#undef ATOMIC_OPS +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_OP_RETURN +#undef ATOMIC64_OP +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN +#undef ATOMIC_OP + +#define arch_atomic_andnot arch_atomic_andnot +#define arch_atomic64_andnot arch_atomic64_andnot + +#endif /* _ASM_SW64_ATOMIC_H */ diff --git a/arch/sw_64/include/asm/barrier.h b/arch/sw_64/include/asm/barrier.h new file mode 100644 index 0000000000000000000000000000000000000000..bff199126c9fa49c44028fab3d4a10e0a18ce136 --- /dev/null +++ b/arch/sw_64/include/asm/barrier.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BARRIER_H +#define _ASM_SW64_BARRIER_H + +#include + +#define mb() __asm__ __volatile__("memb" : : : "memory") + +#define rmb() __asm__ __volatile__("memb" : : : "memory") + +#if defined(CONFIG_SUBARCH_C3B) +#define wmb() __asm__ __volatile__("memb" : : : "memory") +#elif defined(CONFIG_SUBARCH_C4) +#define wmb() __asm__ __volatile__("wmemb" : : : "memory") +#endif + +#define imemb() __asm__ __volatile__("imemb" : : : "memory") + +#ifdef CONFIG_SMP +#define __ASM_SMP_MB "\tmemb\n" +#else +#define __ASM_SMP_MB +#endif + +#define __smp_mb__before_atomic() barrier() +#define __smp_mb__after_atomic() barrier() + +#include + +#endif /* _ASM_SW64_BARRIER_H */ diff --git a/arch/sw_64/include/asm/bitops.h b/arch/sw_64/include/asm/bitops.h new file mode 100644 index 0000000000000000000000000000000000000000..b3cdabd95abfc0b4d49591d5b1f9d5e7fefe230a --- /dev/null +++ b/arch/sw_64/include/asm/bitops.h @@ -0,0 +1,566 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_BITOPS_H +#define _ASM_SW64_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error only can be included directly +#endif + +#include +#include + +#ifdef CONFIG_SUBARCH_C3B +/* + * These have to be 
done with inline assembly: that way the bit-setting + * is guaranteed to be atomic. All bit operations return 0 if the bit + * was cleared before the operation and != 0 if it was not. + * + * To get proper branch prediction for the main line, we must branch + * forward to code at the end of this object's .text section, then + * branch back to restart the operation. + * + * bit 0 is the LSB of addr; bit 64 is the LSB of (addr+1). + */ + +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " seleq %3, 1, $31, %1\n" + " wr_f %1\n" + " bis %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " bne %3, 2f\n" // %3 is not zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, temp2, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %4, %6\n" + "1: lldw %0, 0(%4)\n" + " and %0, %5, %3\n" + " selne %3, 1, $31, %1\n" //Note: here is SELNE!!! + " wr_f %1\n" + " bic %0, %5, %0\n" + " lstw %0, 0(%4)\n" + " rd_f %0\n" + " beq %3, 2f\n" // %3 is zero, no need to set, return + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp1), "=&r" (temp2), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %2, 1\n" + " wr_f %2\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " rd_f %0\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +#else /* !CONFIG_SUBARCH_C3B */ +static inline void +set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bis %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " bic %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline void +change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %2, %4\n" + "1: lldw %0, 0(%2)\n" + " xor %0, %3, %0\n" + " lstw %0, 0(%2)\n" + " beq %0, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m)); +} + +static inline int +test_and_set_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. 
+ "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_set_bit_lock(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " bne %2, 2f\n" // %2 is not zero, no need to set, return + " bis %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_clear_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp1, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " beq %2, 2f\n" // %2 is zero, no need to set, return + " bic %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" // failed to set, try again. + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp1), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + +static inline int +test_and_change_bit(unsigned long nr, volatile void *addr) +{ + unsigned long oldbit; + unsigned long temp, base; + int *m = ((int *) addr) + (nr >> 5); + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " and %0, %4, %2\n" + " xor %0, %4, %0\n" + " lstw %0, 0(%3)\n" + " beq %0, 3f\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (temp), "=m" (*m), "=&r" (oldbit), "=&r" (base) + : "Ir" (1UL << (nr & 31)), "m" (*m) : "memory"); + + return oldbit != 0; +} + + +#endif /* CONFIG_SUBARCH_C3B */ + +/* + * WARNING: non atomic version. 
+ */ +static __always_inline void +arch___set_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m |= 1 << (nr & 31); +} + +#define smp_mb__before_clear_bit() smp_mb() +#define smp_mb__after_clear_bit() smp_mb() + +static inline void +clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + clear_bit(nr, addr); +} + +static __always_inline void +arch___clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m &= ~(1 << (nr & 31)); +} + +static inline void +__clear_bit_unlock(unsigned long nr, volatile void *addr) +{ + smp_mb(); + arch___clear_bit(nr, addr); +} + +static __always_inline void +arch___change_bit(unsigned long nr, volatile unsigned long *addr) +{ + int *m = ((int *) addr) + (nr >> 5); + + *m ^= 1 << (nr & 31); +} + +static __always_inline bool +arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old | mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old & ~mask; + return (old & mask) != 0; +} + +static __always_inline bool +arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr) +{ + unsigned long mask = 1 << (nr & 0x1f); + int *m = ((int *) addr) + (nr >> 5); + int old = *m; + + *m = old ^ mask; + return (old & mask) != 0; +} + +#define arch_test_bit generic_test_bit +#define arch_test_bit_acquire generic_test_bit_acquire + +/* + * ffz = Find First Zero in word. Undefined if no zero exists, + * so code should check against ~0UL first.. + * + * Do a binary search on the bits. Due to the nature of large + * constants on the sw64, it is worthwhile to split the search. + */ +static inline unsigned long ffz_b(unsigned long x) +{ + unsigned long sum, x1, x2, x4; + + x = ~x & -~x; /* set first 0 bit, clear others */ + x1 = x & 0xAA; + x2 = x & 0xCC; + x4 = x & 0xF0; + sum = x2 ? 2 : 0; + sum += (x4 != 0) * 4; + sum += (x1 != 0); + + return sum; +} + +static inline unsigned long ffz(unsigned long word) +{ + return __kernel_cttz(~word); +} + +/* + * __ffs = Find First set bit in word. Undefined if no set bit exists. + */ +static inline unsigned long __ffs(unsigned long word) +{ + return __kernel_cttz(word); +} + +#ifdef __KERNEL__ + +/* + * ffs: find first bit set. This is defined the same way as + * the libc and compiler builtin ffs routines, therefore + * differs in spirit from the above __ffs. + */ + +static inline int ffs(int word) +{ + int result = __ffs(word) + 1; + + return word ? result : 0; +} + +/* + * fls: find last bit set. + */ +static inline int fls64(unsigned long word) +{ + return 64 - __kernel_ctlz(word); +} + +static inline unsigned long __fls(unsigned long x) +{ + return fls64(x) - 1; +} + +static inline int fls(int x) +{ + return fls64((unsigned int) x); +} + +/* + * hweightN: returns the hamming weight (i.e. 
the number
+ * of bits set) of an N-bit word
+ */
+
+static inline unsigned long __arch_hweight64(unsigned long w)
+{
+	return __kernel_ctpop(w);
+}
+
+static inline unsigned int __arch_hweight32(unsigned int w)
+{
+	return __arch_hweight64(w);
+}
+
+static inline unsigned int __arch_hweight16(unsigned int w)
+{
+	return __arch_hweight64(w & 0xffff);
+}
+
+static inline unsigned int __arch_hweight8(unsigned int w)
+{
+	return __arch_hweight64(w & 0xff);
+}
+
+#include
+
+#endif /* __KERNEL__ */
+
+#ifdef __KERNEL__
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is set.
+ */
+static inline unsigned long
+sched_find_first_bit(const unsigned long b[2])
+{
+	unsigned long b0, b1, ofs, tmp;
+
+	b0 = b[0];
+	b1 = b[1];
+	ofs = (b0 ? 0 : 64);
+	tmp = (b0 ? b0 : b1);
+
+	return __ffs(tmp) + ofs;
+}
+
+#include
+
+#include
+
+#include
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SW64_BITOPS_H */
diff --git a/arch/sw_64/include/asm/bug.h b/arch/sw_64/include/asm/bug.h
new file mode 100644
index 0000000000000000000000000000000000000000..4a179f236ccf9cff8f664ac291af4990cea25cf7
--- /dev/null
+++ b/arch/sw_64/include/asm/bug.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_BUG_H
+#define _ASM_SW64_BUG_H
+
+#include
+#include
+
+#endif /* _ASM_SW64_BUG_H */
diff --git a/arch/sw_64/include/asm/cache.h b/arch/sw_64/include/asm/cache.h
new file mode 100644
index 0000000000000000000000000000000000000000..6a6ce4e99265a02576d448ec0577d486c7b16ffe
--- /dev/null
+++ b/arch/sw_64/include/asm/cache.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm/cache.h
+ */
+#ifndef _ASM_SW64_CACHE_H
+#define _ASM_SW64_CACHE_H
+
+#define L1_CACHE_SHIFT		7
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define SMP_CACHE_BYTES		L1_CACHE_BYTES
+
+#endif /* _ASM_SW64_CACHE_H */
diff --git a/arch/sw_64/include/asm/cacheflush.h b/arch/sw_64/include/asm/cacheflush.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d49830b8493464a520276661cc49dca9f04a0a4
--- /dev/null
+++ b/arch/sw_64/include/asm/cacheflush.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_CACHEFLUSH_H
+#define _ASM_SW64_CACHEFLUSH_H
+
+/*
+ * DCache: PIPT
+ * ICache:
+ *	- C3B is VIVT with ICTAG, supports coherence.
+ * - C4 is VIPT + */ +#include + +#endif /* _ASM_SW64_CACHEFLUSH_H */ diff --git a/arch/sw_64/include/asm/checksum.h b/arch/sw_64/include/asm/checksum.h new file mode 100644 index 0000000000000000000000000000000000000000..7f3768290402bea31e0e5c445716012e81d6ad4e --- /dev/null +++ b/arch/sw_64/include/asm/checksum.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CHECKSUM_H +#define _ASM_SW64_CHECKSUM_H + +#include + +#define extll(x, y, z) \ + ({__asm__ __volatile__("extll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define exthl(x, y, z) \ + ({__asm__ __volatile__("exthl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskll(x, y, z) \ + ({__asm__ __volatile__("maskll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define maskhl(x, y, z) \ + ({__asm__ __volatile__("maskhl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define insll(x, y, z) \ + ({__asm__ __volatile__("insll %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +#define inshl(x, y, z) \ + ({__asm__ __volatile__("inshl %1, %2, %0" : "=r" (z) \ + : "r" (x), "r" (y)); }) + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl); + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +extern __wsum csum_partial(const void *buff, int len, __wsum sum); + +/* + * the same as csum_partial, but copies from src while it + * checksums + * + * here even more important to align src and dst on a 32-bit (or even + * better 64-bit) boundary + */ +#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER +#define _HAVE_ARCH_CSUM_AND_COPY +__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len); + +__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ + +extern __sum16 ip_compute_csum(const void *buff, int len); + +/* + * Fold a partial checksum without adding pseudo headers + */ + +static inline __sum16 csum_fold(__wsum csum) +{ + u32 sum = (__force u32)csum; + + sum = (sum & 0xffff) + (sum >> 16); + sum = (sum & 0xffff) + (sum >> 16); + return (__force __sum16)~sum; +} + +#define _HAVE_ARCH_IPV6_CSUM +extern __sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, __u32 len, + __u8 proto, __wsum sum); + +static inline unsigned short from64to16(unsigned long x) +{ + /* + * Using extract instructions is a bit more efficient + * than the original shift/bitmask version. + */ + + union { + unsigned long ul; + unsigned int ui[2]; + unsigned short us[4]; + } in_v, tmp_v, out_v; + + in_v.ul = x; + tmp_v.ul = (unsigned long)in_v.ui[0] + (unsigned long)in_v.ui[1]; + + /* + * Since the bits of tmp_v.sh[3] are going to always be zero, + * we don't have to bother to add that in. 
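+ *
+ * The two 32-bit halves sum to at most a 33-bit value, so the top
+ * 16-bit chunk of tmp_v is zero; adding the remaining three 16-bit
+ * chunks then yields at most an 18-bit value, so only the low two
+ * chunks of out_v can be non-zero for the final add.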
+ */ + out_v.ul = (unsigned long)tmp_v.us[0] + (unsigned long)tmp_v.us[1] + + (unsigned long)tmp_v.us[2]; + + /* Similarly, out_v.us[2] is always zero for the final add. */ + return out_v.us[0] + out_v.us[1]; +} + +#endif /* _ASM_SW64_CHECKSUM_H */ diff --git a/arch/sw_64/include/asm/cmpxchg.h b/arch/sw_64/include/asm/cmpxchg.h new file mode 100644 index 0000000000000000000000000000000000000000..9f51d035313d92c00e67ad8d1183f16fe26154be --- /dev/null +++ b/arch/sw_64/include/asm/cmpxchg.h @@ -0,0 +1,73 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CMPXCHG_H +#define _ASM_SW64_CMPXCHG_H + +/* + * Atomic exchange routines. + */ + +#define __ASM__MB +#define ____xchg(type, args...) __arch_xchg ## type ## _local(args) +#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args) +#include + +#define arch_xchg_local(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg_local(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_local((ptr), (o), (n)); \ +}) + +#ifdef CONFIG_SMP +#undef __ASM__MB +#define __ASM__MB "\tmemb\n" +#endif +#undef ____xchg +#undef ____cmpxchg +#undef _ASM_SW64_XCHG_H +#define ____xchg(type, args...) __arch_xchg ##type(args) +#define ____cmpxchg(type, args...) __cmpxchg ##type(args) +#include + +#define arch_xchg(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __arch_xchg((ptr), (unsigned long)_x_, \ + sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \ + (unsigned long)_n_, sizeof(*(ptr)));\ +}) + +#define arch_cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ +}) + +#undef __ASM__MB +#undef ____cmpxchg + +#define __HAVE_ARCH_CMPXCHG 1 + +#endif /* _ASM_SW64_CMPXCHG_H */ diff --git a/arch/sw_64/include/asm/core.h b/arch/sw_64/include/asm/core.h new file mode 100644 index 0000000000000000000000000000000000000000..2b6748cec93d459f9e2379399c92df746c682b29 --- /dev/null +++ b/arch/sw_64/include/asm/core.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CORE_H +#define _ASM_SW64_CORE_H + +#include + +#define II_II0 0 +#define II_II1 1 +#define II_SLEEP 2 +#define II_WAKE 3 +#define II_NMII 6 + +#define II_RESET II_NMII + +#if defined(CONFIG_SUBARCH_C3B) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 5 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 31 + +#define CORE_ID_BITS 5 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return 0; +} + +#elif defined(CONFIG_SUBARCH_C4) + +#define DOMAIN_ID_BITS 2 +#define DOMAIN_ID_SHIFT 12 + +#define THREAD_ID_BITS 1 +#define THREAD_ID_SHIFT 8 + +#define CORE_ID_BITS 6 +#define CORE_ID_SHIFT 0 + +static inline bool core_is_ht(void) +{ + return rdhtctl() == 0x3; +} + +#endif + +#define DOMAIN_ID_MASK (GENMASK(DOMAIN_ID_BITS - 1, 0) << DOMAIN_ID_SHIFT) +#define THREAD_ID_MASK (GENMASK(THREAD_ID_BITS - 1, 0) << THREAD_ID_SHIFT) +#define CORE_ID_MASK (GENMASK(CORE_ID_BITS - 1, 0) << CORE_ID_SHIFT) +#define MAX_CORES_PER_CPU (1 << CORE_ID_BITS) + +/* + * 0x00 ~ 0xff for hardware 
mm fault + */ + +#define MMCSR__TNV 0x0 +#define MMCSR__IACV 0x1 +#define MMCSR__FOR 0x2 +#define MMCSR__FOE 0x3 +#define MMCSR__FOW 0x4 + +#define MMCSR__BAD_DVA 0x6 +#define MMCSR__ACV1 0x7 +#define MMCSR__ACV0 0xc +#define MMCSR__BAD_IVA 0xf + +/* 0x100 ~ 0x1ff for match debug */ +#define MMCSR__DA_MATCH 0x100 +#define MMCSR__DV_MATCH 0x101 +#define MMCSR__DAV_MATCH 0x102 +#define MMCSR__IA_MATCH 0x103 +#define MMCSR__IDA_MATCH 0x104 +#define MMCSR__IV_MATCH 0x105 + + /* entry.S */ +extern void entArith(void); +extern void entIF(void); +extern void entInt(void); +extern void entMM(void); +extern void entSys(void); +extern void entUna(void); +/* head.S */ +extern void __smp_callin(unsigned long args); +#endif /* _ASM_SW64_CORE_H */ diff --git a/arch/sw_64/include/asm/cpu.h b/arch/sw_64/include/asm/cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..4da30bb91d89602e48c55c01fdff08d3f495b45c --- /dev/null +++ b/arch/sw_64/include/asm/cpu.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPU_H +#define _ASM_SW64_CPU_H + +#endif /* _ASM_SW64_CPU_H */ diff --git a/arch/sw_64/include/asm/cpufreq.h b/arch/sw_64/include/asm/cpufreq.h new file mode 100644 index 0000000000000000000000000000000000000000..cf47f1fc6866860b56ec2112abc1a1449ff66d72 --- /dev/null +++ b/arch/sw_64/include/asm/cpufreq.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_CPUFREQ_H +#define _ASM_SW64_CPUFREQ_H + +#include +#include +#include +#include +#include + +struct clk; + +extern char curruent_policy[CPUFREQ_NAME_LEN]; + +struct clk_ops { + void (*init)(struct clk *clk); + void (*enable)(struct clk *clk); + void (*disable)(struct clk *clk); + void (*recalc)(struct clk *clk); + int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); + long (*round_rate)(struct clk *clk, unsigned long rate); +}; + +struct clk { + struct list_head node; + const char *name; + int id; + struct module *owner; + + struct clk *parent; + const struct clk_ops *ops; + + struct kref kref; + + unsigned long rate; + unsigned long flags; +}; + +#define CLK_ALWAYS_ENABLED (1 << 0) +#define CLK_RATE_PROPAGATES (1 << 1) + +#define CLK_PRT 0x1UL +#define CORE_CLK0_V (0x1UL << 1) +#define CORE_CLK0_R (0x1UL << 2) +#define CORE_CLK2_V (0x1UL << 15) +#define CORE_CLK2_R (0x1UL << 16) + +#define CLK_LV1_SEL_PRT 0x1UL +#define CLK_LV1_SEL_MUXA (0x1UL << 2) +#define CLK_LV1_SEL_MUXB (0x1UL << 3) + +#define CORE_PLL0_CFG_SHIFT 4 +#define CORE_PLL2_CFG_SHIFT 18 + +extern struct cpufreq_frequency_table freq_table[]; + +int clk_init(void); +void sw64_set_rate(unsigned int index); + +struct clk *sw64_clk_get(struct device *dev, const char *id); + +void sw64_update_clockevents(unsigned long cpu, u32 freq); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy); +#endif /* _ASM_SW64_CPUFREQ_H */ diff --git a/arch/sw_64/include/asm/cputime.h b/arch/sw_64/include/asm/cputime.h new file mode 100644 index 0000000000000000000000000000000000000000..cdd46b05e22840bbbe033ca200951269afa0b98f --- /dev/null +++ b/arch/sw_64/include/asm/cputime.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CPUTIME_H +#define _ASM_SW64_CPUTIME_H + +typedef u64 __nocast cputime64_t; + +#define jiffies64_to_cputime64(__jif) ((__force cputime64_t)(__jif)) + +#endif /* _ASM_SW64_CPUTIME_H */ diff --git a/arch/sw_64/include/asm/csr.h b/arch/sw_64/include/asm/csr.h new file mode 100644 index 0000000000000000000000000000000000000000..0610384208a460b32703ea7988ebbfa29be821c9 
--- /dev/null +++ b/arch/sw_64/include/asm/csr.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CSR_H +#define _ASM_SW64_CSR_H + +#include + +#define CSR_EXC_SUM 0xd +#define CSR_INT_EN 0x1a +#define CSR_INT_STAT 0x1b +#define CSR_PCIE_MSI0_INT 0x1d +#define CSR_PCIE_MSI1_INT 0x1e +#define CSR_PCIE_MSI2_INT 0x1f +#define CSR_PCIE_MSI3_INT 0x20 +#define CSR_INT_VEC 0x2d +#define CSR_PCIE_MSI0_INTEN 0x35 +#define CSR_PCIE_MSI1_INTEN 0x36 +#define CSR_PCIE_MSI2_INTEN 0x37 +#define CSR_PCIE_MSI3_INTEN 0x38 +#define CSR_EXC_GPA 0x3b +#define CSR_EXC_PC 0xe +#define CSR_AS_INFO 0x3c +#define CSR_DS_STAT 0x48 +#define CSR_SOFTCID 0xc9 +#define CSR_DVA 0x54 +#define CSR_PTBR_SYS 0x68 +#define CSR_PTBR_USR 0x69 +#define CSR_APTP 0x6a +#define CSR_CID 0xc4 +#define CSR_WR_FREGS 0xc8 +#define CSR_SHTCLOCK 0xca +#define CSR_SHTCLOCK_OFFSET 0xcb + +#ifdef CONFIG_SUBARCH_C4 +#define CSR_IA_VPNMATCH 0xa +#define CSR_UPCR 0x15 +#define CSR_VPCR 0x16 +#define CSR_IA_MATCH 0x17 +#define CSR_IA_MASK 0x18 +#define CSR_IV_MATCH 0x19 +#define CSR_IA_UPNMATCH 0x3a +#define CSR_DC_CTLP 0x4e +#define CSR_DA_MATCH 0x51 +#define CSR_DA_MASK 0x52 +#define CSR_DA_MATCH_MODE 0x53 +#define CSR_DV_MATCH 0x56 +#define CSR_DV_MASK 0x57 +#define CSR_IDA_MATCH 0xc5 +#define CSR_IDA_MASK 0xc6 + +#define DA_MATCH_EN_S 4 +#define DV_MATCH_EN_S 6 +#define DAV_MATCH_EN_S 7 +#define DPM_MATCH 8 +#define DPM_MATCH_EN_S 10 +#define IDA_MATCH_EN_S 53 +#define IV_PM_EN_S 61 +#define IV_MATCH_EN_S 62 +#define IA_MATCH_EN_S 63 + +#endif + + +#ifdef CONFIG_HAVE_CSRRW +#define read_csr(x) \ + ({ unsigned long __val; \ + __asm__ __volatile__("csrr %0,%1" : "=r"(__val) : "i"(x)); \ + __val; }) + +#define write_csr(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1" ::"r"(x), "i"(y)); }) + +#define write_csr_imb(x, y) \ + ({ __asm__ __volatile__("csrw %0,%1; imemb" ::"r"(x), "i"(y)); }) + + +#ifndef __ASSEMBLY__ +#include +static inline void update_ptbr_sys(unsigned long ptbr) +{ + imemb(); + write_csr_imb(ptbr, CSR_PTBR_SYS); +} +#endif +#else +#define read_csr(x) (0) +#define write_csr(x, y) do { } while (0) +#define write_csr_imb(x, y) do { } while (0) + +#ifndef __ASSEMBLY__ +static inline void update_ptbr_sys(unsigned long ptbr) +{ + wrptbr(ptbr); +} +#endif + +#endif +#endif /* _ASM_SW64_CSR_H */ diff --git a/arch/sw_64/include/asm/current.h b/arch/sw_64/include/asm/current.h new file mode 100644 index 0000000000000000000000000000000000000000..862caabb9c7092f561dca2613ab977fe5cc34cf0 --- /dev/null +++ b/arch/sw_64/include/asm/current.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_CURRENT_H +#define _ASM_SW64_CURRENT_H + +#ifndef __ASSEMBLY__ + +struct task_struct; +static __always_inline struct task_struct *get_current(void) +{ + register struct task_struct *tp __asm__("$8"); + + return tp; +} + +#define current get_current() + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_SW64_CURRENT_H */ diff --git a/arch/sw_64/include/asm/debug.h b/arch/sw_64/include/asm/debug.h new file mode 100644 index 0000000000000000000000000000000000000000..8db5a8bb9ab72dc1165157a02a111acd07b5c20b --- /dev/null +++ b/arch/sw_64/include/asm/debug.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Mao Minkai + * Author: Mao Minkai + * + * This code is taken from arch/mips/include/asm/debug.h + * Copyright (C) 2015 Imagination Technologies + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as 
published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef _ASM_SW64_DEBUG_H
+#define _ASM_SW64_DEBUG_H
+
+#include
+
+/*
+ * sw64_debugfs_dir corresponds to the "sw_64" directory at the top level
+ * of the DebugFS hierarchy. SW64-specific DebugFS entries should be
+ * placed beneath this directory.
+ */
+extern struct dentry *sw64_debugfs_dir;
+
+#define UNA_MAX_ENTRIES 64
+
+struct unaligned_stat {
+	unsigned long pc;
+	unsigned long va;
+};
+
+extern char unaligned_task[];
+extern unsigned long unaligned_count;
+extern struct unaligned_stat unaligned[];
+
+#endif /* _ASM_SW64_DEBUG_H */
diff --git a/arch/sw_64/include/asm/delay.h b/arch/sw_64/include/asm/delay.h
new file mode 100644
index 0000000000000000000000000000000000000000..f4080753e9545c1f19cc3775b4ad362cc378b32f
--- /dev/null
+++ b/arch/sw_64/include/asm/delay.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_DELAY_H
+#define _ASM_SW64_DELAY_H
+
+extern void __delay(unsigned long loops);
+extern void udelay(unsigned long usecs);
+
+extern void ndelay(unsigned long nsecs);
+#define ndelay ndelay
+
+#endif /* _ASM_SW64_DELAY_H */
diff --git a/arch/sw_64/include/asm/device.h b/arch/sw_64/include/asm/device.h
new file mode 100644
index 0000000000000000000000000000000000000000..d999207e07d1e3b4a9c5eb1507710cfed18c2fc7
--- /dev/null
+++ b/arch/sw_64/include/asm/device.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_DEVICE_H
+#define _ASM_SW64_DEVICE_H
+
+struct dev_archdata {
+#if defined(CONFIG_SUNWAY_IOMMU) || defined(CONFIG_SUNWAY_IOMMU_V2)
+	void *iommu;
+#endif
+};
+
+struct pdev_archdata {
+};
+#endif /* _ASM_SW64_DEVICE_H */
diff --git a/arch/sw_64/include/asm/dma-direct.h b/arch/sw_64/include/asm/dma-direct.h
new file mode 100644
index 0000000000000000000000000000000000000000..dee1680b8f6d284b709e6142c552ade27ac9f111
--- /dev/null
+++ b/arch/sw_64/include/asm/dma-direct.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_DMA_DIRECT_H
+#define _ASM_SW64_DMA_DIRECT_H
+
+static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return paddr;
+}
+
+static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
+{
+	return daddr;
+}
+
+#endif /* _ASM_SW64_DMA_DIRECT_H */
diff --git a/arch/sw_64/include/asm/dma-mapping.h b/arch/sw_64/include/asm/dma-mapping.h
new file mode 100644
index 0000000000000000000000000000000000000000..65795f8e57920ef34ce1544e54cd9bd0a996edc2
--- /dev/null
+++ b/arch/sw_64/include/asm/dma-mapping.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_DMA_MAPPING_H
+#define _ASM_SW64_DMA_MAPPING_H
+
+
+extern const struct dma_map_ops *dma_ops;
+
+static inline const struct dma_map_ops *get_arch_dma_ops(void)
+{
+	return dma_ops;
+}
+
+
+#endif /* _ASM_SW64_DMA_MAPPING_H */
diff --git a/arch/sw_64/include/asm/dma.h b/arch/sw_64/include/asm/dma.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf6a9cf75233878e8238d2f13e2e50c41fbb9564
--- /dev/null
+++ b/arch/sw_64/include/asm/dma.h
@@ -0,0 +1,350 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * include/asm-sw_64/dma.h
+ *
+ * This is essentially the same as the i386 DMA stuff, as the SW64PCs
+ * use ISA-compatible DMA. The only extension is support for high-page
+ * registers that allow setting the top 8 bits of a 32-bit DMA address.
+ * This register should be written last when setting up a DMA address + * as this will also enable DMA across 64 KB boundaries. + */ + +/* $Id: dma.h,v 1.7 1992/12/14 00:29:34 root Exp root $ + * linux/include/asm/dma.h: Defines for using and allocating dma channels. + * Written by Hennus Bergman, 1992. + * High DMA channel support & info by Hannu Savolainen + * and John Boyd, Nov. 1992. + */ + +#ifndef _ASM_SW64_DMA_H +#define _ASM_SW64_DMA_H + +#include +#include + +#define dma_outb outb +#define dma_inb inb + +/* + * NOTES about DMA transfers: + * + * controller 1: channels 0-3, byte operations, ports 00-1F + * controller 2: channels 4-7, word operations, ports C0-DF + * + * - ALL registers are 8 bits only, regardless of transfer size + * - channel 4 is not used - cascades 1 into 2. + * - channels 0-3 are byte - addresses/counts are for physical bytes + * - channels 5-7 are word - addresses/counts are for physical words + * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries + * - transfer count loaded to registers is 1 less than actual count + * - controller 2 offsets are all even (2x offsets for controller 1) + * - page registers for 5-7 don't use data bit 0, represent 128K pages + * - page registers for 0-3 use bit 0, represent 64K pages + * + * DMA transfers are limited to the lower 16MB of _physical_ memory. + * Note that addresses loaded into registers must be _physical_ addresses, + * not logical addresses (which may differ if paging is active). + * + * Address mapping for channels 0-3: + * + * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * | ... | | ... | | ... | + * P7 ... P0 A7 ... A0 A7 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Address mapping for channels 5-7: + * + * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) + * | ... | \ \ ... \ \ \ ... \ \ + * | ... | \ \ ... \ \ \ ... \ (not used) + * | ... | \ \ ... \ \ \ ... \ + * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... A0 + * | Page | Addr MSB | Addr LSB | (DMA registers) + * + * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses + * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at + * the hardware level, so odd-byte transfers aren't possible). + * + * Transfer count (_not # bytes_) is limited to 64K, represented as actual + * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, + * and up to 128K bytes may be transferred on channels 5-7 in one operation. + * + */ + +#define MAX_DMA_CHANNELS 8 + +/* + * ISA DMA limitations on sw64 platforms, + + * These may be due to SIO (PCI<->ISA bridge) chipset limitation, or + * just a wiring limit. + */ + +/* + * Maximum address for all the others is the complete 32-bit bus + * address space. + */ +#define MAX_ISA_DMA_ADDRESS 0x100000000UL + +#define MAX_DMA32_PFN (1UL << (32 - PAGE_SHIFT)) + +/* + * If we have the iommu, we don't have any address limitations on DMA. + * Otherwise (Nautilus, RX164), we have to have 0-16 Mb DMA zone + * like i386. 
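+ *
+ * (Illustrative note, not from this patch: on such IOMMU-less systems
+ * an ISA-style driver would allocate its buffers with GFP_DMA, e.g.
+ * kmalloc(size, GFP_KERNEL | GFP_DMA), so they land in the low zone.)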
+ */ +#define MAX_DMA_ADDRESS ~0UL + +/* 8237 DMA controllers */ +#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */ +#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */ + +/* DMA controller registers */ +#define DMA1_CMD_REG 0x08 /* command register (w) */ +#define DMA1_STAT_REG 0x08 /* status register (r) */ +#define DMA1_REQ_REG 0x09 /* request register (w) */ +#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */ +#define DMA1_MODE_REG 0x0B /* mode register (w) */ +#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */ +#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */ +#define DMA1_RESET_REG 0x0D /* Master Clear (w) */ +#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */ +#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */ +#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG) + +#define DMA2_CMD_REG 0xD0 /* command register (w) */ +#define DMA2_STAT_REG 0xD0 /* status register (r) */ +#define DMA2_REQ_REG 0xD2 /* request register (w) */ +#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */ +#define DMA2_MODE_REG 0xD6 /* mode register (w) */ +#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */ +#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */ +#define DMA2_RESET_REG 0xDA /* Master Clear (w) */ +#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */ +#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */ +#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG) + +#define DMA_ADDR_0 0x00 /* DMA address registers */ +#define DMA_ADDR_1 0x02 +#define DMA_ADDR_2 0x04 +#define DMA_ADDR_3 0x06 +#define DMA_ADDR_4 0xC0 +#define DMA_ADDR_5 0xC4 +#define DMA_ADDR_6 0xC8 +#define DMA_ADDR_7 0xCC + +#define DMA_CNT_0 0x01 /* DMA count registers */ +#define DMA_CNT_1 0x03 +#define DMA_CNT_2 0x05 +#define DMA_CNT_3 0x07 +#define DMA_CNT_4 0xC2 +#define DMA_CNT_5 0xC6 +#define DMA_CNT_6 0xCA +#define DMA_CNT_7 0xCE + +#define DMA_PAGE_0 0x87 /* DMA page registers */ +#define DMA_PAGE_1 0x83 +#define DMA_PAGE_2 0x81 +#define DMA_PAGE_3 0x82 +#define DMA_PAGE_5 0x8B +#define DMA_PAGE_6 0x89 +#define DMA_PAGE_7 0x8A + +#define DMA_HIPAGE_0 (0x400 | DMA_PAGE_0) +#define DMA_HIPAGE_1 (0x400 | DMA_PAGE_1) +#define DMA_HIPAGE_2 (0x400 | DMA_PAGE_2) +#define DMA_HIPAGE_3 (0x400 | DMA_PAGE_3) +#define DMA_HIPAGE_4 (0x400 | DMA_PAGE_4) +#define DMA_HIPAGE_5 (0x400 | DMA_PAGE_5) +#define DMA_HIPAGE_6 (0x400 | DMA_PAGE_6) +#define DMA_HIPAGE_7 (0x400 | DMA_PAGE_7) + +#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ +#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ +#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ + +#define DMA_AUTOINIT 0x10 + +extern spinlock_t dma_spin_lock; + +static inline unsigned long claim_dma_lock(void) +{ + unsigned long flags; + + spin_lock_irqsave(&dma_spin_lock, flags); + return flags; +} + +static inline void release_dma_lock(unsigned long flags) +{ + spin_unlock_irqrestore(&dma_spin_lock, flags); +} + +/* enable/disable a specific DMA channel */ +static inline void enable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr, DMA1_MASK_REG); + else + dma_outb(dmanr & 3, DMA2_MASK_REG); +} + +static inline void disable_dma(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(dmanr | 4, DMA1_MASK_REG); + else + dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); +} + +/* Clear the 'DMA Pointer Flip Flop'. + * Write 0 for LSB/MSB, 1 for MSB/LSB access. + * Use this once to initialize the FF to a known state. 
+ * After that, keep track of it. :-) + * --- In order to do that, the DMA routines below should --- + * --- only be used while interrupts are disabled! --- + */ +static inline void clear_dma_ff(unsigned int dmanr) +{ + if (dmanr <= 3) + dma_outb(0, DMA1_CLEAR_FF_REG); + else + dma_outb(0, DMA2_CLEAR_FF_REG); +} + +/* set mode (above) for a specific DMA channel */ +static inline void set_dma_mode(unsigned int dmanr, char mode) +{ + if (dmanr <= 3) + dma_outb(mode | dmanr, DMA1_MODE_REG); + else + dma_outb(mode | (dmanr & 3), DMA2_MODE_REG); +} + +/* set extended mode for a specific DMA channel */ +static inline void set_dma_ext_mode(unsigned int dmanr, char ext_mode) +{ + if (dmanr <= 3) + dma_outb(ext_mode | dmanr, DMA1_EXT_MODE_REG); + else + dma_outb(ext_mode | (dmanr & 3), DMA2_EXT_MODE_REG); +} + +/* Set only the page register bits of the transfer address. + * This is used for successive transfers when we know the contents of + * the lower 16 bits of the DMA current address register. + */ +static inline void set_dma_page(unsigned int dmanr, unsigned int pagenr) +{ + switch (dmanr) { + case 0: + dma_outb(pagenr, DMA_PAGE_0); + dma_outb((pagenr >> 8), DMA_HIPAGE_0); + break; + case 1: + dma_outb(pagenr, DMA_PAGE_1); + dma_outb((pagenr >> 8), DMA_HIPAGE_1); + break; + case 2: + dma_outb(pagenr, DMA_PAGE_2); + dma_outb((pagenr >> 8), DMA_HIPAGE_2); + break; + case 3: + dma_outb(pagenr, DMA_PAGE_3); + dma_outb((pagenr >> 8), DMA_HIPAGE_3); + break; + case 5: + dma_outb(pagenr & 0xfe, DMA_PAGE_5); + dma_outb((pagenr >> 8), DMA_HIPAGE_5); + break; + case 6: + dma_outb(pagenr & 0xfe, DMA_PAGE_6); + dma_outb((pagenr >> 8), DMA_HIPAGE_6); + break; + case 7: + dma_outb(pagenr & 0xfe, DMA_PAGE_7); + dma_outb((pagenr >> 8), DMA_HIPAGE_7); + break; + } +} + + +/* Set transfer address & page bits for specific DMA channel. + * Assumes dma flipflop is clear. + */ +static inline void set_dma_addr(unsigned int dmanr, unsigned int a) +{ + if (dmanr <= 3) { + dma_outb(a & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + dma_outb((a >> 8) & 0xff, ((dmanr & 3) << 1) + IO_DMA1_BASE); + } else { + dma_outb((a >> 1) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + dma_outb((a >> 9) & 0xff, ((dmanr & 3) << 2) + IO_DMA2_BASE); + } + set_dma_page(dmanr, a >> 16); /* set hipage last to enable 32-bit mode */ +} + + +/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for + * a specific DMA channel. + * You must ensure the parameters are valid. + * NOTE: from a manual: "the number of transfers is one more + * than the initial word count"! This is taken into account. + * Assumes dma flip-flop is clear. + * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. + */ +static inline void set_dma_count(unsigned int dmanr, unsigned int count) +{ + count--; + if (dmanr <= 3) { + dma_outb(count & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + dma_outb((count >> 8) & 0xff, ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE); + } else { + dma_outb((count >> 1) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + dma_outb((count >> 9) & 0xff, ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE); + } +} + + +/* Get DMA residue count. After a DMA transfer, this + * should return zero. Reading this while a DMA transfer is + * still in progress will return unpredictable results. + * If called before the channel has been used, it may return 1. + * Otherwise, it returns the number of _bytes_ left to transfer. + * + * Assumes DMA flip-flop is clear. 
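+ *
+ * As an illustrative sketch (chan, buf_phys and len are hypothetical
+ * names, not part of this patch), a complete single-transfer setup
+ * using the helpers above would be:
+ *
+ *	flags = claim_dma_lock();
+ *	clear_dma_ff(chan);
+ *	set_dma_mode(chan, DMA_MODE_READ);
+ *	set_dma_addr(chan, buf_phys);
+ *	set_dma_count(chan, len);
+ *	enable_dma(chan);
+ *	release_dma_lock(flags);
+ *
+ * after which the device performs the transfer and get_dma_residue()
+ * reports how many bytes are still outstanding.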
+ */ +static inline int get_dma_residue(unsigned int dmanr) +{ + unsigned int io_port = (dmanr <= 3) ? + ((dmanr & 3) << 1) + 1 + IO_DMA1_BASE : + ((dmanr & 3) << 2) + 2 + IO_DMA2_BASE; + + /* using short to get 16-bit wrap around */ + unsigned short count; + + count = 1 + dma_inb(io_port); + count += dma_inb(io_port) << 8; + + return (dmanr <= 3) ? count : (count << 1); +} + + +/* These are in kernel/dma.c: */ +extern int request_dma(unsigned int dmanr, const char *device_id); /* reserve a DMA channel */ +extern void free_dma(unsigned int dmanr); /* release it again */ +#define KERNEL_HAVE_CHECK_DMA +extern int check_dma(unsigned int dmanr); + +/* From PCI */ + +#ifdef CONFIG_PCI +extern int isa_dma_bridge_buggy; +#else +#define isa_dma_bridge_buggy (0) +#endif + + +#endif /* _ASM_SW64_DMA_H */ diff --git a/arch/sw_64/include/asm/dmi.h b/arch/sw_64/include/asm/dmi.h new file mode 100644 index 0000000000000000000000000000000000000000..05e80c9a3a76dc143e505440a41b7d150b873e3d --- /dev/null +++ b/arch/sw_64/include/asm/dmi.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/dmi.h + * + * Copyright (C) 2019 Deepin Limited. + * Porting by: Deepin Kernel Team (kernel@deepin.com) + * + * based on arch/x864/include/asm/dmi.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#ifndef _ASM_SW64_DMI_H +#define _ASM_SW64_DMI_H + +#include +#include +#include +#include + +/* Use early IO mappings for DMI because it's initialized early */ +#define dmi_early_remap(x, l) early_ioremap(x, l) +#define dmi_early_unmap(x, l) early_iounmap(x, l) +#define dmi_remap(x, l) early_ioremap(x, l) +#define dmi_unmap(x) early_iounmap(x, 0) +#define dmi_alloc(l) kzalloc(l, GFP_KERNEL) + +#endif /* _ASM_SW64_DMI_H */ diff --git a/arch/sw_64/include/asm/early_ioremap.h b/arch/sw_64/include/asm/early_ioremap.h new file mode 100644 index 0000000000000000000000000000000000000000..172b96a401cb871241e3cbd0f7e316524fe211f7 --- /dev/null +++ b/arch/sw_64/include/asm/early_ioremap.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EARLY_IOREMAP_H +#define _ASM_SW64_EARLY_IOREMAP_H + +#include +#include + +static inline void __iomem * +early_ioremap(unsigned long phys_addr, unsigned long size) +{ + unsigned long y = 0; + + if (phys_addr >= __START_KERNEL_map) { + y = (unsigned long) phys_to_virt(__pa(phys_addr)); + } else { + y = phys_addr; + y |= PAGE_OFFSET; + } + + return (void __iomem *) y; +} +#define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) + +static inline void early_iounmap(volatile void __iomem *addr, unsigned long size) +{ +} +#define early_memunmap(addr, size) early_iounmap(addr, size) + +#endif /* _ASM_SW64_EARLY_IOREMAP_H */ diff --git a/arch/sw_64/include/asm/efi.h b/arch/sw_64/include/asm/efi.h new file mode 100644 index 0000000000000000000000000000000000000000..34d5637e23c2e23045764f6f316a1f5dcb5f36fb --- /dev/null +++ b/arch/sw_64/include/asm/efi.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_EFI_H +#define _ASM_SW64_EFI_H + +#include +#include +#ifdef CONFIG_EFI +extern void efi_init(void); +extern unsigned long entSuspend; + +#define SLEEP_ENTRY_GUID EFI_GUID(0x59cb76bb, 0x9c3a, 0x4c8f, 0xbd, 0x5c, 0xc0, 0x0f, 0x20, 0x61, 0x18, 0x4b) + +#else +#define efi_init() +#define efi_idmap_init() +#endif + +#define arch_efi_call_virt_setup() +#define 
arch_efi_call_virt_teardown() + +#define ARCH_EFI_IRQ_FLAGS_MASK 0x00000001 + +/* arch specific definitions used by the stub code */ + +/* + * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from + * start of kernel and may not cross a 2MiB boundary. We set alignment to + * 2MiB so we know it won't cross a 2MiB boundary. + */ +#define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ +#define MAX_FDT_OFFSET SZ_512M + +#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) + +#endif /* _ASM_SW64_EFI_H */ diff --git a/arch/sw_64/include/asm/elf.h b/arch/sw_64/include/asm/elf.h new file mode 100644 index 0000000000000000000000000000000000000000..95ba89a1aa9db11a8c8cae1fa1ca5b3ac67d445b --- /dev/null +++ b/arch/sw_64/include/asm/elf.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_ELF_H +#define _ASM_SW64_ELF_H +#ifdef __KERNEL__ +#include +#endif +/* Special values for the st_other field in the symbol table. */ + + +#define STO_SW64_NOPV 0x80 +#define STO_SW64_STD_GPLOAD 0x88 + +/* + * SW-64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +#define SHF_SW64_GPREL 0x10000000 + +/* Legal values for e_flags field of Elf64_Ehdr. */ + +#define EF_SW64_32BIT 1 /* All addresses are below 2GB */ + +/* + * ELF register definitions. + * + * For now, we just leave it at 33 (32 general regs + processor status word). + */ +#define ELF_NGREG 33 + +typedef unsigned long elf_greg_t; +typedef elf_greg_t elf_gregset_t[ELF_NGREG]; + +/* Same with user_fpsimd_state */ +#include +typedef struct user_fpsimd_state elf_fpregset_t; + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) ((x)->e_machine == EM_SW64) + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_CLASS ELFCLASS64 +#define ELF_DATA ELFDATA2LSB +#define ELF_ARCH EM_SW64 + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE PAGE_SIZE + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. 
Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+
+#define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
+
+/*
+ * $0 is set by ld.so to a pointer to a function which might be
+ * registered using atexit. This provides a means for the dynamic
+ * linker to call DT_FINI functions for shared libraries that have
+ * been loaded before the code runs.
+ *
+ * So that we can use the same startup file with static executables,
+ * we start programs with a value of 0 to indicate that there is no
+ * such function.
+ */
+
+#define ELF_PLAT_INIT(_r, load_addr)	(_r->regs[0] = 0)
+
+/*
+ * The registers are laid out in pt_regs for HMCODE and syscall
+ * convenience. Re-order them for the linear elf_gregset_t.
+ */
+
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+				       int uses_interp);
+
+#ifdef __KERNEL__
+struct pt_regs;
+struct task_struct;
+extern void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *pt);
+#define ELF_CORE_COPY_REGS(DEST, REGS) sw64_elf_core_copy_regs(DEST, REGS);
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports.
+ */
+
+#define ELF_HWCAP 0
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+
+#define ELF_PLATFORM ("sw_64")
+
+
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
+#define ARCH_DLINFO \
+do { \
+	NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+		    (elf_addr_t)current->mm->context.vdso); \
+} while (0)
+
+struct mm_struct;
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+#endif
+
+#endif /* _ASM_SW64_ELF_H */
diff --git a/arch/sw_64/include/asm/extable.h b/arch/sw_64/include/asm/extable.h
new file mode 100644
index 0000000000000000000000000000000000000000..42f872ce6c3bb8531c67fb8c7a1f64b6bfc0b77f
--- /dev/null
+++ b/arch/sw_64/include/asm/extable.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_EXTABLE_H
+#define _ASM_SW64_EXTABLE_H
+
+/*
+ * About the exception table:
+ *
+ * - insn is a 32-bit pc-relative offset from the faulting insn.
+ * - nextinsn is a 16-bit offset off of the faulting instruction
+ *   (not off of the *next* instruction as branches are).
+ * - errreg is the register in which to place -EFAULT.
+ * - valreg is the final target register for the load sequence
+ *   and will be zeroed.
+ *
+ * Either errreg or valreg may be $31, in which case nothing happens.
+ *
+ * The exception fixup information "just so happens" to be arranged
+ * as in a MEM format instruction.
This lets us emit our three
+ * values like so:
+ *
+ *	lda valreg, nextinsn(errreg)
+ *
+ */
+
+struct exception_table_entry {
+	signed int insn;
+	union exception_fixup {
+		unsigned int unit;
+		struct {
+			signed int nextinsn : 16;
+			unsigned int errreg : 5;
+			unsigned int valreg : 5;
+		} bits;
+	} fixup;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+extern int fixup_exception(struct pt_regs *regs, unsigned long pc);
+
+#define swap_ex_entry_fixup(a, b, tmp, delta)		\
+	do {						\
+		(a)->fixup.unit = (b)->fixup.unit;	\
+		(b)->fixup.unit = (tmp).fixup.unit;	\
+	} while (0)
+
+#endif /* _ASM_SW64_EXTABLE_H */
diff --git a/arch/sw_64/include/asm/fpu.h b/arch/sw_64/include/asm/fpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..a0b0ff5af1689726d71ff29bbe5c14e6ce83b432
--- /dev/null
+++ b/arch/sw_64/include/asm/fpu.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_FPU_H
+#define _ASM_SW64_FPU_H
+
+#include
+#ifdef __KERNEL__
+
+/*
+ * The following two functions don't need trapb/excb instructions
+ * around the mf_fpcr/mt_fpcr instructions because (a) the kernel
+ * never generates arithmetic faults and (b) sys_call instructions
+ * are implied trap barriers.
+ */
+
+static inline unsigned long
+rdfpcr(void)
+{
+	unsigned long ret;
+	unsigned long fp[4] __aligned(32);
+
+	__asm__ __volatile__ (
+		"	vstd	$f0, %0\n\t"
+		"	rfpcr	$f0\n\t"
+		"	fimovd	$f0, %1\n\t"
+		"	vldd	$f0, %0\n\t"
+		: "=m"(*fp), "=r"(ret));
+
+	return ret;
+}
+
+static inline void
+wrfpcr(unsigned long val)
+{
+	unsigned long tmp;
+	unsigned long fp[4] __aligned(32);
+
+	__asm__ __volatile__ (
+		"	vstd	$f0, %0\n\t"
+		"	ifmovd	%2, $f0\n\t"
+		"	wfpcr	$f0\n\t"
+		"	and	%2, 0x3, %1\n\t"
+		"	beq	%1, 1f\n\t"
+		"	subl	%1, 1, %1\n\t"
+		"	beq	%1, 2f\n\t"
+		"	subl	%1, 1, %1\n\t"
+		"	beq	%1, 3f\n\t"
+		"	setfpec3\n\t"
+		"	br	6f\n\t"
+		"1:	setfpec0\n\t"
+		"	br	6f\n\t"
+		"2:	setfpec1\n\t"
+		"	br	6f\n\t"
+		"3:	setfpec2\n\t"
+		"6:	vldd	$f0, %0\n\t"
+		: "=m"(*fp), "=&r"(tmp) : "r"(val));
+}
+
+static inline unsigned long
+swcr_update_status(unsigned long swcr, unsigned long fpcr)
+{
+	/*
+	 * SW64 implements most of the bits in hardware. Collect
+	 * the accrued exception bits from the real fpcr.
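+	 * Each shift below lines one hardware field up with its
+	 * IEEE_STATUS_MASKn window; e.g. (fpcr >> 35) & IEEE_STATUS_MASK0
+	 * moves the field sitting 35 bits higher in the fpcr down into
+	 * the mask-0 bit positions of swcr.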
+ */ + swcr &= ~(IEEE_STATUS_MASK0 | IEEE_STATUS_MASK1 + | IEEE_STATUS_MASK2 | IEEE_STATUS_MASK3); + swcr |= (fpcr >> 35) & IEEE_STATUS_MASK0; + swcr |= (fpcr >> 13) & IEEE_STATUS_MASK1; + swcr |= (fpcr << 14) & IEEE_STATUS_MASK2; + swcr |= (fpcr << 36) & IEEE_STATUS_MASK3; + return swcr; +} + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +extern void sw64_write_simd_fp_reg_s(unsigned long reg, + unsigned long f0, unsigned long f1); +extern void sw64_write_simd_fp_reg_d(unsigned long reg, + unsigned long f0, unsigned long f1, + unsigned long f2, unsigned long f3); +extern void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a); +extern void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value); +extern void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value); + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FPU_H */ diff --git a/arch/sw_64/include/asm/ftrace.h b/arch/sw_64/include/asm/ftrace.h new file mode 100644 index 0000000000000000000000000000000000000000..7ed6e3c06a333e81d96e881de91c0d1aac670ae7 --- /dev/null +++ b/arch/sw_64/include/asm/ftrace.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/include/asm/ftrace.h + * + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#ifndef _ASM_SW64_FTRACE_H +#define _ASM_SW64_FTRACE_H + +#define MCOUNT_ADDR ((unsigned long)_mcount) +#define MCOUNT_INSN_SIZE 20 /* 5 * SW64_INSN_SIZE */ +#define MCOUNT_LDGP_SIZE 8 /* 2 * SW64_INSN_SIZE */ + +#define ARCH_SUPPORTS_FTRACE_OPS 1 + +#ifndef __ASSEMBLY__ +#include +#include + + +extern void _mcount(unsigned long); + +struct dyn_arch_ftrace { + /* No extra data needed for sw64 */ +}; + +extern unsigned long ftrace_graph_call; + + +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + /* + * addr is the address of the mcount call instruction. + * recordmcount does the necessary offset calculation. 
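+	 * Since the recorded address already points at the call site,
+	 * no extra displacement is needed here and addr is returned
+	 * unchanged.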
+ */ + return addr; +} + +#endif /* ifndef __ASSEMBLY__ */ +#endif /* _ASM_SW64_FTRACE_H */ diff --git a/arch/sw_64/include/asm/futex.h b/arch/sw_64/include/asm/futex.h new file mode 100644 index 0000000000000000000000000000000000000000..78379981398053e3c7c29f1dddab934a8e44a003 --- /dev/null +++ b/arch/sw_64/include/asm/futex.h @@ -0,0 +1,168 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_FUTEX_H +#define _ASM_SW64_FUTEX_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +#ifdef CONFIG_SUBARCH_C3B + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + " ldi %2, 1\n" \ + " wr_f %2\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " rd_f %1\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: br 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " wr_f %2\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " rd_f %3\n" + " beq %2, 3f\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: br 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#else /* !CONFIG_SUBARCH_C3B */ + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg, tmp) \ + __asm__ __volatile__( \ + "1: lldw %0, 0(%3)\n" \ + insn \ + "2: lstw %1, 0(%3)\n" \ + " beq %1, 4f\n" \ + " bis $31, $31, %1\n" \ + "3: .subsection 2\n" \ + "4: lbr 1b\n" \ + " .previous\n" \ + " .section __ex_table, \"a\"\n" \ + " .long 1b-.\n" \ + " ldi $31, 3b-1b(%1)\n" \ + " .long 2b-.\n" \ + " ldi $31, 3b-2b(%1)\n" \ + " .previous\n" \ + : "=&r" (oldval), "=&r"(ret), "=&r"(tmp) \ + : "r" (uaddr), "r"(oparg) \ + : "memory") + + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0, cmp; + u32 prev, tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __asm__ __volatile__ ( + "1: lldw %1, 0(%4)\n" + " cmpeq %1, %5, %2\n" + " beq %2, 3f\n" + " bis $31, %6, %3\n" + "2: lstw %3, 0(%4)\n" + " beq %3, 4f\n" + "3: .subsection 2\n" + "4: lbr 1b\n" + " .previous\n" + " .section __ex_table, \"a\"\n" + " .long 1b-.\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b-.\n" + " ldi $31, 3b-2b(%0)\n" + " .previous\n" + : "+r"(ret), "=&r"(prev), "=&r"(cmp), "=&r"(tmp) + : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) + : "memory"); + + *uval = prev; + return ret; +} +#endif /* CONFIG_SUBARCH_C3B */ + +static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, + u32 __user *uaddr) +{ + int oldval = 0, ret; + unsigned long tmp; + + pagefault_disable(); + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("mov %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("addw %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_OR: + __futex_atomic_op("or 
%0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("andnot %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("xor %0, %4, %1\n", ret, oldval, uaddr, oparg, tmp); + break; + default: + ret = -ENOSYS; + } + + pagefault_enable(); + + if (!ret) + *oval = oldval; + + return ret; +} + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_FUTEX_H */ diff --git a/arch/sw_64/include/asm/hardirq.h b/arch/sw_64/include/asm/hardirq.h new file mode 100644 index 0000000000000000000000000000000000000000..03368c3659dd5f7b0c2789029e91e9d92d25a25d --- /dev/null +++ b/arch/sw_64/include/asm/hardirq.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HARDIRQ_H +#define _ASM_SW64_HARDIRQ_H + +void ack_bad_irq(unsigned int irq); +#define ack_bad_irq ack_bad_irq + +#include + +#define __ARCH_IRQ_STAT +typedef struct { + u16 __softirq_pending; + unsigned int timer_irqs_event; +} ____cacheline_aligned irq_cpustat_t; + +DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); + +#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) +#define arch_irq_stat_cpu arch_irq_stat_cpu +#define arch_irq_stat arch_irq_stat +extern u64 arch_irq_stat_cpu(unsigned int cpu); +extern u64 arch_irq_stat(void); + +#endif /* _ASM_SW64_HARDIRQ_H */ diff --git a/arch/sw_64/include/asm/hcall.h b/arch/sw_64/include/asm/hcall.h new file mode 100644 index 0000000000000000000000000000000000000000..bded05779db748861e2aeea7b7bd0863bd82852e --- /dev/null +++ b/arch/sw_64/include/asm/hcall.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HCALL_H +#define _ASM_SW64_HCALL_H + +#define HMC_hcall 0x32 +/* HCALL must > 0 */ +enum HCALL_TYPE { + HCALL_HALT = 10, + HCALL_NOTIFY = 11, + HCALL_SHUTDOWN = 12, + HCALL_SET_CLOCKEVENT = 13, + HCALL_IVI = 14, /* interrupt between virtual cpu */ + HCALL_TBI = 15, /* tlb flush for virtual cpu */ + HCALL_STOP = 16, /* indicate virtual cpu stopped */ + HCALL_RESTART = 17, /* indicate virtual cpu restarted */ + HCALL_MSI = 18, /* guest request msi intr */ + HCALL_MSIX = 19, /* guest request msix intr */ + HCALL_SWNET = 20, /* guest request swnet service */ + HCALL_SWNET_IRQ = 21, /* guest request swnet intr */ + HCALL_FATAL_ERROR = 22, /* guest fatal error, issued by hmcode */ + HCALL_MEMHOTPLUG = 23, /* guest memory hotplug event */ + NR_HCALL +}; + +static inline unsigned long hcall(unsigned long hcall, unsigned long arg0, + unsigned long arg1, unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = hcall; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5 " + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#endif /* _ASM_SW64_HCALL_H */ diff --git a/arch/sw_64/include/asm/hmcall.h b/arch/sw_64/include/asm/hmcall.h new file mode 100644 index 0000000000000000000000000000000000000000..e3bac3016740a831b78928fef7b00c2584819fd2 --- /dev/null +++ b/arch/sw_64/include/asm/hmcall.h @@ -0,0 +1,236 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HMCALL_H +#define _ASM_SW64_HMCALL_H + +/* + * Common HMC-code + */ +/* 0x0 - 0x3F : Kernel Level HMC routine */ +#define HMC_halt 0x00 +#define HMC_rdio64 0x01 +#define 
HMC_rdio32 0x02 +#define HMC_cpuid 0x03 +#define HMC_sleepen 0x05 +#define HMC_rdksp 0x06 +#define HMC_wrasid 0x08 +#define HMC_rdktp 0x09 +#define HMC_wrktp 0x0A +#define HMC_rdptbr 0x0B +#define HMC_wrptbr 0x0C +#define HMC_rdhtctl 0x0D +#define HMC_wrksp 0x0E +#define HMC_mtinten 0x0F +#define HMC_load_mm 0x11 +#define HMC_tbisasid 0x14 +#define HMC_tbivpn 0x19 +#define HMC_ret 0x1A +#define HMC_wrvpcr 0x29 +#define HMC_wrfen 0x2B +#define HMC_sflush 0x2F +#define HMC_entervm 0x31 +#define HMC_hcall 0x32 +#define HMC_tbi 0x33 +#define HMC_wrent 0x34 +#define HMC_swpipl 0x35 +#define HMC_rdps 0x36 +#define HMC_wrkgp 0x37 +#define HMC_wrusp 0x38 +#define HMC_rvpcr 0x39 +#define HMC_rdusp 0x3A +#define HMC_wrtimer 0x3B +#define HMC_whami 0x3C +#define HMC_retsys 0x3D +#define HMC_sendii 0x3E +#define HMC_rti 0x3F + + +/* 0x80 - 0xBF : User Level HMC routine */ +#include + +/* Following will be deprecated from user level invocation */ +#define HMC_rwreg 0x87 +#define HMC_sz_uflush 0xA8 +#define HMC_longtime 0xB1 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +#include +extern void __init fixup_hmcall(void); + +extern void halt(void) __noreturn; + +#define __CALL_HMC_VOID(NAME) \ +static inline void NAME(void) \ +{ \ + __asm__ __volatile__( \ + "sys_call %0 " : : "i" (HMC_ ## NAME)); \ +} + +#define __CALL_HMC_R0(NAME, TYPE) \ +static inline TYPE NAME(void) \ +{ \ + register TYPE __r0 __asm__("$0"); \ + __asm__ __volatile__( \ + "sys_call %1 # " #NAME \ + : "=r" (__r0) \ + : "i" (HMC_ ## NAME) \ + : "$1", "$16", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_W1(NAME, TYPE0) \ +static inline void NAME(TYPE0 arg0) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %1 # "#NAME \ + : "=r"(__r16) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_W2(NAME, TYPE0, TYPE1) \ +static inline void NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r17) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ +} + +#define __CALL_HMC_RW1(NAME, RTYPE, TYPE0) \ +static inline RTYPE NAME(TYPE0 arg0) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + __asm__ __volatile__( \ + "sys_call %2 # "#NAME \ + : "=r"(__r16), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW2(NAME, RTYPE, TYPE0, TYPE1) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + __asm__ __volatile__( \ + "sys_call %3 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17) \ + : "$1", "$22", "$23", "$24", "$25"); \ + return __r0; \ +} + +#define __CALL_HMC_RW3(NAME, RTYPE, TYPE0, TYPE1, TYPE2) \ +static inline RTYPE NAME(TYPE0 arg0, TYPE1 arg1, TYPE2 arg2) \ +{ \ + register RTYPE __r0 __asm__("$0"); \ + register TYPE0 __r16 __asm__("$16") = arg0; \ + register TYPE1 __r17 __asm__("$17") = arg1; \ + register TYPE2 __r18 __asm__("$18") = arg2; \ + __asm__ __volatile__( \ + "sys_call %4 # "#NAME \ + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0) \ + : "i"(HMC_ ## NAME), "0"(__r16), "1"(__r17), "2"(__r18) \ + : "$1", "$22", 
"$23", "$24", "$25"); \ + return __r0; \ +} + + +__CALL_HMC_VOID(imb); +__CALL_HMC_VOID(sflush); +__CALL_HMC_VOID(wrfen); +#define fpu_enable() wrfen() + +__CALL_HMC_VOID(sleepen); +__CALL_HMC_VOID(mtinten); + +__CALL_HMC_VOID(rdktp); +#define restore_ktp() rdktp() +__CALL_HMC_VOID(wrktp); +#define save_ktp() wrktp() + +__CALL_HMC_R0(rdps, unsigned long); + +__CALL_HMC_R0(rdusp, unsigned long); +__CALL_HMC_W1(wrusp, unsigned long); + +__CALL_HMC_R0(rdksp, unsigned long); +__CALL_HMC_W1(wrksp, unsigned long); +__CALL_HMC_R0(rdhtctl, unsigned long); + +/* + * Load a mm context. This is needed when we change the page + * table pointer(CSR:PTBR) or when we update the ASID. + * load_mm(asid, ptbr) + * + */ +__CALL_HMC_W2(load_mm, unsigned long, unsigned long); + +__CALL_HMC_W1(wrasid, unsigned long); +__CALL_HMC_R0(rdptbr, unsigned long); +__CALL_HMC_W1(wrptbr, unsigned long); + +__CALL_HMC_RW1(swpipl, unsigned long, unsigned long); +__CALL_HMC_R0(whami, unsigned long); +__CALL_HMC_RW1(rdio64, unsigned long, unsigned long); +__CALL_HMC_RW1(rdio32, unsigned int, unsigned long); +__CALL_HMC_W2(wrent, void*, unsigned long); +__CALL_HMC_W2(tbisasid, unsigned long, unsigned long); +__CALL_HMC_W1(wrkgp, unsigned long); +__CALL_HMC_RW2(wrperfmon, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW3(sendii, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_W1(wrtimer, unsigned long); +__CALL_HMC_RW3(tbivpn, unsigned long, unsigned long, unsigned long, unsigned long); +__CALL_HMC_RW2(cpuid, unsigned long, unsigned long, unsigned long); + +__CALL_HMC_W1(wrtp, unsigned long); +/* + * TB routines.. + */ +#define __tbi(nr, arg, arg1...) \ +({ \ + register unsigned long __r16 __asm__("$16") = (nr); \ + register unsigned long __r17 __asm__("$17"); arg; \ + __asm__ __volatile__( \ + "sys_call %3 #__tbi" \ + : "=r" (__r16), "=r" (__r17) \ + : "0" (__r16), "i" (HMC_tbi), ##arg1 \ + : "$0", "$1", "$22", "$23", "$24", "$25"); \ +}) + +#define tbi(x, y) __tbi(x, __r17 = (y), "1" (__r17)) + +/* Invalidate all TLB, only used by hypervisor */ +#define tbia() __tbi(-2, /* no second argument */) + +/* Invalidate TLB for all processes with currnet VPN */ +#define tbivp() __tbi(-1, /* no second argument */) + +/* Invalidate all TLB with current VPN */ +#define tbiv() __tbi(0, /* no second argument */) + +/* Invalidate ITLB of addr with current UPN and VPN */ +#define tbisi(addr) __tbi(1, __r17 = (addr), "1" (__r17)) + +/* Invalidate DTLB of addr with current UPN and VPN */ +#define tbisd(addr) __tbi(2, __r17 = (addr), "1" (__r17)) + +/* Invalidate TLB of addr with current UPN and VPN */ +#define tbis(addr) __tbi(3, __r17 = (addr), "1" (__r17)) + +/* Invalidate all user TLB with current UPN and VPN */ +#define tbiu() __tbi(4, /* no second argument */) + +#endif /* !__ASSEMBLY__ */ +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_HMCALL_H */ diff --git a/arch/sw_64/include/asm/hugetlb.h b/arch/sw_64/include/asm/hugetlb.h new file mode 100644 index 0000000000000000000000000000000000000000..f4c8cbe0891a9fd138dfdd1e049a09b82b08ad44 --- /dev/null +++ b/arch/sw_64/include/asm/hugetlb.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HUGETLB_H +#define _ASM_SW64_HUGETLB_H + +#include + +#ifdef CONFIG_SUBARCH_C4 +#define __HAVE_ARCH_HUGE_PTE_CLEAR +extern void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz); + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +extern void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t 
*ptep, pte_t pte, unsigned long sz); + +#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR +extern pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH +extern pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +extern void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep); + +#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS +extern int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, pte_t pte, int dirty); + +#define arch_make_huge_pte arch_make_huge_pte +extern pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, + vm_flags_t flags); + +#define set_huge_swap_pte_at set_huge_swap_pte_at +extern void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz); +#endif + +#include + +#endif /* _ASM_SW64_HUGETLB_H */ diff --git a/arch/sw_64/include/asm/hw_init.h b/arch/sw_64/include/asm/hw_init.h new file mode 100644 index 0000000000000000000000000000000000000000..2078c66d1c4fd3912392ba73790320657f5a83cc --- /dev/null +++ b/arch/sw_64/include/asm/hw_init.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_INIT_H +#define _ASM_SW64_HW_INIT_H +#include +#include + +#include + +#define MMSIZE __va(0x2040) + +/* + * Descriptor for a cache + */ +struct cache_desc { + unsigned int size; /* Bytes per way */ + unsigned int sets; /* Number of lines per set */ + unsigned char ways; /* Number of ways */ + unsigned char linesz; /* Size of line in bytes */ + unsigned char flags; /* Flags describing cache properties */ +}; + +struct cpuinfo_sw64 { + unsigned long last_asid; + unsigned long last_vpn; + unsigned long ipi_count; + struct cache_desc icache; /* Primary I-cache */ + struct cache_desc dcache; /* Primary D or combined I/D cache */ + struct cache_desc scache; /* Secondary cache */ + struct cache_desc tcache; /* Tertiary/split secondary cache */ +} __aligned(SMP_CACHE_BYTES); + +struct cpu_desc_t { + __u8 model; + __u8 family; + __u8 chip_var; + __u8 arch_var; + __u8 arch_rev; + __u8 pa_bits; + __u8 va_bits; + char vendor_id[16]; + char model_id[64]; + unsigned long frequency; +} __randomize_layout; + +#define MAX_NUMSOCKETS 8 +struct socket_desc_t { + bool is_online; /* 1 for online, 0 for offline */ + int numcores; + unsigned long socket_mem; +}; + +enum memmap_types { + memmap_reserved, + memmap_pci, + memmap_initrd, + memmap_kvm, + memmap_crashkernel, + memmap_acpi, + memmap_use, + memmap_protected, +}; + +#define MAX_NUMMEMMAPS 64 +struct memmap_entry { + u64 addr; /* start of memory segment */ + u64 size; /* size of memory segment */ + enum memmap_types type; +}; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; +extern void store_cpu_data(int cpu); + +extern struct cpu_desc_t cpu_desc; +extern struct socket_desc_t socket_desc[MAX_NUMSOCKETS]; +extern int memmap_nr; +extern struct memmap_entry memmap_map[MAX_NUMMEMMAPS]; +extern cpumask_t cpu_offline; +extern bool memblock_initialized; + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type); +void __init process_memmap(void); + +static inline unsigned long get_cpu_freq(void) +{ + return cpu_desc.frequency; +} + +static inline void update_cpu_freq(unsigned long khz) +{ + cpu_desc.frequency = khz * 1000; +} + +#define EMUL_FLAG (0x1UL << 63) +#define MMSIZE_MASK (EMUL_FLAG - 1) + 
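+/*
+ * Run-mode static keys. Host (bare metal) is the default-true key;
+ * the guest and emulator keys are presumably switched on during early
+ * boot when the kernel finds it is running virtualized or emulated.
+ * The is_in_*() helpers below then resolve to a single patched branch.
+ */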
+DECLARE_STATIC_KEY_TRUE(run_mode_host_key); +DECLARE_STATIC_KEY_FALSE(run_mode_guest_key); +DECLARE_STATIC_KEY_FALSE(run_mode_emul_key); + +#define is_in_host() static_branch_likely(&run_mode_host_key) +#define is_in_guest() static_branch_unlikely(&run_mode_guest_key) +#define is_in_emul() static_branch_unlikely(&run_mode_emul_key) +#define is_guest_or_emul() !static_branch_likely(&run_mode_host_key) + +#define CPU_SW3231 0x31 +#define CPU_SW831 0x32 +#define CPU_SW8A 0x41 + +#define GET_TABLE_ENTRY 1 +#define GET_VENDOR_ID 2 +#define GET_MODEL 3 +#define GET_CPU_FREQ 4 +#define GET_CACHE_INFO 5 + +#define TABLE_ENTRY_MAX 32 +#define VENDOR_ID_MAX 2 +#define MODEL_MAX 8 +#define CACHE_INFO_MAX 4 + +#define L1_ICACHE 0 +#define L1_DCACHE 1 +#define L2_CACHE 2 +#define L3_CACHE 3 + +#define CPUID_ARCH_REV_MASK 0xf +#define CPUID_ARCH_REV(val) ((val) & CPUID_ARCH_REV_MASK) +#define CPUID_ARCH_VAR_SHIFT 4 +#define CPUID_ARCH_VAR_MASK (0xf << CPUID_ARCH_VAR_SHIFT) +#define CPUID_ARCH_VAR(val) \ + (((val) & CPUID_ARCH_VAR_MASK) >> CPUID_ARCH_VAR_SHIFT) +#define CPUID_CHIP_VAR_SHIFT 8 +#define CPUID_CHIP_VAR_MASK (0xf << CPUID_CHIP_VAR_SHIFT) +#define CPUID_CHIP_VAR(val) \ + (((val) & CPUID_CHIP_VAR_MASK) >> CPUID_CHIP_VAR_SHIFT) +#define CPUID_FAMILY_SHIFT 12 +#define CPUID_FAMILY_MASK (0xf << CPUID_FAMILY_SHIFT) +#define CPUID_FAMILY(val) \ + (((val) & CPUID_FAMILY_MASK) >> CPUID_FAMILY_SHIFT) +#define CPUID_MODEL_SHIFT 24 +#define CPUID_MODEL_MASK (0xff << CPUID_MODEL_SHIFT) +#define CPUID_MODEL(val) \ + (((val) & CPUID_MODEL_MASK) >> CPUID_MODEL_SHIFT) +#define CPUID_PA_BITS_SHIFT 32 +#define CPUID_PA_BITS_MASK (0x7fUL << CPUID_PA_BITS_SHIFT) +#define CPUID_PA_BITS(val) \ + (((val) & CPUID_PA_BITS_MASK) >> CPUID_PA_BITS_SHIFT) +#define CPUID_VA_BITS_SHIFT 39 +#define CPUID_VA_BITS_MASK (0x7fUL << CPUID_VA_BITS_SHIFT) +#define CPUID_VA_BITS(val) \ + (((val) & CPUID_VA_BITS_MASK) >> CPUID_VA_BITS_SHIFT) + + +#define CACHE_SIZE_SHIFT 0 +#define CACHE_SIZE_MASK (0xffffffffUL << CACHE_SIZE_SHIFT) +#define CACHE_SIZE(val) \ + (((val) & CACHE_SIZE_MASK) >> CACHE_SIZE_SHIFT) +#define CACHE_LINE_BITS_SHIFT 32 +#define CACHE_LINE_BITS_MASK (0xfUL << CACHE_LINE_BITS_SHIFT) +#define CACHE_LINE_BITS(val) \ + (((val) & CACHE_LINE_BITS_MASK) >> CACHE_LINE_BITS_SHIFT) +#define CACHE_INDEX_BITS_SHIFT 36 +#define CACHE_INDEX_BITS_MASK (0x3fUL << CACHE_INDEX_BITS_SHIFT) +#define CACHE_INDEX_BITS(val) \ + (((val) & CACHE_INDEX_BITS_MASK) >> CACHE_INDEX_BITS_SHIFT) +#define current_cpu_data cpu_data[smp_processor_id()] + +#endif /* _ASM_SW64_HW_INIT_H */ diff --git a/arch/sw_64/include/asm/hw_irq.h b/arch/sw_64/include/asm/hw_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..3cfc725f7517dcacadeffb3f47a675aef142288e --- /dev/null +++ b/arch/sw_64/include/asm/hw_irq.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_HW_IRQ_H +#define _ASM_SW64_HW_IRQ_H + +#include + +extern volatile unsigned long irq_err_count; +DECLARE_PER_CPU(unsigned long, irq_pmi_count); + +#define ACTUAL_NR_IRQS NR_IRQS + +#ifdef CONFIG_PCI_MSI +typedef unsigned int vector_irq_t[PERCPU_MSI_IRQS]; +DECLARE_PER_CPU(vector_irq_t, vector_irq); +#endif +#endif /* _ASM_SW64_HW_IRQ_H */ diff --git a/arch/sw_64/include/asm/idle.h b/arch/sw_64/include/asm/idle.h new file mode 100644 index 0000000000000000000000000000000000000000..95e145f25306ada8722f28ad9976601076658afb --- /dev/null +++ b/arch/sw_64/include/asm/idle.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IDLE_H 
+#define _ASM_SW64_IDLE_H + +extern void arch_cpu_idle(void); + +#endif /* _ASM_SW64_IDLE_H */ diff --git a/arch/sw_64/include/asm/insn.h b/arch/sw_64/include/asm/insn.h new file mode 100644 index 0000000000000000000000000000000000000000..437cb48d1e9306bc46f206b10a1b0fb176a8e65a --- /dev/null +++ b/arch/sw_64/include/asm/insn.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_INSN_H +#define _ASM_SW64_INSN_H +#include + +/* Register numbers */ +enum { + R26 = 26, + R27, + R28, + R31 = 31, +}; + +#define BR_MAX_DISP 0xfffff +/* SW64 instructions are always 32 bits. */ +#define SW64_INSN_SIZE 4 + +#define ___SW64_RA(a) (((a) & 0x1f) << 21) +#define ___SW64_RB(b) (((b) & 0x1f) << 16) +#define ___SW64_SIMP_RC(c) (((c) & 0x1f)) +#define ___SW64_ST_DISP(disp) (((disp) & 0xffff)) +#define ___SW64_SYSCALL_FUNC(func) ((func) & 0xff) +#define ___SW64_BR_DISP(disp) (((disp) & 0x1fffff)) + + +#define SW64_INSN_BIS 0x40000740 +#define SW64_INSN_CALL 0x04000000 +#define SW64_INSN_SYS_CALL 0x02000000 +#define SW64_INSN_BR 0x10000000 + +#define SW64_NOP (0x43ff075f) +#define SW64_BIS(a, b, c) (SW64_INSN_BIS | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_SIMP_RC(c)) +#define SW64_CALL(a, b, disp) (SW64_INSN_CALL | ___SW64_RA(a) | ___SW64_RB(b) | ___SW64_ST_DISP(disp)) +#define SW64_SYS_CALL(func) (SW64_INSN_SYS_CALL | ___SW64_SYSCALL_FUNC(func)) +#define SW64_BR(a, disp) (SW64_INSN_BR | ___SW64_RA(a) | ___SW64_BR_DISP(disp)) + +extern int sw64_insn_read(void *addr, u32 *insnp); +extern int sw64_insn_write(void *addr, u32 insn); +extern int sw64_insn_double_write(void *addr, u64 insn); +extern unsigned int sw64_insn_nop(void); +extern unsigned int sw64_insn_call(unsigned int ra, unsigned int rb); +extern unsigned int sw64_insn_sys_call(unsigned int num); +extern unsigned int sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc); + +#define SW64_OPCODE_RA(opcode) ((opcode >> 21) & 0x1f) + +#define SW64_INSN(name, opcode, mask) \ +static inline bool sw64_insn_is_##name(u32 insn) \ +{ \ + return (insn & mask) == opcode; \ +} + +SW64_INSN(sys_call_b, 0x00000000, 0xfc000000); +SW64_INSN(sys_call, 0x00000001, 0xfc000000); +SW64_INSN(call, 0x04000000, 0xfc000000); +SW64_INSN(ret, 0x08000000, 0xfc000000); +SW64_INSN(jmp, 0x0c000000, 0xfc000000); +SW64_INSN(br, 0x10000000, 0xfc000000); +SW64_INSN(bsr, 0x14000000, 0xfc000000); +SW64_INSN(memb, 0x18000000, 0xfc00ffff); +SW64_INSN(imemb, 0x18000001, 0xfc00ffff); +SW64_INSN(rtc, 0x18000020, 0xfc00ffff); +SW64_INSN(halt, 0x18000080, 0xfc00ffff); +SW64_INSN(rd_f, 0x18001000, 0xfc00ffff); +SW64_INSN(beq, 0xc0000000, 0xfc000000); +SW64_INSN(bne, 0xc4000000, 0xfc000000); +SW64_INSN(blt, 0xc8000000, 0xfc000000); +SW64_INSN(ble, 0xcc000000, 0xfc000000); +SW64_INSN(bgt, 0xd0000000, 0xfc000000); +SW64_INSN(bge, 0xd4000000, 0xfc000000); +SW64_INSN(blbc, 0xd8000000, 0xfc000000); +SW64_INSN(blbs, 0xdc000000, 0xfc000000); +SW64_INSN(fbeq, 
0xe0000000, 0xfc000000); +SW64_INSN(fbne, 0xe4000000, 0xfc000000); +SW64_INSN(fblt, 0xe8000000, 0xfc000000); +SW64_INSN(fble, 0xec000000, 0xfc000000); +SW64_INSN(fbgt, 0xf0000000, 0xfc000000); +SW64_INSN(fbge, 0xf4000000, 0xfc000000); +SW64_INSN(lldw, 0x20000000, 0xfc00f000); +SW64_INSN(lldl, 0x20001000, 0xfc00f000); + +#endif /* _ASM_SW64_INSN_H */ diff --git a/arch/sw_64/include/asm/io.h b/arch/sw_64/include/asm/io.h new file mode 100644 index 0000000000000000000000000000000000000000..2b045be5257e0e472bc221875117bd6560db2eae --- /dev/null +++ b/arch/sw_64/include/asm/io.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IO_H +#define _ASM_SW64_IO_H + +#ifdef __KERNEL__ + +#include +#include +#include +#include + +/* The generic header contains only prototypes. Including it ensures that + * the implementation we have here matches that interface. + */ +#include + +/* We don't use IO slowdowns on the sw64, but.. */ +#define __SLOW_DOWN_IO do { } while (0) +#define SLOW_DOWN_IO do { } while (0) + +#define page_to_phys(page) page_to_pa(page) + +/* Maximum PIO space address supported? */ +#define IO_SPACE_LIMIT 0xffffffffffffffff + +/* + * Generic IO read/write. These perform native-endian accesses. + */ + +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("stb %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("sth %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("stw %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("stl %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("ldbu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("ldhu %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("ldw %0, 0(%1)\n" + "zapnot %0, 0xf, %0\n" + : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + + asm volatile("ldl %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +/* IO barriers */ + +#define __iormb() rmb() +#define __iowmb() wmb() +#define mmiowb() do { } while (0) + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. + */ +#define readb_relaxed(c) __raw_readb(c) +#define readw_relaxed(c) __raw_readw(c) +#define readl_relaxed(c) __raw_readl(c) +#define readq_relaxed(c) __raw_readq(c) + +#define writeb_relaxed(v, c) __raw_writeb((v), (c)) +#define writew_relaxed(v, c) __raw_writew((v), (c)) +#define writel_relaxed(v, c) __raw_writel((v), (c)) +#define writeq_relaxed(v, c) __raw_writeq((v), (c)) + +/* + * I/O memory access primitives. Reads are ordered relative to any + * following Normal memory access. 
Writes are ordered relative to any prior + * Normal memory access. + */ +#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) +#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) +#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; }) +#define readq(c) ({ u64 __v = readq_relaxed(c); __iormb(); __v; }) + +#define writeb(v, c) ({ __iowmb(); writeb_relaxed((v), (c)); }) +#define writew(v, c) ({ __iowmb(); writew_relaxed((v), (c)); }) +#define writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); }) +#define writeq(v, c) ({ __iowmb(); writeq_relaxed((v), (c)); }) +/* + * We always have external versions of these routines. + */ +extern u8 inb(unsigned long port); +extern u16 inw(unsigned long port); +extern u32 inl(unsigned long port); +extern void outb(u8 b, unsigned long port); +extern void outw(u16 b, unsigned long port); +extern void outl(u32 b, unsigned long port); +#define inb inb +#define inw inw +#define inl inl +#define outb outb +#define outw outw +#define outl outl + +static inline void __iomem *__ioremap(phys_addr_t addr, size_t size, + pgprot_t prot) +{ + unsigned long tmp = addr | PAGE_OFFSET; + + return (void __iomem *)(tmp); +} + +#define ioremap(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_nocache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_cache(addr, size) __ioremap((addr), (size), PAGE_KERNEL) +#define ioremap_uc ioremap_nocache + +#define ioport_map ioport_map +#define ioport_unmap ioport_unmap + +static inline void __iounmap(volatile void __iomem *addr) +{ +} + +#define iounmap __iounmap + +#define ioread16be(p) be16_to_cpu(ioread16(p)) +#define ioread32be(p) be32_to_cpu(ioread32(p)) +#define iowrite16be(v, p) iowrite16(cpu_to_be16(v), (p)) +#define iowrite32be(v, p) iowrite32(cpu_to_be32(v), (p)) + +#define inb_p inb +#define inw_p inw +#define inl_p inl +#define outb_p outb +#define outw_p outw +#define outl_p outl + + +/* + * String version of IO memory access ops: + */ +#define memcpy_fromio memcpy_fromio +extern void memcpy_fromio(void *buffer, const volatile void __iomem *addr, long len); + +#define memcpy_toio memcpy_toio +extern void memcpy_toio(volatile void __iomem *addr, const void *buffer, long len); + +extern void _memset_c_io(volatile void __iomem *addr, unsigned long c, long len); + +#define memset_io memset_io +static inline void memset_io(volatile void __iomem *addr, u8 c, long len) +{ + _memset_c_io(addr, 0x0101010101010101UL * c, len); +} + +static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len) +{ + _memset_c_io(addr, 0x0001000100010001UL * c, len); +} + +/* + * String versions of in/out ops: + */ +extern void insb(unsigned long port, void *dst, unsigned long count); +extern void insw(unsigned long port, void *dst, unsigned long count); +extern void insl(unsigned long port, void *dst, unsigned long count); +extern void outsb(unsigned long port, const void *src, unsigned long count); +extern void outsw(unsigned long port, const void *src, unsigned long count); +extern void outsl(unsigned long port, const void *src, unsigned long count); + +#define insb insb +#define insw insw +#define insl insl +#define outsb outsb +#define outsw outsw +#define outsl outsl + +/* + * These defines will override the defaults when doing RTC queries + */ + +#define RTC_PORT(x) (0x70 + (x)) +#define RTC_ALWAYS_BCD 0 + +/* + * Convert a physical pointer to a virtual kernel pointer for /dev/mem + * access + */ +#define xlate_dev_mem_ptr(p) __va(p) + +/* + * Convert a 
virtual cached pointer to an uncached pointer + */ +#define xlate_dev_kmem_ptr(p) p + +/* + * These are provided by the generic I/O header, since sw64 does not + * select GENERIC_IOMAP. + */ +#define ioread8 ioread8 +#define ioread16 ioread16 +#define ioread32 ioread32 +#define ioread64 ioread64 +#define iowrite8 iowrite8 +#define iowrite16 iowrite16 +#define iowrite32 iowrite32 +#define iowrite64 iowrite64 +#define ioread64be ioread64be +#define iowrite64be iowrite64be +#define ioread8_rep ioread8_rep +#define ioread16_rep ioread16_rep +#define ioread32_rep ioread32_rep +#define iowrite8_rep iowrite8_rep +#define iowrite16_rep iowrite16_rep +#define iowrite32_rep iowrite32_rep +#define pci_iounmap pci_iounmap + +#include + +/* + * Change addresses as seen by the kernel (virtual) to addresses as + * seen by a device (bus), and vice versa. + * + * Note that this only works for a limited range of kernel addresses, + * and very well may not span all memory. Consider this interface + * deprecated in favour of the DMA-mapping API. + */ +static inline unsigned long __deprecated virt_to_bus(void *address) +{ + return virt_to_phys(address); +} +#define isa_virt_to_bus virt_to_bus + +static inline void * __deprecated bus_to_virt(unsigned long address) +{ + void *virt; + + /* This check is a sanity check but also ensures that bus address 0 + * maps to virtual address 0 which is useful to detect null pointers + * (the NCR driver is much simpler if NULL pointers are preserved). + */ + virt = phys_to_virt(address); + return (long)address <= 0 ? NULL : virt; +} +#define isa_bus_to_virt bus_to_virt + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_IO_H */ diff --git a/arch/sw_64/include/asm/irq.h b/arch/sw_64/include/asm/irq.h new file mode 100644 index 0000000000000000000000000000000000000000..b3ac4105c29e14fc5f0cd2fde62c8db63f3cc525 --- /dev/null +++ b/arch/sw_64/include/asm/irq.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQ_H +#define _ASM_SW64_IRQ_H + +/* + * arch/sw/include/asm/irq.h + * + * (C) 2012 OSKernel JN + */ + +#include + +#define NR_VECTORS_PERCPU 256 +#define NR_IRQS_LEGACY 16 +#define NR_IRQS ((NR_VECTORS_PERCPU + NR_IRQS_LEGACY) * NR_CPUS) + +static inline int irq_canonicalize(int irq) +{ + /* + * XXX is this true for all SW64 systems? The old serial driver + * did it this way for years without any complaints, so.... + */ + return ((irq == 2) ? 9 : irq); +} + +struct pt_regs; +extern void (*perf_irq)(unsigned long vector, struct pt_regs *regs); +extern void fixup_irqs(void); +extern void sw64_timer_interrupt(void); + +#endif /* _ASM_SW64_IRQ_H */ diff --git a/arch/sw_64/include/asm/irq_impl.h b/arch/sw_64/include/asm/irq_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..797af433a1267c8b49cfda9901baed051ff16b98 --- /dev/null +++ b/arch/sw_64/include/asm/irq_impl.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the IRQ handling routines in irq.c.
+ */ + +#ifndef _ASM_SW64_IRQ_IMPL_H +#define _ASM_SW64_IRQ_IMPL_H + +#include +#include +#include + +#include + +#define SW64_PCIE0_INT_BASE 17 +#define SW64_PCIE0_MSI_BASE 21 + +#define SW64_PCIE1_INT_BASE 277 +#define SW64_PCIE1_MSI_BASE 281 + +#define RTC_IRQ 8 +#define SWI2C_IRQ 14 + +enum sw64_irq_type { + INT_IPI = 1, + INT_PC0 = 2, + INT_PC1 = 3, + INT_INTx = 5, + INT_MSI = 6, + INT_MT = 7, + INT_RTC = 9, + INT_FAULT = 10, + INT_VT_SERIAL = 12, + INT_VT_HOTPLUG = 13, + INT_DEV = 17, + INT_NMI = 18, + INT_LEGACY = 31, +}; + +extern struct irqaction timer_irqaction; +extern void init_rtc_irq(irq_handler_t handler); +extern void handle_irq(int irq); +extern void handle_ipi(struct pt_regs *regs); +extern void __init sw64_init_irq(void); +extern irqreturn_t timer_interrupt(int irq, void *dev); + +#endif /* _ASM_SW64_IRQ_IMPL_H */ diff --git a/arch/sw_64/include/asm/irqflags.h b/arch/sw_64/include/asm/irqflags.h new file mode 100644 index 0000000000000000000000000000000000000000..b4440f25a51d622402198e1239bcac10908ee5a5 --- /dev/null +++ b/arch/sw_64/include/asm/irqflags.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_IRQFLAGS_H +#define _ASM_SW64_IRQFLAGS_H + +#include + +#define IPL_MIN 0 +#define IPL_MAX 7 + +#define getipl() (rdps() & 7) +#define setipl(ipl) ((void) swpipl(ipl)) + +static inline unsigned long arch_local_save_flags(void) +{ + return rdps(); +} + +static inline void arch_local_irq_disable(void) +{ + setipl(IPL_MAX); + barrier(); +} + +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags = swpipl(IPL_MAX); + + barrier(); + return flags; +} + +static inline void arch_local_irq_enable(void) +{ + barrier(); + setipl(IPL_MIN); +} + +static inline void arch_local_irq_restore(unsigned long flags) +{ + barrier(); + setipl(flags); + barrier(); +} + +static inline bool arch_irqs_disabled_flags(unsigned long flags) +{ + return flags > IPL_MIN; +} + +static inline bool arch_irqs_disabled(void) +{ + return arch_irqs_disabled_flags(getipl()); +} + +#endif /* _ASM_SW64_IRQFLAGS_H */ diff --git a/arch/sw_64/include/asm/jump_label.h b/arch/sw_64/include/asm/jump_label.h new file mode 100644 index 0000000000000000000000000000000000000000..32fbf7573b206bb2c935cc173de392b100d02010 --- /dev/null +++ b/arch/sw_64/include/asm/jump_label.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_JUMP_LABEL_H +#define _ASM_SW64_JUMP_LABEL_H + +#ifndef __ASSEMBLY__ + +#include +#include + +#define JUMP_LABEL_NOP_SIZE SW64_INSN_SIZE + +static __always_inline bool arch_static_branch(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: nop\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) +{ + asm_volatile_goto("1: br %l[l_yes]\n\t" + ".pushsection __jump_table, \"aw\"\n\t" + ".align 3\n\t" + ".quad 1b, %l[l_yes], %0\n\t" + ".popsection\n\t" + : : "i"(&((char *)key)[branch]) : : l_yes); + + return false; +l_yes: + return true; +} + +typedef u64 jump_label_t; + +struct jump_entry { + jump_label_t code; + jump_label_t target; + jump_label_t key; +}; + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_JUMP_LABEL_H */ diff --git a/arch/sw_64/include/asm/kdebug.h b/arch/sw_64/include/asm/kdebug.h new file mode 100644 index 
0000000000000000000000000000000000000000..73793057c3e87d5e9f40fa0032b8522cd1c31126 --- /dev/null +++ b/arch/sw_64/include/asm/kdebug.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KDEBUG_H +#define _ASM_SW64_KDEBUG_H + +#include + +enum die_val { + DIE_OOPS = 1, + DIE_BREAK, + DIE_SSTEPBP, + DIE_UPROBE, + DIE_UPROBE_XOL, +}; + +#endif /* _ASM_SW64_KDEBUG_H */ diff --git a/arch/sw_64/include/asm/kexec.h b/arch/sw_64/include/asm/kexec.h new file mode 100644 index 0000000000000000000000000000000000000000..25e0d8da84f8dbe98908179bb061ea5f4759aa6e --- /dev/null +++ b/arch/sw_64/include/asm/kexec.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KEXEC_H +#define _ASM_SW64_KEXEC_H + +#ifdef CONFIG_KEXEC + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +#define KEXEC_CONTROL_PAGE_SIZE 8192 + +#define KEXEC_ARCH KEXEC_ARCH_SW64 + +#define KEXEC_SW64_ATAGS_OFFSET 0x1000 +#define KEXEC_SW64_ZIMAGE_OFFSET 0x8000 + +#ifndef __ASSEMBLY__ + +/** + * crash_setup_regs() - save registers for the panic kernel + * @newregs: registers are saved here + * @oldregs: registers to be saved (may be %NULL) + * + * Function copies machine registers from @oldregs to @newregs. If @oldregs is + * %NULL then current registers are stored there. + */ +static inline void crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) { + memcpy(newregs, oldregs, sizeof(*newregs)); + } else { + __asm__ __volatile__ ("stl $0, %0" : "=m" (newregs->regs[0])); + __asm__ __volatile__ ("stl $1, %0" : "=m" (newregs->regs[1])); + __asm__ __volatile__ ("stl $2, %0" : "=m" (newregs->regs[2])); + __asm__ __volatile__ ("stl $3, %0" : "=m" (newregs->regs[3])); + __asm__ __volatile__ ("stl $4, %0" : "=m" (newregs->regs[4])); + __asm__ __volatile__ ("stl $5, %0" : "=m" (newregs->regs[5])); + __asm__ __volatile__ ("stl $6, %0" : "=m" (newregs->regs[6])); + __asm__ __volatile__ ("stl $7, %0" : "=m" (newregs->regs[7])); + __asm__ __volatile__ ("stl $8, %0" : "=m" (newregs->regs[8])); + __asm__ __volatile__ ("stl $9, %0" : "=m" (newregs->regs[9])); + __asm__ __volatile__ ("stl $10, %0" : "=m" (newregs->regs[10])); + __asm__ __volatile__ ("stl $11, %0" : "=m" (newregs->regs[11])); + __asm__ __volatile__ ("stl $12, %0" : "=m" (newregs->regs[12])); + __asm__ __volatile__ ("stl $13, %0" : "=m" (newregs->regs[13])); + __asm__ __volatile__ ("stl $14, %0" : "=m" (newregs->regs[14])); + __asm__ __volatile__ ("stl $15, %0" : "=m" (newregs->regs[15])); + __asm__ __volatile__ ("stl $16, %0" : "=m" (newregs->regs[16])); + __asm__ __volatile__ ("stl $17, %0" : "=m" (newregs->regs[17])); + __asm__ __volatile__ ("stl $18, %0" : "=m" (newregs->regs[18])); + __asm__ __volatile__ ("stl $19, %0" : "=m" (newregs->regs[19])); + __asm__ __volatile__ ("stl $20, %0" : "=m" (newregs->regs[20])); + __asm__ __volatile__ ("stl $21, %0" : "=m" (newregs->regs[21])); + __asm__ __volatile__ ("stl $22, %0" : "=m" (newregs->regs[22])); + __asm__ __volatile__ ("stl $23, %0" : "=m" (newregs->regs[23])); + __asm__ __volatile__ ("stl $24, %0" : "=m" (newregs->regs[24])); + __asm__ __volatile__ ("stl $25, %0" : "=m" (newregs->regs[25])); + __asm__ __volatile__ ("stl $26, %0" : "=m" (newregs->regs[26])); + __asm__ __volatile__ ("stl $27, %0" : 
"=m" (newregs->regs[27])); + __asm__ __volatile__ ("stl $28, %0" : "=m" (newregs->regs[28])); + __asm__ __volatile__ ("stl $29, %0" : "=m" (newregs->regs[29])); + __asm__ __volatile__ ("stl $30, %0" : "=m" (newregs->regs[30])); + newregs->pc = (unsigned long)current_text_addr(); + } +} + +/* Function pointer to optional machine-specific reinitialization */ +extern void (*kexec_reinit)(void); + +#endif /* __ASSEMBLY__ */ + +struct kimage; +extern unsigned long kexec_args[4]; + +#endif /* CONFIG_KEXEC */ + +#endif /* _ASM_SW64_KEXEC_H */ diff --git a/arch/sw_64/include/asm/kgdb.h b/arch/sw_64/include/asm/kgdb.h new file mode 100644 index 0000000000000000000000000000000000000000..a00a45ce767ca74361319836d3b188db9178285b --- /dev/null +++ b/arch/sw_64/include/asm/kgdb.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * sw64 KGDB support + * + * Based on arch/arm64/include/kgdb.h + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_KGDB_H +#define _ASM_SW64_KGDB_H + +#include +#include + +#ifndef __ASSEMBLY__ + + +#define GDB_ADJUSTS_BREAK_OFFSET +#define BREAK_INSTR_SIZE 4 +#define CACHE_FLUSH_IS_SAFE 0 + +static inline void arch_kgdb_breakpoint(void) +{ + __asm__ __volatile__("sys_call %0" : : "i"(HMC_bpt)); +} + +void sw64_task_to_gdb_regs(struct task_struct *task, unsigned long *regs); + +extern void kgdb_handle_bus_error(void); +extern int kgdb_fault_expected; +extern unsigned long get_reg(struct task_struct *task, unsigned long regno); + +#endif /* !__ASSEMBLY__ */ + +/* + * general purpose registers size in bytes. + */ +#define DBG_MAX_REG_NUM (67) + +/* + * Size of I/O buffer for gdb packet. + * considering to hold all register contents, size is set + */ + +#define BUFMAX 4096 + +/* + * Number of bytes required for gdb_regs buffer. 
+ * _GP_REGS are 8 bytes, _FP_REGS are 16 bytes and _EXTRA_REGS are 4 bytes + * each. GDB fails to connect for sizes beyond this with the error + * "'g' packet reply is too long" + */ +#define NUMREGBYTES (DBG_MAX_REG_NUM * 8) + +#endif /* _ASM_SW64_KGDB_H */ diff --git a/arch/sw_64/include/asm/kprobes.h b/arch/sw_64/include/asm/kprobes.h new file mode 100644 index 0000000000000000000000000000000000000000..0c7be8109ed29423cadec91e4f0ffc9d65e7ab0b --- /dev/null +++ b/arch/sw_64/include/asm/kprobes.h @@ -0,0 +1,76 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel Probes (KProbes) + * Based on arch/mips/include/asm/kprobes.h + */ + +#ifndef _ASM_SW64_KPROBES_H +#define _ASM_SW64_KPROBES_H + +#include + +#define BREAK_KPROBE 0x40ffffff +#define BREAK_KPROBE_SS 0x40fffeff + +#ifdef CONFIG_KPROBES +#include +#include + +#include +#include + +#define __ARCH_WANT_KPROBES_INSN_SLOT + +struct kprobe; +struct pt_regs; + +typedef u32 kprobe_opcode_t; + +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) \ +do { \ + if (p->addr) \ + flush_icache_range((unsigned long)p->addr, \ + (unsigned long)p->addr + \ + (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \ +} while (0) + + +#define kretprobe_blacklist_size 0 + +void arch_remove_kprobe(struct kprobe *p); + +/* Architecture specific copy of original instruction */ +struct arch_specific_insn { + /* copy of the original instruction */ + kprobe_opcode_t *insn; + /* + * Set in kprobes code, initially to 0. If the instruction can be + * emulated, this is set to 1, if not, to -1. + */ + int boostable; +}; + +struct prev_kprobe { + struct kprobe *kp; + unsigned long status; +}; + +#define SKIP_DELAYSLOT 0x0001 + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned long kprobe_status; + /* Per-thread fields, used while emulating branches */ + unsigned long flags; + unsigned long target_pc; + struct prev_kprobe prev_kprobe; +}; +extern int kprobe_handler(struct pt_regs *regs); +extern int post_kprobe_handler(struct pt_regs *regs); +extern int kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr); + + +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_SW64_KPROBES_H */ diff --git a/arch/sw_64/include/asm/kvm_asm.h b/arch/sw_64/include/asm/kvm_asm.h new file mode 100644 index 0000000000000000000000000000000000000000..fd1b25018fc8c37f97a176201f0b8e444bd2ed3e --- /dev/null +++ b/arch/sw_64/include/asm/kvm_asm.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_ASM_H +#define _ASM_SW64_KVM_ASM_H + +#define SW64_KVM_EXIT_HOST_INTR 0 +#define SW64_KVM_EXIT_IO 1 +#define SW64_KVM_MIGRATION_SET_DIRTY 2 +#define SW64_KVM_MIGRATION_SET_DIRTY_HM 3 +#define SW64_KVM_EXIT_HALT 10 +#define SW64_KVM_EXIT_SHUTDOWN 12 +#define SW64_KVM_EXIT_TIMER 13 +#define SW64_KVM_EXIT_IPI 14 +#define SW64_KVM_EXIT_STOP 16 +#define SW64_KVM_EXIT_RESTART 17 +#define SW64_KVM_EXIT_APT_FAULT 18 +#define SW64_KVM_EXIT_FATAL_ERROR 22 +#define SW64_KVM_EXIT_MEMHOTPLUG 23 +#define SW64_KVM_EXIT_DEBUG 24 + + +#define kvm_sw64_exception_type \ + {0, "HOST_INTR" }, \ + {1, "IO" }, \ + {10, "HALT" }, \ + {12, "SHUTDOWN" }, \ + {13, "TIMER" }, \ + {14, "IPI" }, \ + {16, "STOP" }, \ + {17, "RESTART" }, \ + {18, "APT_FAULT" }, \ + {22, "FATAL_ERROR" }, \ + {23, "MEMHOTPLUG" }, \ + {24, "DEBUG" } + + +#include + +#endif /* _ASM_SW64_KVM_ASM_H */
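The exit codes and the kvm_sw64_exception_type table above use the {value, "name"} layout that the kernel's trace-event helpers expect, so a tracepoint can print exit reasons symbolically. A hypothetical TP_printk() line for a trace event carrying an exit_reason field (illustration only, not part of this patch)::

	TP_printk("vcpu exit: %s",
		  __print_symbolic(__entry->exit_reason, kvm_sw64_exception_type))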
diff --git a/arch/sw_64/include/asm/kvm_cma.h b/arch/sw_64/include/asm/kvm_cma.h new file mode 100644 index 0000000000000000000000000000000000000000..d50ba599ceb716ac0309122cf77c1e2ea38c70d7 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_cma.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_CMA_H +#define _ASM_SW64_KVM_CMA_H + +#include + +extern int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma); +#endif /* _ASM_SW64_KVM_CMA_H */ diff --git a/arch/sw_64/include/asm/kvm_emulate.h b/arch/sw_64/include/asm/kvm_emulate.h new file mode 100644 index 0000000000000000000000000000000000000000..915aa6c0bce212b8b06bb3aa19aa6aee8c04e532 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_emulate.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_EMULATE_H +#define _ASM_SW64_KVM_EMULATE_H + +#include +#include + +#define R(x) ((size_t) &((struct kvm_regs *)0)->x) + +static int reg_offsets[32] = { + R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), + R(r9), R(r10), R(r11), R(r12), R(r13), R(r14), R(r15), + R(r16), R(r17), R(r18), + R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), + R(r27), R(r28), R(gp), + 0, 0, +}; + + +static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, + unsigned long val) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + regs_ptr += reg_offsets[reg_num]; + *(unsigned long *)regs_ptr = val; +} + +static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu, u8 reg_num) +{ + void *regs_ptr = (void *)&vcpu->arch.regs; + + if (reg_num == 31) + return 0; + regs_ptr += reg_offsets[reg_num]; + return *(unsigned long *)regs_ptr; +} + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, + struct kvm_run *run); + +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more); +void clear_vcpu_irq(struct kvm_vcpu *vcpu); +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq); +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more); +#endif /* _ASM_SW64_KVM_EMULATE_H */ diff --git a/arch/sw_64/include/asm/kvm_host.h b/arch/sw_64/include/asm/kvm_host.h new file mode 100644 index 0000000000000000000000000000000000000000..09a995218a2cbf473933f89d45631997ad6c73bd --- /dev/null +++ b/arch/sw_64/include/asm/kvm_host.h @@ -0,0 +1,225 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_HOST_H +#define _ASM_SW64_KVM_HOST_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#define last_vpn(cpu) (cpu_data[cpu].last_vpn) + +#ifdef CONFIG_SUBARCH_C3B +#define VPN_BITS 8 +#define GUEST_RESET_PC 0xffffffff80011100 +#endif + +#ifdef CONFIG_SUBARCH_C4 +#define VPN_BITS 10 +#define GUEST_RESET_PC 0xfff0000000011002 +#endif + +#define VPN_FIRST_VERSION (1UL << VPN_BITS) +#define VPN_MASK ((1UL << VPN_BITS) - 1) +#define VPN_SHIFT (64 - VPN_BITS) + +#define KVM_MAX_VCPUS 64 +#define KVM_INTERNAL_MEM_SLOTS (KVM_MEM_SLOTS_NUM - 512) + +#define KVM_HALT_POLL_NS_DEFAULT 0 +#define KVM_IRQCHIP_NUM_PINS 256 +/* KVM Hugepage definitions for sw64 */ +#define KVM_NR_PAGE_SIZES 3 +#define KVM_HPAGE_GFN_SHIFT(x) (((x) - 1) * 9) +#define KVM_HPAGE_SHIFT(x) (PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x)) +#define KVM_HPAGE_SIZE(x) (1UL << KVM_HPAGE_SHIFT(x)) +#define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) +#define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) + +/* + * The architecture supports 48-bit GPA as input to the additional stage + * translations. + */
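For reference, taking PAGE_SHIFT = 13 (8 KiB base pages, from asm/page.h in this patch), the KVM_HPAGE_* macros above evaluate to the following; this is just the arithmetic of the definitions written out::

	KVM_HPAGE_GFN_SHIFT(x) = (x - 1) * 9
	KVM_HPAGE_SHIFT(1) = 13      ->  KVM_HPAGE_SIZE(1) = 8 KiB (base page)
	KVM_HPAGE_SHIFT(2) = 13 + 9  ->  KVM_HPAGE_SIZE(2) = 4 MiB
	KVM_HPAGE_SHIFT(3) = 13 + 18 ->  KVM_HPAGE_SIZE(3) = 2 GiB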
+#define KVM_PHYS_SHIFT (48) +#define KVM_PHYS_SIZE (_AC(1, ULL) << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK (KVM_PHYS_SIZE - _AC(1, ULL)) + +struct kvm_arch_memory_slot { + unsigned long host_phys_addr; + bool valid; +}; + +struct kvm_arch { + unsigned long host_phys_addr; + unsigned long size; + + /* segment table */ + unsigned long *seg_pgd; + + struct swvm_mem mem; + /* Additional stage page table */ + pgd_t *pgd; +}; + +#define KVM_NR_MEM_OBJS 40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. + */ +struct kvm_mmu_memory_cache { + int nobjs; + void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_arch { + struct kvm_regs regs __aligned(32); + struct vcpucb vcb; + struct task_struct *tsk; + unsigned int pcpu_id; /* current running pcpu id */ + + /* Virtual clock device */ + struct hrtimer hrt; + unsigned long timer_next_event; + unsigned long vtimer_freq; + + int first_run; + int halted; + int stopped; + int restart; + + /* Pending virtual interrupts */ + DECLARE_BITMAP(irqs_pending, SWVM_IRQS); + unsigned long vpnc[NR_CPUS]; + + /* Detect first run of a vcpu */ + bool has_run_once; + + /* WAIT executed */ + int wait; + + /* vcpu power-off state */ + bool power_off; + + /* Don't run the guest (internal implementation need) */ + bool pause; + + struct kvm_decode mmio_decode; + + /* Cache some mmu pages needed inside spinlock regions */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* guest live migration */ + unsigned long migration_mark; + unsigned long shtclock; +}; + +struct vmem_info { + unsigned long start; + size_t size; + atomic_t refcnt; +}; + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 pid; + u64 exits; + u64 io_exits; + u64 mmio_exits; + u64 migration_set_dirty; + u64 shutdown_exits; + u64 restart_exits; + u64 stop_exits; + u64 ipi_exits; + u64 timer_exits; + u64 debug_exits; +#ifdef CONFIG_KVM_MEMHOTPLUG + u64 memhotplug_exits; +#endif + u64 fatal_error_exits; + u64 halt_exits; + u64 halt_successful_poll; + u64 halt_attempted_poll; + u64 halt_wakeup; + u64 halt_poll_success_ns; + u64 halt_poll_fail_ns; + u64 halt_poll_invalid; + u64 signal_exits; + u64 steal; + u64 st_max; + u64 utime; + u64 stime; + u64 gtime; +}; + +#ifdef CONFIG_KVM_MEMHOTPLUG +void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr); +#endif +#ifdef CONFIG_SUBARCH_C4 +#define KVM_ARCH_WANT_MMU_NOTIFIER +#endif +int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, bool blockable); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +int kvm_test_age_hva(struct kvm *kvm, unsigned long hva); + +void update_vcpu_stat_time(struct kvm_vcpu_stat *vcpu_stat); +void check_vcpu_requests(struct kvm_vcpu *vcpu); +void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu); +int vmem_init(void); +void vmem_exit(void); +int __sw64_vcpu_run(unsigned long vcb_pa, struct kvm_regs *regs, + struct hcall_args *args); +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs); +void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type); +static inline void kvm_arch_hardware_disable(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {} +static inline void
kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} +static inline void kvm_arch_free_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) {} +static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {} +static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {} +static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); + +int kvm_sw64_perf_init(void); +int kvm_sw64_perf_teardown(void); +void kvm_flush_tlb_all(void); +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn); +int kvm_sw64_init_vm(struct kvm *kvm); +void kvm_sw64_destroy_vm(struct kvm *kvm); +int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu); +long kvm_sw64_set_vcb(struct file *filp, unsigned long arg); +long kvm_sw64_get_vcb(struct file *filp, unsigned long arg); + +void update_aptp(unsigned long pgd); +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu); +#endif /* _ASM_SW64_KVM_HOST_H */ diff --git a/arch/sw_64/include/asm/kvm_mmio.h b/arch/sw_64/include/asm/kvm_mmio.h new file mode 100644 index 0000000000000000000000000000000000000000..c87b259e9395f0943b062c4d4a6e08a433bacf1d --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmio.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMIO_H +#define _ASM_SW64_KVM_MMIO_H + +#include +#include + +struct kvm_decode { + unsigned long rt; + bool sign_extend; +}; + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct hcall_args *hargs); + +#endif /* _ASM_SW64_KVM_MMIO_H */ diff --git a/arch/sw_64/include/asm/kvm_mmu.h b/arch/sw_64/include/asm/kvm_mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..f4493de934bab402a088d0deec64fe86b86a7b70 --- /dev/null +++ b/arch/sw_64/include/asm/kvm_mmu.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_MMU_H +#define _ASM_SW64_KVM_MMU_H + +#define AF_ACCESS_TYPE_SHIFT 55 +#define AF_INV_LEVEL_SHIFT 53 +#define AF_FAULT_STATUS_SHIFT 48 + +#define AF_ACCESS_TYPE_MASK 0x3 +#define AF_INV_LEVEL_MASK 0x3 +#define AF_FAULT_STATUS_MASK 0x1f +#define AF_ENTRY_ADDR_MASK ((0x1UL << AF_FAULT_STATUS_SHIFT) - 1) + +/* access type definition */ +#define AF_READ_ACCESS_TYPE 0x1 +#define AF_WRITE_ACCESS_TYPE 0x2 +#define AF_EXEC_ACCESS_TYPE 0x3 + +/* invalid page level */ +#define AF_INV_LEVEL_1 0 +#define AF_INV_LEVEL_2 1 +#define AF_INV_LEVEL_3 2 +#define AF_INV_LEVEL_4 3 + +/* fault status */ +#define AF_STATUS_MISCONFIG 0x1 +#define AF_STATUS_FOR 0x2 +#define AF_STATUS_FOW 0x4 +#define AF_STATUS_FOE 0x8 +#define AF_STATUS_INV 0x10 + +#define KVM_MMU_CACHE_MIN_PAGES 2 + +static inline void kvm_set_aptpte_readonly(pte_t *pte) +{ + pte_val(*pte) |= _PAGE_FOW; +} + +static inline bool kvm_aptpte_readonly(pte_t *pte) +{ + return (pte_val(*pte) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpmd_readonly(pmd_t *pmd) +{ + pmd_val(*pmd) |= _PAGE_FOW; +} + +static inline bool kvm_aptpmd_readonly(pmd_t *pmd) +{ + return (pmd_val(*pmd) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline void kvm_set_aptpud_readonly(pud_t *pud) +{ + pud_val(*pud) |= _PAGE_FOW; +} + +static inline bool kvm_aptpud_readonly(pud_t *pud) +{ + return (pud_val(*pud) & _PAGE_FOW) == _PAGE_FOW; +} + +static inline pte_t kvm_pte_mkwrite(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t kvm_pte_mkexec(pte_t pte) +{
+ pte_val(pte) &= ~_PAGE_FOE; + return pte; +} + +static inline bool kvm_pte_exec(pte_t *pte) +{ + return !(pte_val(*pte) & _PAGE_FOE); +} + +static inline pmd_t kvm_pmd_mkwrite(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t kvm_pmd_mkexec(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOE; + return pmd; +} + +static inline bool kvm_pmd_exec(pmd_t *pmd) +{ + return !(pmd_val(*pmd) & _PAGE_FOE); +} + +static inline pud_t kvm_pud_mkwrite(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOW; + return pud; +} + +static inline pud_t kvm_pud_mkexec(pud_t pud) +{ + pud_val(pud) &= ~_PAGE_FOE; + return pud; +} + +static inline bool kvm_pud_exec(pud_t *pud) +{ + return !(pud_val(*pud) & _PAGE_FOE); +} + +void kvm_core4_commit_memory_region(struct kvm *kvm, + const struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + const struct kvm_memory_slot *new, + enum kvm_mr_change change); +void kvm_core4_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +void kvm_core4_flush_shadow_all(struct kvm *kvm); +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); +int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end); +void kvm_handle_apt_fault(struct kvm_vcpu *vcpu); +int kvm_alloc_addtional_stage_pgd(struct kvm *kvm); +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot); +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); +void apt_unmap_vm(struct kvm *kvm); +#endif /* _ASM_SW64_KVM_MMU_H */ diff --git a/arch/sw_64/include/asm/kvm_para.h b/arch/sw_64/include/asm/kvm_para.h new file mode 100644 index 0000000000000000000000000000000000000000..442f1c7d9f832159bcb04068b8939c1fa3b107cc --- /dev/null +++ b/arch/sw_64/include/asm/kvm_para.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_PARA_H +#define _ASM_SW64_KVM_PARA_H + +#include + +#define HMC_hcall 0x32 + +static inline unsigned long kvm_hypercall3(unsigned long num, + unsigned long arg0, + unsigned long arg1, + unsigned long arg2) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = num; + register unsigned long __r17 __asm__("$17") = arg0; + register unsigned long __r18 __asm__("$18") = arg1; + register unsigned long __r19 __asm__("$19") = arg2; + __asm__ __volatile__( + "sys_call %5" + : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r19), "=r"(__r0) + : "i"(HMC_hcall), "0"(__r16), "1"(__r17), "2"(__r18), "3"(__r19) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} +#endif /* _ASM_SW64_KVM_PARA_H */ diff --git a/arch/sw_64/include/asm/kvm_timer.h b/arch/sw_64/include/asm/kvm_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..8080873c684f82b0e0c06b7e55c72ba475f497cd --- /dev/null +++ b/arch/sw_64/include/asm/kvm_timer.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_KVM_TIMER_H +#define _ASM_SW64_KVM_TIMER_H + +void set_timer(struct kvm_vcpu *vcpu, unsigned long delta); +void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +enum hrtimer_restart clockdev_fn(struct hrtimer *timer); + +#endif /* _ASM_SW64_KVM_TIMER_H */ diff --git a/arch/sw_64/include/asm/linkage.h b/arch/sw_64/include/asm/linkage.h new file mode 100644 index 0000000000000000000000000000000000000000..1721753b4d98a5b0d2621a448761ee18e4038b98 --- /dev/null +++ b/arch/sw_64/include/asm/linkage.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_LINKAGE_H +#define _ASM_SW64_LINKAGE_H + +#define 
cond_syscall(x) asm(".weak\t" #x "\n" #x " = sys_ni_syscall") +#define SYSCALL_ALIAS(alias, name) \ + asm (#alias " = " #name "\n\t.globl " #alias) + +#define SYM_END(name, sym_type) \ + .type name sym_type ASM_NL \ + .size name, .-name + +#endif /* _ASM_SW64_LINKAGE_H */ diff --git a/arch/sw_64/include/asm/livepatch.h b/arch/sw_64/include/asm/livepatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1feec0f6be76ddad2c1e65e0bfacf3d511510af0 --- /dev/null +++ b/arch/sw_64/include/asm/livepatch.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * livepatch.h - sw64-specific Kernel Live Patching Core + */ + +#ifndef _ASM_SW64_LIVEPATCH_H +#define _ASM_SW64_LIVEPATCH_H + +#include + +static inline int klp_check_compiler_support(void) +{ + return 0; +} + +static inline void klp_arch_set_pc(struct pt_regs *regs, unsigned long ip) +{ + regs->regs[27] = ip; + regs->regs[28] = ip; +} + +#endif /* _ASM_SW64_LIVEPATCH_H */ diff --git a/arch/sw_64/include/asm/memory.h b/arch/sw_64/include/asm/memory.h new file mode 100644 index 0000000000000000000000000000000000000000..b2b7492ae477d81e92bc806e093ace0c4ade9c2f --- /dev/null +++ b/arch/sw_64/include/asm/memory.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MEMORY_H +#define _ASM_SW64_MEMORY_H + +#ifdef CONFIG_NUMA +#include +#endif + +#define MIN_MEMORY_BLOCK_SIZE_VM_MEMHP (1UL << 30) +#define NODE0_START (_TEXT_START - __START_KERNEL_map) + +#define MAX_PHYSMEM_BITS 48 + +struct mem_desc_t { + unsigned long phys_base; /* start address of physical memory */ + unsigned long phys_size; /* size of physical memory */ + phys_addr_t base; /* start address of memory managed by kernel */ + phys_addr_t size; /* size of memory managed by kernel */ +}; +extern struct mem_desc_t mem_desc; + +struct numa_node_desc_t { + phys_addr_t base; + phys_addr_t size; +}; +extern struct numa_node_desc_t numa_nodes_desc[]; + +void __init callback_init(void); +void __init mem_detect(void); +void __init sw64_memblock_init(void); +void __init zone_sizes_init(void); +void __init sw64_numa_init(void); +void __init sw64_memory_present(void); + +#endif /* _ASM_SW64_MEMORY_H */ diff --git a/arch/sw_64/include/asm/mmu.h b/arch/sw_64/include/asm/mmu.h new file mode 100644 index 0000000000000000000000000000000000000000..f24219fac654bb3381ea0e85e5faeeb3486d23d3 --- /dev/null +++ b/arch/sw_64/include/asm/mmu.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_H +#define _ASM_SW64_MMU_H + +/* The sw64 MMU context is one "unsigned long" ASID per CPU */ +typedef struct { + unsigned long asid[NR_CPUS]; + void *vdso; +} mm_context_t; +#endif /* _ASM_SW64_MMU_H */ diff --git a/arch/sw_64/include/asm/mmu_context.h b/arch/sw_64/include/asm/mmu_context.h new file mode 100644 index 0000000000000000000000000000000000000000..420ad5f745be49541200a9c68ff22abf1f9731b0 --- /dev/null +++ b/arch/sw_64/include/asm/mmu_context.h @@ -0,0 +1,136 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMU_CONTEXT_H +#define _ASM_SW64_MMU_CONTEXT_H + +#include + +#include +#include + +/* + * The maximum number of ASIDs the processor supports. + */
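The NOTE comment further down explains the versioned-ASID scheme. As a quick illustration of the bit split (a standalone userspace sketch reusing only ASID_BITS = 10 from this header; the generation value 3 and serial number 5 are made-up inputs)::

	#include <stdio.h>

	#define ASID_BITS          10
	#define ASID_FIRST_VERSION (1UL << ASID_BITS)
	#define ASID_MASK          ((1UL << ASID_BITS) - 1)

	int main(void)
	{
		/* e.g. the 5th ASID handed out during generation 3 */
		unsigned long asid = 3 * ASID_FIRST_VERSION + 5;

		printf("hardware ASID: %lu\n", asid & ASID_MASK);  /* 5 */
		printf("generation:    %lu\n", asid >> ASID_BITS); /* 3 */
		return 0;
	}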
+#if defined(CONFIG_SUBARCH_C3B) || defined(CONFIG_SUBARCH_C4) +#define ASID_BITS 10 +#endif + +#include +#define last_asid(cpu) (cpu_data[cpu].last_asid) + +#define ASID_FIRST_VERSION (1UL << ASID_BITS) +#define ASID_MASK ((1UL << ASID_BITS) - 1) + +#define cpu_asid(cpu, mm) ((mm)->context.asid[cpu] & ASID_MASK) + +static inline bool asid_valid(struct mm_struct *mm, unsigned int cpu) +{ + return !((mm->context.asid[cpu] ^ last_asid(cpu)) & ~ASID_MASK); +} + +/* + * NOTE! The way this is set up, the high bits of the "last_asid" (and + * the "mm->context.asid[cpu]") are the ASID _version_ code. A version + * of 0 is always considered invalid, so to invalidate another process + * you only need to do "p->mm->context.asid[cpu] = 0". + * + * If we need more ASIDs than the processor has, we invalidate the old + * user TLBs (tbivp()) and start a new ASID version. That will force a + * new asid for any other processes the next time they want to run. + */ + +static inline void __get_new_mm_context(struct mm_struct *mm, long cpu) +{ + unsigned long asid = last_asid(cpu); + + if (!(++asid & ASID_MASK)) + tbivp(); + mm->context.asid[cpu] = last_asid(cpu) = asid; +} + +static inline void +switch_mm_irqs_off(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *next) +{ + /* Check if our ASID is of an older version, and thus invalid. */ + unsigned long asid, ptbr; + long cpu = smp_processor_id(); + + if (!asid_valid(next_mm, cpu)) + __get_new_mm_context(next_mm, cpu); + + /* Update CSR:UPN and CSR:PTBR. Another thread may have allocated + * a new mm->context.asid (via flush_tlb_mm) without the ASID serial + * number wrapping. We have no way to detect when this is needed. + */ + asid = cpu_asid(cpu, next_mm); + ptbr = virt_to_pfn(next_mm->pgd); + load_mm(asid, ptbr); + cpumask_set_cpu(cpu, mm_cpumask(next_mm)); +} + +#define switch_mm_irqs_off switch_mm_irqs_off + +static inline void +switch_mm(struct mm_struct *prev_mm, struct mm_struct *next_mm, + struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + switch_mm_irqs_off(prev_mm, next_mm, tsk); + local_irq_restore(flags); +} + +#define activate_mm(prev, next) switch_mm(prev, next, current) +#define deactivate_mm(tsk, mm) do { } while (0) + +static inline int init_new_context(struct task_struct *tsk, + struct mm_struct *mm) +{ + int i; + + for_each_possible_cpu(i) + mm->context.asid[i] = 0; + return 0; +} + +static inline void destroy_context(struct mm_struct *mm) +{ + /* Nothing to do.
*/ +} + +static inline void enter_lazy_tlb(struct mm_struct *mm, + struct task_struct *tsk) +{ +} + +static inline int arch_dup_mmap(struct mm_struct *oldmm, + struct mm_struct *mm) +{ + return 0; +} + +static inline void arch_exit_mmap(struct mm_struct *mm) +{ +} + +static inline void arch_unmap(struct mm_struct *mm, unsigned long start, + unsigned long end) +{ +} + +static inline void arch_bprm_mm_init(struct mm_struct *mm, + struct vm_area_struct *vma) +{ +} + +static inline bool arch_vma_access_permitted(struct vm_area_struct *vma, + bool write, bool execute, + bool foreign) +{ + /* by default, allow everything */ + return true; +} +#endif /* _ASM_SW64_MMU_CONTEXT_H */ diff --git a/arch/sw_64/include/asm/mmzone.h b/arch/sw_64/include/asm/mmzone.h new file mode 100644 index 0000000000000000000000000000000000000000..363e2bc98a95b8b4acf2e1bb69ee19392f8d90df --- /dev/null +++ b/arch/sw_64/include/asm/mmzone.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MMZONE_H +#define _ASM_SW64_MMZONE_H + +#include + +/* + * Following are macros that are specific to this numa platform. + */ + +extern pg_data_t *node_data[]; + +#ifdef CONFIG_NUMA +#define NODE_DATA(nid) (node_data[(nid)]) +#endif + +#endif /* _ASM_SW64_MMZONE_H */ diff --git a/arch/sw_64/include/asm/module.h b/arch/sw_64/include/asm/module.h new file mode 100644 index 0000000000000000000000000000000000000000..d1663aab4097ab2cca1d2bb3ae4496245c8d0ea7 --- /dev/null +++ b/arch/sw_64/include/asm/module.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MODULE_H +#define _ASM_SW64_MODULE_H + +#include + +struct mod_arch_specific { + unsigned int gotsecindex; +}; + +#define ARCH_SHF_SMALL SHF_SW64_GPREL + +#ifdef MODULE +asm(".section .got, \"aw\", @progbits; .align 3; .previous"); +#endif + +#endif /* _ASM_SW64_MODULE_H */ diff --git a/arch/sw_64/include/asm/msi.h b/arch/sw_64/include/asm/msi.h new file mode 100644 index 0000000000000000000000000000000000000000..dbf6f81843beb7ff37f9031bd5943dc09ecea698 --- /dev/null +++ b/arch/sw_64/include/asm/msi.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_MSI_H +#define _ASM_SW64_MSI_H + +#include + +#define NR_VECTORS NR_IRQS +#define NR_IRQ_VECTORS NR_IRQS + +#define AUTO_ASSIGN 0 + +#define LAST_DEVICE_VECTOR 31 + +#define MSI_OFFSET 0x44 + +#define NUM_MSI_IRQS 256 + +#define PERCPU_MSI_IRQS 256 + +#define VT_MSIX_MSG_ADDR (0x8000fee00000UL) +#define VT_MSIX_ADDR_DEST_ID_SHIFT 12 +#define VT_MSIX_ADDR_DEST_ID_MASK (0xff << VT_MSIX_ADDR_DEST_ID_SHIFT) +#define VT_MSIX_ADDR_DEST_ID(dest) \ + (((dest) << VT_MSIX_ADDR_DEST_ID_SHIFT) & VT_MSIX_ADDR_DEST_ID_MASK) + + +#ifdef CONFIG_PCI_MSI +extern void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs); +extern int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type); +extern bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector); +extern int msi_compose_msg(unsigned int irq, struct msi_msg *msg); +extern void sw64_irq_noop(struct irq_data *d); +extern struct irq_chip sw64_irq_chip; +extern void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, + unsigned long pci_msi1_addr); + +#define MSI_ADDR_BASE_HI 0 +#define MSI_ADDR_BASE_LO 0x91abc0 + +#define MSI_ADDR_SHIFT 20 +#define MSI_ADDR_DEST_ID_SHIFT 10 + +struct sw64_msi_chip_data { + spinlock_t cdata_lock; + union { + unsigned long msi_config; + unsigned long msiaddr; + }; + unsigned long rc_node; + unsigned long rc_index; + 
unsigned int msi_config_index; + unsigned int dst_cpu; + unsigned int vector; + unsigned int prev_cpu; + unsigned int prev_vector; + unsigned int multi_msi; + bool move_in_progress; +}; + +static inline int rcid_to_msicid(int rcid) +{ + int msicid = 0; + + msicid |= (rcid_to_domain_id(rcid) << 7); + msicid |= (rcid_to_thread_id(rcid) << 6); + msicid |= (rcid_to_core_id(rcid) << 0); + + return msicid; +} + +extern void arch_init_msi_domain(struct irq_domain *domain); +enum irq_alloc_type { + IRQ_ALLOC_TYPE_MSI, + IRQ_ALLOC_TYPE_MSIX, + IRQ_ALLOC_TYPE_INTX, +}; +struct irq_alloc_info { + struct msi_desc *desc; + enum irq_alloc_type type; + struct pci_dev *msi_dev; + irq_hw_number_t hwirq; +}; +typedef struct irq_alloc_info msi_alloc_info_t; +#else /* !CONFIG_PCI_MSI */ +static inline void handle_pci_msi_interrupt(unsigned long type, + unsigned long vector, unsigned long pci_msi1_addr) +{ + pr_warn("SW arch disable CONFIG_PCI_MSI option.\n"); +} +#endif /* CONFIG_PCI_MSI */ +#endif /* _ASM_SW64_MSI_H */ diff --git a/arch/sw_64/include/asm/numa.h b/arch/sw_64/include/asm/numa.h new file mode 100644 index 0000000000000000000000000000000000000000..a2e3171caff1a5d666ae2e93a05ce851405160d2 --- /dev/null +++ b/arch/sw_64/include/asm/numa.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_NUMA_H +#define _ASM_SW64_NUMA_H + +#include +#include + +#ifdef CONFIG_NUMA +extern nodemask_t numa_nodes_parsed __initdata; +extern int numa_off; + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +#define NR_NODE_MEMBLKS (MAX_NUMNODES*2) +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; +extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); +extern void numa_clear_node(unsigned int cpu); +extern void __init numa_set_distance(int from, int to, int distance); +extern void __init early_map_cpu_to_node(unsigned int cpu, int nid); + +#else /* CONFIG_NUMA */ + +static inline void numa_clear_node(unsigned int cpu) { } +static inline void early_map_cpu_to_node(unsigned int cpu, int nid) { } + +#endif /* CONFIG_NUMA */ + +#endif /* _ASM_SW64_NUMA_H */ diff --git a/arch/sw_64/include/asm/page.h b/arch/sw_64/include/asm/page.h new file mode 100644 index 0000000000000000000000000000000000000000..68b4f2fc1b488c4fe85fc9de47f54dec6d57bb86 --- /dev/null +++ b/arch/sw_64/include/asm/page.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PAGE_H +#define _ASM_SW64_PAGE_H + +#include +#include + +/* PAGE_SHIFT determines the page size */ +#define PAGE_SHIFT 13 +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +#define HUGE_MAX_HSTATE 2 + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +extern void clear_page(void *page); +#define clear_user_page(page, vaddr, pg) clear_page(page) + +#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \ + alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr) +#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE + +extern void copy_page(void *_to, void *_from); +#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) + +typedef struct page *pgtable_t; + +extern unsigned long __phys_addr(unsigned long addr); +#ifdef CONFIG_SUBARCH_C3B +extern unsigned long __boot_phys_addr(unsigned long addr); +#else +#define __boot_phys_addr(x) __phys_addr(x) +#endif + 
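Since __va() below just ORs PAGE_OFFSET (0xfff0000000000000, from pgtable-4level.h in this patch) into a physical address, conversions on the kernel linear map reduce to mask arithmetic; the out-of-line __phys_addr() is what handles the general case. A standalone sketch of the round trip under that linear-map assumption::

	#include <stdio.h>

	#define PAGE_OFFSET 0xfff0000000000000UL
	#define PAGE_SHIFT  13				/* 8 KiB pages */

	int main(void)
	{
		unsigned long phys = 0x123456000UL;		/* page aligned */
		unsigned long virt = phys | PAGE_OFFSET;	/* what __va() does */
		unsigned long pfn  = (virt & ~PAGE_OFFSET) >> PAGE_SHIFT;

		printf("virt %#lx -> pfn %#lx\n", virt, pfn);	/* pfn 0x91a2b */
		return 0;
	}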
+#endif /* !__ASSEMBLY__ */ + +#define KERNEL_IMAGE_SIZE (512 * 1024 * 1024) + +#include + +#define __START_KERNEL_map PAGE_OFFSET + +#define __pa(x) __phys_addr((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long) (x) | PAGE_OFFSET)) + +#define __boot_pa(x) __boot_phys_addr((unsigned long)(x)) +#define __boot_va(x) __va(x) + +#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) + +#define virt_to_pfn(vaddr) (PHYS_PFN(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(PFN_PHYS(pfn))) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) ((pfn) < max_mapnr) +#endif /* CONFIG_FLATMEM */ + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC +#include +#include +#endif + +#endif /* _ASM_SW64_PAGE_H */ diff --git a/arch/sw_64/include/asm/pci.h b/arch/sw_64/include/asm/pci.h new file mode 100644 index 0000000000000000000000000000000000000000..21bfcef21c5f4fa9c88072cea2e638e08dc4bcb0 --- /dev/null +++ b/arch/sw_64/include/asm/pci.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PCI_H +#define _ASM_SW64_PCI_H + +#ifdef __KERNEL__ + +#include +#include +#include + +/* + * The following structure is used to manage multiple PCI busses. + */ + +struct pci_dev; +struct pci_bus; +struct resource; +struct sunway_iommu; +struct page; + +struct piu_saved { + unsigned long piuconfig0; + unsigned long piuconfig1; + unsigned long epdmabar; + unsigned long msiaddr; + unsigned long msiconfig[256]; + unsigned long iommuexcpt_ctrl; + unsigned long dtbaseaddr; + unsigned long hpintconfig; + unsigned long pmeintconfig; + unsigned long aererrintconfig; + unsigned long intaconfig; + unsigned long intbconfig; + unsigned long intcconfig; + unsigned long intdconfig; +}; + +/* A controller. Used to manage multiple PCI busses. */ +struct pci_controller { + struct pci_controller *next; + struct pci_bus *bus; + struct resource *io_space; + struct resource *mem_space; + struct resource *pre_mem_space; + struct resource *busn_space; + unsigned long sparse_mem_base; + unsigned long dense_mem_base; + unsigned long sparse_io_base; + unsigned long dense_io_base; + + /* This one's for the kernel only. It's in KSEG somewhere. */ + void __iomem *ep_config_space_base; + void __iomem *rc_config_space_base; + + unsigned long index; + unsigned long node; + DECLARE_BITMAP(piu_msiconfig, 256); + int int_irq; + int service_irq; + /* For compatibility with current (as of July 2003) pciutils + * and XFree86. Eventually will be removed. + */ + unsigned int need_domain_info; + bool iommu_enable; + struct sunway_iommu *pci_iommu; + int first_busno; + int last_busno; + int self_busno; + void *sysdata; +}; + +/* Override the logic in pci_scan_bus for skipping already-configured + * bus numbers. 
+ */ + +#define pcibios_assign_all_busses() (pci_has_flag(PCI_REASSIGN_ALL_BUS)) + +#define PCIBIOS_MIN_IO 0 +#define PCIBIOS_MIN_MEM 0 + +extern void __init sw64_init_pci(void); +extern void __init sw64_device_interrupt(unsigned long vector); +extern void __init sw64_init_irq(void); +extern void __init sw64_init_arch(void); +extern struct pci_ops sw64_pci_ops; +extern int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); +extern struct pci_controller *hose_head; +#ifdef CONFIG_PCI_SW64 +extern void __init setup_chip_pci_ops(void); +#else +#define setup_chip_pci_ops() do { } while (0) +#endif + +extern struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus); +extern struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num); + +extern void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge); +extern void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge); +extern int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); + +#ifdef CONFIG_PCI_DOMAINS +static inline int pci_proc_domain(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + return hose->need_domain_info; +} +#endif + +#ifdef CONFIG_NUMA +static inline int __pcibus_to_node(const struct pci_bus *bus) +{ + struct pci_controller *hose; + + hose = pci_bus_to_pci_controller(bus); + if (!node_online(hose->node)) + return next_node_in(hose->node, node_online_map); + else + return hose->node; +} +#define pcibus_to_node(bus) __pcibus_to_node(bus) +#endif + +#endif /* __KERNEL__ */ + +/* Values for the `which' argument to sys_pciconfig_iobase. */ +#define IOBASE_HOSE 0 +#define IOBASE_SPARSE_MEM 1 +#define IOBASE_DENSE_MEM 2 +#define IOBASE_SPARSE_IO 3 +#define IOBASE_DENSE_IO 4 +#define IOBASE_ROOT_BUS 5 +#define IOBASE_FROM_HOSE 0x10000 + +extern int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, + size_t count); +extern int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, + size_t count); +extern int pci_mmap_legacy_page_range(struct pci_bus *bus, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_state); +extern void pci_adjust_legacy_attr(struct pci_bus *bus, + enum pci_mmap_state mmap_type); +#define HAVE_PCI_LEGACY 1 + +extern int pci_create_resource_files(struct pci_dev *dev); +extern void pci_remove_resource_files(struct pci_dev *dev); +extern void __init reserve_mem_for_pci(void); +extern int chip_pcie_configure(struct pci_controller *hose); + +#define PCI_VENDOR_ID_JN 0x5656 +#define PCI_DEVICE_ID_SW64_ROOT_BRIDGE 0x3231 +#define PCI_DEVICE_ID_JN_PCIESW 0x1000 +#define PCI_DEVICE_ID_JN_PCIEUSIP 0x1200 +#define PCI_DEVICE_ID_JN_PCIE2PCI 0x1314 + +#define NR_IRQ_VECTORS NR_IRQS + +#define LAST_DEVICE_VECTOR 31 + +#define PCITODMA_OFFSET 0x0 /*0 offset*/ + +#endif /* _ASM_SW64_PCI_H */ diff --git a/arch/sw_64/include/asm/pci_impl.h b/arch/sw_64/include/asm/pci_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..aa17a69b73f88330ab175d4921b662f306ab810d --- /dev/null +++ b/arch/sw_64/include/asm/pci_impl.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. + */ +#ifndef _SW64_KERNEL_PCI_IMPL_H +#define _SW64_KERNEL_PCI_IMPL_H + +#include + +struct pci_dev; +struct pci_controller; + +/* The hose list. 
*/ +extern struct pci_controller *hose_head, **hose_tail; + +extern void common_init_pci(void); +extern struct pci_controller *alloc_pci_controller(void); +extern struct resource *alloc_resource(void); + +extern unsigned long size_for_memory(unsigned long max); + +extern const struct dma_map_ops sw64_dma_direct_ops; + +extern struct cma *sw64_kvm_cma; +extern struct gen_pool *sw64_kvm_pool; +#endif /* _SW64_KERNEL_PCI_IMPL_H */ diff --git a/arch/sw_64/include/asm/percpu.h b/arch/sw_64/include/asm/percpu.h new file mode 100644 index 0000000000000000000000000000000000000000..3acdf36bcf5590a7f148db537edbd465ea871839 --- /dev/null +++ b/arch/sw_64/include/asm/percpu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERCPU_H +#define _ASM_SW64_PERCPU_H + +/* + * To calculate addresses of locally defined variables, GCC uses + * 32-bit displacement from the GP. Which doesn't work for per cpu + * variables in modules, as an offset to the kernel per cpu area is + * way above 4G. + * + * Always use weak definitions for percpu variables in modules. + */ +#if defined(MODULE) && defined(CONFIG_SMP) +#define ARCH_NEEDS_WEAK_PER_CPU +#endif + +#include + +#endif /* _ASM_SW64_PERCPU_H */ diff --git a/arch/sw_64/include/asm/perf_event.h b/arch/sw_64/include/asm/perf_event.h new file mode 100644 index 0000000000000000000000000000000000000000..dc55a361babd015aa92fbf7b0387f1e2beeecc40 --- /dev/null +++ b/arch/sw_64/include/asm/perf_event.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PERF_EVENT_H +#define _ASM_SW64_PERF_EVENT_H + +#include +#include + +#ifdef CONFIG_PERF_EVENTS +struct pt_regs; +extern unsigned long perf_instruction_pointer(struct pt_regs *regs); +extern unsigned long perf_misc_flags(struct pt_regs *regs); +#define perf_misc_flags(regs) perf_misc_flags(regs) +#define perf_arch_bpf_user_pt_regs(regs) &regs->user_regs +#endif + +#endif /* _ASM_SW64_PERF_EVENT_H */ diff --git a/arch/sw_64/include/asm/pgalloc.h b/arch/sw_64/include/asm/pgalloc.h new file mode 100644 index 0000000000000000000000000000000000000000..1cc03e3be5b67419b80f00891cd9a27d8f0bcd40 --- /dev/null +++ b/arch/sw_64/include/asm/pgalloc.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGALLOC_H +#define _ASM_SW64_PGALLOC_H + +#include +#include +#include /* for pte_{alloc,free}_one */ + +/* + * Allocate and free page tables. The xxx_kernel() versions are + * used to allocate a kernel page table - this turns on ASN bits + * if any. + */
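All four populate helpers below share one encoding: shift the pfn of the next-level table page into the PFN field and OR in _PAGE_TABLE to mark the entry as a valid table pointer. A standalone sketch of that encoding, using the C3B values from pgtable.h later in this patch (_PFN_SHIFT = 28; _PAGE_TABLE there works out to _PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS = 0x6cc01)::

	#include <stdio.h>

	#define _PFN_SHIFT  28
	#define _PAGE_TABLE 0x6cc01UL	/* VALID | dirty bits | access bits (C3B) */

	int main(void)
	{
		unsigned long pfn = 0x1234;
		unsigned long ent = (pfn << _PFN_SHIFT) | _PAGE_TABLE;

		/* a page-table walker recovers the table's pfn by shifting back */
		printf("entry %#lx -> pfn %#lx\n", ent, ent >> _PFN_SHIFT);
		return 0;
	}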
+static inline void +pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte) +{ + unsigned long pfn = page_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void +p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PFN_SHIFT) | _PAGE_TABLE)); +} + +extern pgd_t *pgd_alloc(struct mm_struct *mm); + +#define check_pgt_cache() do { } while (0) + +#endif /* _ASM_SW64_PGALLOC_H */ diff --git a/arch/sw_64/include/asm/pgtable-4level.h b/arch/sw_64/include/asm/pgtable-4level.h new file mode 100644 index 0000000000000000000000000000000000000000..719e2c5377e349b8765a947a57f273835f2dc52b --- /dev/null +++ b/arch/sw_64/include/asm/pgtable-4level.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_4LEVEL_H +#define _ASM_SW64_PGTABLE_4LEVEL_H + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ +/* + * These are used to make use of C type-checking.. + */ +typedef struct { unsigned long pte; } pte_t; +typedef struct { unsigned long pmd; } pmd_t; +typedef struct { unsigned long pgd; } pgd_t; +typedef struct { unsigned long pud; } pud_t; +typedef struct { unsigned long pgprot; } pgprot_t; + +#define pte_val(x) ((x).pte) +#define pmd_val(x) ((x).pmd) +#define pgd_val(x) ((x).pgd) +#define pud_val(x) ((x).pud) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pmd(x) ((pmd_t) { (x) }) +#define __pud(x) ((pud_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) +#endif /* !__ASSEMBLY__ */ + +#define PAGE_OFFSET 0xfff0000000000000 + +#endif +#endif /* _ASM_SW64_PGTABLE_4LEVEL_H */ diff --git a/arch/sw_64/include/asm/pgtable.h b/arch/sw_64/include/asm/pgtable.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1f825eb74c6e7a01cd80ebf11785a3d2d6a665 --- /dev/null +++ b/arch/sw_64/include/asm/pgtable.h @@ -0,0 +1,789 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PGTABLE_H +#define _ASM_SW64_PGTABLE_H + + +#include + +/* + * This file contains the functions and defines necessary to modify and use + * the sw64 page table tree. + * + * This hopefully works with any standard sw64 page-size, as defined + * by PAGE_SIZE (currently 8192). + */
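With PAGE_SHIFT = 13 and 8-byte entries, every level of the tree holds 1 << (13 - 3) = 1024 entries, so the level shifts defined below work out to::

	PMD_SHIFT   = 13 + 10   = 23  ->  PMD_SIZE   = 8 MiB
	PUD_SHIFT   = 13 + 2*10 = 33  ->  PUD_SIZE   = 8 GiB
	PGDIR_SHIFT = 13 + 3*10 = 43  ->  PGDIR_SIZE = 8 TiB
	PTRS_PER_PTE = PTRS_PER_PMD = PTRS_PER_PUD = PTRS_PER_PGD = 1024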
+#include +#include + +#include +#include +#include /* For TASK_SIZE */ +#include + +struct mm_struct; +struct vm_area_struct; + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmdval) +{ + set_pmd(pmdp, pmdval); +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + *pudp = pud; +} + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + *p4dp = p4d; +} +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PAGE_SHIFT + 3 * (PAGE_SHIFT - 3)) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* PUD_SHIFT determines the size of the area a third-level page table can map */ +#define PUD_SHIFT (PAGE_SHIFT + 2 * (PAGE_SHIFT - 3)) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PMD_SHIFT determines the size of the area a second-level page table can map */ +#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - 3)) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) + +#define CONT_PMD_SHIFT 6 +#define CONT_PMDS (1 << CONT_PMD_SHIFT) +#define CONT_PMD_SIZE (CONT_PMDS * PMD_SIZE) +#define CONT_PMD_MASK (~(CONT_PMD_SIZE - 1)) + +/* + * Entries per page directory level: the sw64 is four-level, with + * all levels having a one-page page table. + */ +#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - 3)) +#define PTRS_PER_PUD (1UL << (PAGE_SHIFT - 3)) + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) +#define FIRST_USER_ADDRESS 0UL + +/* Number of pointers that fit on a page: this will go away. */ +#define PTRS_PER_PAGE (1UL << (PAGE_SHIFT - 3)) + +#define VMALLOC_START (-2 * PGDIR_SIZE) +#ifndef CONFIG_SPARSEMEM_VMEMMAP +#define VMALLOC_END (-PGDIR_SIZE) +#else +#define VMEMMAP_END (-PGDIR_SIZE) +#define vmemmap ((struct page *)VMEMMAP_END - (1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT))) +#define VMALLOC_END ((unsigned long)vmemmap) +#endif + +/* + * HMcode-imposed page table bits + */ +#if defined(CONFIG_SUBARCH_C3B) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_ASM 0x0010 +#define _PAGE_CONT 0x0020 /* used for 256M page size bit */ +#define _PAGE_LEAF 0x0040 /* used for 8M page size bit */ +#define _PAGE_PROTNONE 0x0080 /* used for numa page balancing */ +#define _PAGE_SPECIAL 0x0100 +#define _PAGE_KRE 0x0400 /* xxx - see below on the "accessed" bit */ +#define _PAGE_URE 0x0800 /* xxx */ +#define _PAGE_KWE 0x4000 /* used to do the dirty bit in software */ +#define _PAGE_UWE 0x8000 /* used to do the dirty bit in software */ + +/* .. and these are ours ... */ +#define _PAGE_DIRTY 0x20000 +#define _PAGE_ACCESSED 0x40000 + +#define _PAGE_SPLITTING 0x200000 /* For Transparent Huge Page */ +#define _PAGE_DEVMAP 0x400000 /* For ZONE DEVICE page */ + +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 18 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 21 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 22 /* bit of _PAGE_DEVMAP */ +/* + * NOTE!
The "accessed" bit isn't necessarily exact: it can be kept exactly + * by software (use the KRE/URE/KWE/UWE bits appropriately), but I'll fake it. + * Under Linux/sw64, the "accessed" bit just means "read", and I'll just use + * the KRE/URE bits to watch for it. That way we don't need to overload the + * KWE/UWE bits with both handling dirty and accessed. + * + * Note that the kernel uses the accessed bit just to check whether to page + * out a page or not, so it doesn't have to be exact anyway. + */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS (_PAGE_DIRTY | _PAGE_KWE | _PAGE_UWE) +#define __ACCESS_BITS (_PAGE_ACCESSED | _PAGE_KRE | _PAGE_URE) + +#define _PFN_SHIFT 28 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x)) + +#define page_valid_kern(x) (0) + +#elif defined(CONFIG_SUBARCH_C4) + +#define _PAGE_VALID 0x0001 +#define _PAGE_PRESENT _PAGE_VALID +#define _PAGE_FOR 0x0002 /* used for page protection (fault on read) */ +#define _PAGE_FOW 0x0004 /* used for page protection (fault on write) */ +#define _PAGE_FOE 0x0008 /* used for page protection (fault on exec) */ +#define _PAGE_FIXED 0x0010 +#define _PAGE_CONT 0x0020 /* used for 512M page size bit*/ +#define _PAGE_LEAF 0x0040 /* used for huge page bit */ +#define _PAGE_PCD 0x0080 /* used for page cache disabled */ + +/* and these are sw definition */ +#define _PAGE_WCD 0x0100 +#define _PAGE_ACCESSED 0x0200 +#define _PAGE_SPLITTING 0x0400 /* For Transparent Huge Page */ +#define _PAGE_SPECIAL 0x0800 +#define _PAGE_DEVMAP 0x1000 /* For ZONE DEVICE page */ +#define _PAGE_KERN 0x2000 +#define _PAGE_DIRTY _BITUL(62) +#define _PAGE_PROTNONE _BITUL(63) +#define _PAGE_BIT_FOW 2 /* bit of _PAGE_FOW */ +#define _PAGE_BIT_ACCESSED 9 /* bit of _PAGE_ACCESSED */ +#define _PAGE_BIT_SPLITTING 10 /* bit of _PAGE_SPLITTING */ +#define _PAGE_BIT_DEVMAP 12 /* bit of _PAGE_DEVMAP */ + +/* Used for swap PTEs only. */ +#define _PAGE_SWP_EXCLUSIVE _BITUL(5) + +#define __DIRTY_BITS _PAGE_DIRTY +#define __ACCESS_BITS _PAGE_ACCESSED + +#define _PFN_SHIFT 24 + +/* + * All the normal masks have the "page accessed" bits on, as any time they are used, + * the page is accessed. 
They are cleared only by the page-out routines + */ +#define PAGE_NONE __pgprot(__ACCESS_BITS | _PAGE_FOR | _PAGE_FOW | _PAGE_FOE | _PAGE_LEAF | _PAGE_PROTNONE) +#define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF) +#define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_LEAF) +#define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_KERN | _PAGE_LEAF) +#define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_LEAF | (x)) + +#define page_valid_kern(x) ((x & (_PAGE_VALID | _PAGE_KERN)) == (_PAGE_VALID | _PAGE_KERN)) +#endif + +#define PFN_PTE_SHIFT _PFN_SHIFT + +#define _PFN_BITS (MAX_PHYSMEM_BITS - PAGE_SHIFT) +#define _PFN_MASK (GENMASK(_PFN_BITS - 1, 0) << _PFN_SHIFT) + +#define _PAGE_TABLE (_PAGE_VALID | __DIRTY_BITS | __ACCESS_BITS) +#define _PAGE_CHG_MASK (_PFN_MASK | __DIRTY_BITS | __ACCESS_BITS | _PAGE_SPECIAL | _PAGE_LEAF | _PAGE_CONT) + +#define _PAGE_P(x) _PAGE_NORMAL((x) | _PAGE_FOW) +#define _PAGE_S(x) _PAGE_NORMAL(x) + +/* + * pgprot_noncached() is only for infiniband pci support, and a real + * implementation for RAM would be more complicated. + */ +#define pgprot_noncached(prot) (prot) + +/* + * ZERO_PAGE is a global shared page that is always zero: used + * for zero-mapped memory areas etc.. + */ +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; + + if (page_valid_kern(pte_val(pteval))) { + mb(); + if ((pte_val(pteval) & _PAGE_FOE) == 0) + imemb(); + } +} + +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + pte_t pte; + + pte_val(pte) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pte; +} + +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) +{ + pmd_t pmd; + + pmd_val(pmd) = (pfn << _PFN_SHIFT) | pgprot_val(prot); + return pmd; +} +static inline pud_t pfn_pud(unsigned long pfn, pgprot_t pgprot) +{ + pud_t pud; + + pud_val(pud) = (pfn << _PFN_SHIFT) | pgprot_val(pgprot); + return pud; +} + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pte; +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot); + return pmd; +} + +/* + * Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. 
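+ *
+ * As an illustrative sketch (no new API implied), the encoding is simply
+ *
+ *	pte_val(pfn_pte(pfn, prot)) == (pfn << _PFN_SHIFT) | pgprot_val(prot)
+ *
+ * e.g. on C3B (_PFN_SHIFT == 28), pfn 0x1234 with PAGE_SHARED
+ * (pgprot_val == 0x40c01) works out to 0x12340040c01.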
+ */ +#define page_to_pa(page) (page_to_pfn(page) << PAGE_SHIFT) + +#define p4d_pfn(p4d) ((p4d_val(p4d) & _PFN_MASK) >> _PFN_SHIFT) +#define pud_pfn(pud) ((pud_val(pud) & _PFN_MASK) >> _PFN_SHIFT) +#define pmd_pfn(pmd) ((pmd_val(pmd) & _PFN_MASK) >> _PFN_SHIFT) +#define pte_pfn(pte) ((pte_val(pte) & _PFN_MASK) >> _PFN_SHIFT) + +#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d)) +#define pud_page(pud) pfn_to_page(pud_pfn(pud)) +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +#define pte_page(pte) pfn_to_page(pte_pfn(pte)) + +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pfn_to_virt(pud_pfn(pud)); +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long p4d_page_vaddr(p4d_t p4d) +{ + return (unsigned long)pfn_to_virt(p4d_pfn(p4d)); +} + +static inline unsigned long pud_page_vaddr(pud_t pud) +{ + return (unsigned long)pfn_to_virt(pud_pfn(pud)); +} + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)pfn_to_virt(pmd_pfn(pmd)); +} + +static inline int pte_none(pte_t pte) +{ + return !pte_val(pte); +} + +static inline int pte_valid(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_VALID); +} + +static inline int pte_present(pte_t pte) +{ + return !!(pte_val(pte) & (_PAGE_VALID | _PAGE_PROTNONE)); +} + +static inline int pte_huge(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_LEAF); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_val(*ptep) = 0; +} + +#define pte_accessible(mm, pte) \ + (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte)) + +static inline int pmd_none(pmd_t pmd) +{ + return !pmd_val(pmd); +} + +static inline int pmd_bad(pmd_t pmd) +{ + return (pmd_val(pmd) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because + * split_huge_page will temporarily clear the valid bit (but + * the _PAGE_LEAF flag will remain set at all times while the + * _PAGE_VALID bit is clear). 
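	 *
	 * Illustrative only -- the bit states seen across a split:
	 *
	 *	huge pmd:		_PAGE_VALID | _PAGE_LEAF | ...
	 *	after pmd_mkinvalid():	_PAGE_LEAF | ...
	 *
	 * so testing _PAGE_LEAF as well keeps pmd_present() true throughout.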
+ */ + return !!(pmd_val(pmd) & (_PAGE_VALID | _PAGE_PROTNONE | _PAGE_LEAF)); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + pmd_val(*pmdp) = 0; +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DIRTY); +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_ACCESSED); +} + +#define __HAVE_ARCH_PMD_WRITE +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return !(pmd_val(pmd) & _PAGE_FOW); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + pmd_val(pmd) &= ~(_PAGE_VALID | _PAGE_PROTNONE); + return pmd; +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__DIRTY_BITS); + pmd_val(pmd) |= _PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + pmd_val(pmd) &= ~(__ACCESS_BITS); + return pmd; +} + +static inline pmd_t pmd_mkwrite_novma(pmd_t pmd) +{ + pmd_val(pmd) &= ~_PAGE_FOW; + return pmd; +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + pmd_val(pmd) |= __DIRTY_BITS; + return pmd; +} + +static inline pmd_t pmd_mkdevmap(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_DEVMAP; + return pmd; +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + pmd_val(pmd) |= __ACCESS_BITS; + return pmd; +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_LEAF; + return pmd; +} + +static inline pmd_t pmd_mkcont(pmd_t pmd) +{ + pmd_val(pmd) |= _PAGE_CONT; + return pmd; +} + +static inline int pud_none(pud_t pud) +{ + return !pud_val(pud); +} + +static inline int pud_bad(pud_t pud) +{ + return (pud_val(pud) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int pud_present(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_VALID); +} + +static inline void pud_clear(pud_t *pudp) +{ + pud_val(*pudp) = 0; +} + +static inline pud_t pud_mkhuge(pud_t pud) +{ + pud_val(pud) |= _PAGE_LEAF; + return pud; +} + +static inline int p4d_none(p4d_t p4d) +{ + return !p4d_val(p4d); +} + +static inline int p4d_bad(p4d_t p4d) +{ + return (p4d_val(p4d) & ~_PFN_MASK) != _PAGE_TABLE; +} + +static inline int p4d_present(p4d_t p4d) +{ + return !!(p4d_val(p4d) & _PAGE_VALID); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + p4d_val(*p4dp) = 0; +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +/* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
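+ *
+ * An illustrative sketch of a well-behaved caller (writeback() is a
+ * made-up helper, not something defined here):
+ *
+ *	if (pte_present(pte) && pte_dirty(pte))
+ *		writeback(pte);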
+ */ +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_FOW); +} + +static inline int pte_dirty(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_DIRTY); +} + +static inline int pte_young(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_ACCESSED); +} + +static inline int pte_special(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SPECIAL); +} + +static inline int pte_cont(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_CONT); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + pte_val(pte) &= ~(__DIRTY_BITS); + pte_val(pte) |= _PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkold(pte_t pte) +{ + pte_val(pte) &= ~(__ACCESS_BITS); + return pte; +} + +static inline pte_t pte_mkwrite_novma(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_FOW; + return pte; +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + pte_val(pte) |= __DIRTY_BITS; + return pte; +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + pte_val(pte) |= __ACCESS_BITS; + return pte; +} + +static inline pte_t pte_mkhuge(pte_t pte) +{ + pte_val(pte) |= _PAGE_LEAF; + return pte; +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +static inline pte_t pte_mkdevmap(pte_t pte) +{ + pte_val(pte) |= _PAGE_SPECIAL; + return pte; +} + +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PROTNONE | _PAGE_VALID)) + == _PAGE_PROTNONE; +} +#endif + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pte_devmap(pte_t a) +{ + return (pte_val(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP; +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + +/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine.*/ +#define pmdp_establish generic_pmdp_establish + +static inline int pmd_trans_splitting(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_SPLITTING); +} + +static inline int pmd_trans_cont(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_CONT); +} + +static inline int pmd_trans_huge(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_LEAF); +} + +static inline int has_transparent_hugepage(void) +{ + return 1; +} + +#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP +static inline int pmd_devmap(pmd_t pmd) +{ + return !!(pmd_val(pmd) & _PAGE_DEVMAP); +} + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +static inline int pud_devmap(pud_t pud) +{ + return !!(pud_val(pud) & _PAGE_DEVMAP); +} +#else +static inline int pud_devmap(pud_t pud) +{ + return 0; +} +#endif + +static inline int pgd_devmap(pgd_t pgd) +{ + return 0; +} +#endif +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +#define __HAVE_ARCH_PMDP_GET_AND_CLEAR +static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + unsigned long pmd_val = xchg(&pmdp->pmd, 0); + pmd_t pmd = (pmd_t){pmd_val}; + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pmd_t *pmdp) +{ + set_bit(_PAGE_BIT_FOW, (unsigned long *)pmdp); +} + +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), (prot)) + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +extern int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty); + +#define 
__HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +extern int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH +extern int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp); + +#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH +extern void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp); + +extern pgd_t swapper_pg_dir[1024]; + +/* + * The sw64 doesn't have any external MMU info: the kernel page + * tables contain all the necessary information. + */ +#define update_mmu_cache(vma, address, ptep) do { } while (0) +#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) + +static inline void update_mmu_cache_range(struct vm_fault *vmf, + struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, unsigned int nr) +{ +} + +#if defined(CONFIG_SUBARCH_C3B) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bit 7: _PAGE_PROTNONE (must be zero) + * bits 8-15: swap type + * bits 16-63: swap offset + */ +#define __SWP_TYPE_SHIFT 8 +#define __SWP_TYPE_BITS 8 + +#elif defined(CONFIG_SUBARCH_C4) + +/* + * Encode and decode a swap entry: + * + * Format of swap PTE: + * bit 0: _PAGE_VALID (must be zero) + * bit 6: _PAGE_LEAF (must be zero) + * bits 7-11: swap type + * bits 12-58: swap offset + * bit 63: _PAGE_PROTNONE (must be zero) + */ +#define __SWP_TYPE_SHIFT 7 +#define __SWP_TYPE_BITS 5 + +#endif + +#define __SWP_OFFSET_BITS 47 +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) +#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK) +#define __swp_entry(type, offset) \ + ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +static inline int pte_swp_exclusive(pte_t pte) +{ + return !!(pte_val(pte) & _PAGE_SWP_EXCLUSIVE); +} + +static inline pte_t pte_swp_mkexclusive(pte_t pte) +{ + pte_val(pte) |= _PAGE_SWP_EXCLUSIVE; + return pte; +} + +static inline pte_t pte_swp_clear_exclusive(pte_t pte) +{ + pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE; + return pte; +} + +#define kern_addr_valid(addr) (1) + +#define pte_ERROR(e) \ + pr_err("%s: %d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s: %d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pud_ERROR(e) \ + pr_err("%s: %d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s: %d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e)) +extern void paging_init(void); + +/* We have our own get_unmapped_area to cope with ADDR_LIMIT_32BIT. 
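+ *
+ * (An aside: a worked example of the C3B swap encoding defined above --
+ * __swp_entry(3, 0x1000) yields (3 << 8) | (0x1000 << 16) == 0x10000300,
+ * keeping bits 0 (_PAGE_VALID), 6 (_PAGE_LEAF) and 7 (_PAGE_PROTNONE)
+ * clear, so a swap entry is never mistaken for a present pte.)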
*/ +#define HAVE_ARCH_UNMAPPED_AREA + +#endif /* _ASM_SW64_PGTABLE_H */ diff --git a/arch/sw_64/include/asm/platform.h b/arch/sw_64/include/asm/platform.h new file mode 100644 index 0000000000000000000000000000000000000000..ad54cdc772e13e8a5f9d8430406d11064dfb7ccb --- /dev/null +++ b/arch/sw_64/include/asm/platform.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PLATFORM_H +#define _ASM_SW64_PLATFORM_H + +#include +#if defined(CONFIG_UNCORE_XUELANG) +#include +#elif defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#ifdef CONFIG_EFI +#define BIOS_VERSION_GUID EFI_GUID(0xc47a23c3, 0xcebb, 0x4cc9, 0xa5, 0xe2, 0xde, 0xd0, 0x8f, 0xe4, 0x20, 0xb5) + +#define BIOS_SUPPORT_RESET_CLALLBACK(bios_version) ((bios_version) != NULL) + +extern unsigned long bios_version; + +#endif + +extern struct boot_params *sunway_boot_params; + +extern void sw64_halt(void); +extern void sw64_poweroff(void); +extern void sw64_restart(void); +extern void (*pm_restart)(void); +extern void (*pm_halt)(void); +extern int i2c_set_adapter(void); +extern void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data); +extern void fix_jm585_reset(void); + +#endif /* _ASM_SW64_PLATFORM_H */ diff --git a/arch/sw_64/include/asm/pmc.h b/arch/sw_64/include/asm/pmc.h new file mode 100644 index 0000000000000000000000000000000000000000..d5672dd940a791c62e0edbe1e5e2356183cdd131 --- /dev/null +++ b/arch/sw_64/include/asm/pmc.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Definitions for use with the sw64 PMC interface. + */ + +#ifndef _ASM_SW64_PMC_H +#define _ASM_SW64_PMC_H + +#define PMC_PC0 0 +#define PMC_PC1 1 + +/* Following commands are implemented on all CPUs */ +#define PMC_CMD_DISABLE 0 +#define PMC_CMD_ENABLE 1 +#define PMC_CMD_EVENT_BASE 2 +#define PMC_CMD_PM 4 +#define PMC_CMD_READ 5 +#define PMC_CMD_READ_CLEAR 6 +#define PMC_CMD_WRITE_BASE 7 + +#define PMC_DISABLE_BASE 1 + +#define PMC_ENABLE_BASE 1 + +#define PC0_RAW_BASE 0x0 +#define PC1_RAW_BASE 0x100 +#define PC0_MAX 0xF +#define PC1_MAX 0x3D + +#define SW64_PERFCTRL_KM 2 +#define SW64_PERFCTRL_UM 3 +#define SW64_PERFCTRL_AM 4 + +/* pc0 events */ +#define PC0_INSTRUCTIONS 0x0 +#define PC0_BRANCH_INSTRUCTIONS 0x3 +#define PC0_CPU_CYCLES 0x8 +#define PC0_ITB_READ 0x9 +#define PC0_DTB_READ 0xA +#define PC0_ICACHE_READ 0xB +#define PC0_DCACHE_READ 0xC +#define PC0_SCACHE_REFERENCES 0xD + +/* pc1 events */ +#define PC1_BRANCH_MISSES 0xB +#define PC1_SCACHE_MISSES 0x10 +#define PC1_ICACHE_READ_MISSES 0x16 +#define PC1_ITB_MISSES 0x17 +#define PC1_DTB_SINGLE_MISSES 0x30 +#define PC1_DCACHE_MISSES 0x32 + +#define MAX_HWEVENTS 2 +#define PMC_COUNT_MASK ((1UL << 58) - 1) + +#endif /* _ASM_SW64_PMC_H */ diff --git a/arch/sw_64/include/asm/processor.h b/arch/sw_64/include/asm/processor.h new file mode 100644 index 0000000000000000000000000000000000000000..ec68fe6cc6f220e6db5912b4916c8884a05b5502 --- /dev/null +++ b/arch/sw_64/include/asm/processor.h @@ -0,0 +1,100 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * include/asm-sw64/processor.h + * + * Copyright (C) 1994 Linus Torvalds + */ + +#ifndef _ASM_SW64_PROCESSOR_H +#define _ASM_SW64_PROCESSOR_H + +#include /* for ADDR_LIMIT_32BIT */ +#include + +#define task_pt_regs(task) \ + ((struct pt_regs *) (task->stack + THREAD_SIZE) - 1) + +/* + * Returns current instruction pointer ("program counter"). 
+ */ +#define current_text_addr() \ + ({ void *__pc; __asm__ ("br %0, .+4" : "=r"(__pc)); __pc; }) + +/* + * SW64 does have an arch_pick_mmap_layout() + */ +#define HAVE_ARCH_PICK_MMAP_LAYOUT 1 + +/* + * We have a 52-bit user address space: 4PB user VM... + */ +#define TASK_SIZE (0x10000000000000UL) +#define UNMAPPED_BASE (TASK_SIZE >> 6) +#define STACK_TOP \ + (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL) + +#define STACK_TOP_MAX 0x00120000000UL + +/* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE \ + ((current->personality & ADDR_LIMIT_32BIT) ? 0x40000000 : UNMAPPED_BASE) + +struct thread_struct { + struct user_fpsimd_state fpstate; + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; + unsigned long s[7]; /* s0 ~ s6 */ +}; +#define INIT_THREAD { } + +struct task_struct; +struct pt_regs; + +/* Do necessary setup to start up a newly executed thread. */ +extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp); + +/* Free all resources held by a thread. */ +extern void release_thread(struct task_struct *dead_task); + +unsigned long __get_wchan(struct task_struct *p); + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc) + +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[30]) + +#define cpu_relax() barrier() + +#define ARCH_HAS_PREFETCH +#define ARCH_HAS_PREFETCHW +#define ARCH_HAS_SPINLOCK_PREFETCH + +#ifndef CONFIG_SMP +/* Nothing to prefetch. */ +#define spin_lock_prefetch(lock) do { } while (0) +#endif + +static inline void prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 0, 3); +} + +static inline void prefetchw(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} + +#ifdef CONFIG_SMP +static inline void spin_lock_prefetch(const void *ptr) +{ + __builtin_prefetch(ptr, 1, 3); +} +#endif + +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("halt"); +} +#endif /* _ASM_SW64_PROCESSOR_H */ diff --git a/arch/sw_64/include/asm/ptrace.h b/arch/sw_64/include/asm/ptrace.h new file mode 100644 index 0000000000000000000000000000000000000000..964f4fc730f2c62be11a3d8fa1feb4d40e9f79d6 --- /dev/null +++ b/arch/sw_64/include/asm/ptrace.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_PTRACE_H +#define _ASM_SW64_PTRACE_H + +#include +#include +#include + +#define NO_SYSCALL _AC(-1, UL) + +#ifdef __KERNEL__ +#ifndef __ASSEMBLY__ + +/* + * This struct defines the way the registers are stored on the + * kernel stack during a system call or other kernel entry + */ + +struct pt_regs { + union { + struct user_pt_regs user_regs; + struct { + unsigned long regs[31]; + unsigned long pc; + unsigned long ps; + }; + }; + unsigned long orig_r0; + unsigned long orig_r19; + /* These are saved by HMcode: */ + unsigned long hm_ps; + unsigned long hm_pc; + unsigned long hm_gp; + unsigned long hm_r16; + unsigned long hm_r17; + unsigned long hm_r18; +}; + +#define arch_has_single_step() (1) +#define user_mode(regs) (((regs)->ps & 8) != 0) +#define instruction_pointer(regs) ((regs)->pc) +#define profile_pc(regs) instruction_pointer(regs) +#define user_stack_pointer(pt_regs) ((pt_regs)->regs[30]) +#define kernel_stack_pointer(regs) ((unsigned long)((regs) + 1)) +#define instruction_pointer_set(regs, val) ((regs)->pc = val) + +#define force_successful_syscall_return() (current_pt_regs()->orig_r0 = NO_SYSCALL) + +#define MAX_REG_OFFSET (offsetof(struct pt_regs, orig_r0)) + +extern short regoffsets[]; + +extern unsigned long 
syscall_trace_enter(void);
+extern void syscall_trace_leave(void);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is retrieved
+ * @offset: offset of the register in struct pt_regs
+ *
+ * regs_get_register() returns the value of the register located at
+ * offset @offset within @regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+	if (unlikely(offset > MAX_REG_OFFSET))
+		return 0;
+
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+		unsigned int n);
+
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+	return !regs->regs[19];
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+	if ((regs->orig_r0 == NO_SYSCALL) || is_syscall_success(regs))
+		return regs->regs[0];
+	else
+		return -regs->regs[0];
+}
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_SW64_PTRACE_H */
diff --git a/arch/sw_64/include/asm/setup.h b/arch/sw_64/include/asm/setup.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d557b3495553fc66a9c33d9f0b191983a2db279
--- /dev/null
+++ b/arch/sw_64/include/asm/setup.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SETUP_H
+#define _ASM_SW64_SETUP_H
+
+#include
+
+/*
+ * We leave one page for the initial stack page, and one page for
+ * the initial process structure. Also, the console eats 3 MB for
+ * the initial bootloader (one of which we can reclaim later).
+ */
+#define BOOT_PCB 0x20000000
+#define BOOT_ADDR 0x20000000
+/* Remove when official MILO sources have ELF support: */
+#define BOOT_SIZE (16 * 1024)
+
+#define KERNEL_START_PHYS CONFIG_PHYSICAL_START
+#define KERNEL_START (__START_KERNEL_map + CONFIG_PHYSICAL_START)
+
+/* INIT_STACK may be used for merging lwk to kernel */
+#define INIT_STACK (KERNEL_START + 0x02000)
+
+/*
+ * This is set up by the secondary bootstrap loader. Because
+ * the zero page is zeroed out as soon as the vm system is
+ * initialized, we need to copy things out into a more permanent
+ * place.
+ */
+#define PARAM (KERNEL_START + 0x0A000)
+#define COMMAND_LINE ((char *)(KERNEL_START + 0x0B000))
+#define INITRD_START (*(unsigned long *)(PARAM + 0x100))
+#define INITRD_SIZE (*(unsigned long *)(PARAM + 0x108))
+#define DTB_START (*(unsigned long *)(PARAM + 0x118))
+
+#define _TEXT_START (KERNEL_START + 0x10000)
+
+#define COMMAND_LINE_OFF (0x10000UL - 0xB000UL)
+#define INITRD_START_OFF (0x10000UL - 0xA100UL)
+#define INITRD_SIZE_OFF (0x10000UL - 0xA108UL)
+
+/* Motherboard Configuration Tables */
+#define MB_CONFIG_START 0x908000
+#define MB_MCLK (MB_CONFIG_START + 0x1)
+#define MB_EXTCLK (MB_CONFIG_START + 0x11)
+
+#ifndef __ASSEMBLY__
+#include
+extern struct boot_params *sunway_boot_params;
+#endif
+
+#endif /* _ASM_SW64_SETUP_H */
diff --git a/arch/sw_64/include/asm/sfp-machine.h b/arch/sw_64/include/asm/sfp-machine.h
new file mode 100644
index 0000000000000000000000000000000000000000..156bebc9c515ea038efd05713ace96d618b623e0
--- /dev/null
+++ b/arch/sw_64/include/asm/sfp-machine.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Machine-dependent software floating-point definitions.
+ * sw64 kernel version.
+ * Copyright (C) 1997,1998,1999 Free Software Foundation, Inc.
+ * This file is part of the GNU C Library.
+ * Contributed by Richard Henderson (rth@cygnus.com),
+ *		  Jakub Jelinek (jakub@redhat.com) and
+ *		  David S. Miller (davem@redhat.com).
+ */
+
+#ifndef _ASM_SW64_SFP_MACHINE_H
+#define _ASM_SW64_SFP_MACHINE_H
+
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R, X, Y) \
+	_FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S, R, X, Y)
+#define _FP_MUL_MEAT_D(R, X, Y) \
+	_FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D, R, X, Y, umul_ppmm)
+#define _FP_MUL_MEAT_Q(R, X, Y) \
+	_FP_MUL_MEAT_2_wide(_FP_WFRACBITS_Q, R, X, Y, umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R, X, Y) _FP_DIV_MEAT_1_imm(S, R, X, Y, _FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R, X, Y) _FP_DIV_MEAT_1_udiv(D, R, X, Y)
+#define _FP_DIV_MEAT_Q(R, X, Y) _FP_DIV_MEAT_2_udiv(Q, R, X, Y)
+
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q
+#define _FP_NANSIGN_S 1
+#define _FP_NANSIGN_D 1
+#define _FP_NANSIGN_Q 1
+
+#define _FP_KEEPNANFRACP 1
+
+/* Sw_64 Architecture Handbook, 4.7.10.4 says that
+ * we should prefer any type of NaN in Fb, then Fa.
+ */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)			\
+do {								\
+	R##_s = Y##_s;						\
+	_FP_FRAC_COPY_##wc(R, X);				\
+	R##_c = FP_CLS_NAN;					\
+} while (0)
+
+/* Obtain the current rounding mode. */
+#define FP_ROUNDMODE mode
+#define FP_RND_NEAREST (FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT)
+#define FP_RND_ZERO (FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT)
+#define FP_RND_PINF (FPCR_DYN_PLUS >> FPCR_DYN_SHIFT)
+#define FP_RND_MINF (FPCR_DYN_MINUS >> FPCR_DYN_SHIFT)
+
+/* Exception flags. */
+#define FP_EX_INVALID IEEE_TRAP_ENABLE_INV
+#define FP_EX_OVERFLOW IEEE_TRAP_ENABLE_OVF
+#define FP_EX_UNDERFLOW IEEE_TRAP_ENABLE_UNF
+#define FP_EX_DIVZERO IEEE_TRAP_ENABLE_DZE
+#define FP_EX_INEXACT IEEE_TRAP_ENABLE_INE
+#define FP_EX_DENORM IEEE_TRAP_ENABLE_DNO
+
+#define FP_DENORM_ZERO (swcr & IEEE_MAP_DMZ)
+
+/* We write the results always */
+#define FP_INHIBIT_RESULTS 0
+
+#endif /* _ASM_SW64_SFP_MACHINE_H */
diff --git a/arch/sw_64/include/asm/signal.h b/arch/sw_64/include/asm/signal.h
new file mode 100644
index 0000000000000000000000000000000000000000..4dc3b6510b8641b7721693d15ab16aba40e47aac
--- /dev/null
+++ b/arch/sw_64/include/asm/signal.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SIGNAL_H
+#define _ASM_SW64_SIGNAL_H
+
+#include
+
+/* Digital Unix defines 64 signals. Most things should be clean enough
+ * to redefine this at will, if care is taken to make libc match.
+ */
+
+#define _NSIG 64
+#define _NSIG_BPW 64
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+typedef unsigned long old_sigset_t; /* at least 32 bits */
+
+typedef struct {
+	unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+struct odd_sigaction {
+	__sighandler_t sa_handler;
+	old_sigset_t sa_mask;
+	int sa_flags;
+};
+
+#include
+#endif /* _ASM_SW64_SIGNAL_H */
diff --git a/arch/sw_64/include/asm/smp.h b/arch/sw_64/include/asm/smp.h
new file mode 100644
index 0000000000000000000000000000000000000000..3a2fcf62b30cabe5512a85946a31852d339e62b4
--- /dev/null
+++ b/arch/sw_64/include/asm/smp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_SMP_H
+#define _ASM_SW64_SMP_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/* HACK: Cabrio WHAMI return value is bogus if more than 8 bits used.. 
:-( */ + +extern cpumask_t core_start; + +static inline unsigned long +read_vpcr(void) +{ + register unsigned long __r0 __asm__("$0"); + __asm__ __volatile__( + "sys_call %1 #rvpcr" + : "=r"(__r0) + : "i" (0x39) + : "$1", "$22", "$23", "$24", "$25"); + return __r0; +} + +#ifdef CONFIG_SMP +/* SMP initialization hook for setup_arch */ +void __init setup_smp(void); + +#include + +/* smp reset control block */ +struct smp_rcb_struct { + void (*restart_entry)(unsigned long args); + unsigned long restart_args; + unsigned long ready; + unsigned long init_done; +}; + +#define INIT_SMP_RCB ((struct smp_rcb_struct *) __va(0x820000UL)) + + +#ifdef GENERATING_ASM_OFFSETS +#define raw_smp_processor_id() (0) +#else +#include +#define raw_smp_processor_id() (*((unsigned int *)((void *)current + TASK_CPU))) +#endif +#define hard_smp_processor_id() cpu_to_rcid(raw_smp_processor_id()) + +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[cpu] +#define cpu_physical_id(cpu) __cpu_to_rcid[cpu] + +extern unsigned long tidle_pcb[NR_CPUS]; +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +#else /* CONFIG_SMP */ +#define hard_smp_processor_id() 0 +#define smp_call_function_on_cpu(func, info, wait, cpu) ({ 0; }) +/* The map from sequential logical cpu number to hard cid. */ +extern int __cpu_to_rcid[NR_CPUS]; +#define cpu_to_rcid(cpu) __cpu_to_rcid[0] +#define cpu_physical_id(cpu) __cpu_to_rcid[0] +#endif /* CONFIG_SMP */ + +#define NO_PROC_ID (-1) + +static inline void send_ipi(int cpu, unsigned long type) +{ + int rcid; + + rcid = cpu_to_rcid(cpu); + + if (is_in_guest()) + hcall(HCALL_IVI, rcid, type, 0); + else + sendii(rcid, type, 0); +} + +#define reset_cpu(cpu) send_ipi((cpu), II_RESET) + +#endif /* _ASM_SW64_SMP_H */ diff --git a/arch/sw_64/include/asm/socket.h b/arch/sw_64/include/asm/socket.h new file mode 100644 index 0000000000000000000000000000000000000000..e8704346777552f53e1c8f1bcf67223d40a9e317 --- /dev/null +++ b/arch/sw_64/include/asm/socket.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SOCKET_H +#define _ASM_SW64_SOCKET_H + +#include + +/* O_NONBLOCK clashes with the bits used for socket types. Therefore we + * have to define SOCK_NONBLOCK to a different value here. + */ +#define SOCK_NONBLOCK 0x40000000 +#endif /* _ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/asm/sparsemem.h b/arch/sw_64/include/asm/sparsemem.h new file mode 100644 index 0000000000000000000000000000000000000000..a60e757f3838791dfb1b35245a17780d8ecf4b83 --- /dev/null +++ b/arch/sw_64/include/asm/sparsemem.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPARSEMEM_H +#define _ASM_SW64_SPARSEMEM_H + +#include + +#define SECTION_SIZE_BITS 28 + +#endif /* _ASM_SW64_SPARSEMEM_H */ diff --git a/arch/sw_64/include/asm/spinlock.h b/arch/sw_64/include/asm/spinlock.h new file mode 100644 index 0000000000000000000000000000000000000000..64358f32cd9a80b587a023dae6d5eecb1cf270e6 --- /dev/null +++ b/arch/sw_64/include/asm/spinlock.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#ifndef _ASM_SW64_SPINLOCK_H +#define _ASM_SW64_SPINLOCK_H + +#include +#include + +/* See include/linux/spinlock.h */ +#define smp_mb__after_spinlock() smp_mb() + +#endif /* _ASM_SW64_SPINLOCK_H */ diff --git a/arch/sw_64/include/asm/spinlock_types.h b/arch/sw_64/include/asm/spinlock_types.h new file mode 100644 index 0000000000000000000000000000000000000000..62e554e4f48c35b2d4578072231b58c75b202a4b --- /dev/null +++ b/arch/sw_64/include/asm/spinlock_types.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SPINLOCK_TYPES_H +#define _ASM_SW64_SPINLOCK_TYPES_H + +#include +#include + +#endif /* _ASM_SW64_SPINLOCK_TYPES_H */ diff --git a/arch/sw_64/include/asm/stacktrace.h b/arch/sw_64/include/asm/stacktrace.h new file mode 100644 index 0000000000000000000000000000000000000000..958c9892fd6d0943bf78484c7e870323694fbde8 --- /dev/null +++ b/arch/sw_64/include/asm/stacktrace.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SW64_STACKTRACE_H +#define _ASM_SW64_STACKTRACE_H + +#include +#include +#include +#include +#include + +struct stackframe { + unsigned long pc; + unsigned long fp; +}; + +enum stack_type { + STACK_TYPE_UNKNOWN, + STACK_TYPE_TASK, +}; + +struct stack_info { + unsigned long low; + unsigned long high; + enum stack_type type; +}; + +/* The form of the top of the frame on the stack */ +struct stack_frame { + unsigned long return_address; + struct stack_frame *next_frame; +}; + +extern int unwind_frame(struct task_struct *tsk, struct stackframe *frame); +extern void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data); + +static inline bool on_task_stack(struct task_struct *tsk, unsigned long sp, + struct stack_info *info) +{ + unsigned long low = (unsigned long)task_stack_page(tsk); + unsigned long high = low + THREAD_SIZE; + + if (sp < low || sp >= high) + return false; + + if (info) { + info->low = low; + info->high = high; + info->type = STACK_TYPE_TASK; + } + + return true; +} + +/* + * We can only safely access per-cpu stacks from current in a non-preemptible + * context. + */ +static inline bool on_accessible_stack(struct task_struct *tsk, + unsigned long sp, + struct stack_info *info) +{ + if (on_task_stack(tsk, sp, info)) + return true; + if (tsk != current || preemptible()) + return false; + + return false; +} + +#endif /* _ASM_SW64_STACKTRACE_H */ diff --git a/arch/sw_64/include/asm/string.h b/arch/sw_64/include/asm/string.h new file mode 100644 index 0000000000000000000000000000000000000000..87d93f4cd4d5eabb964cca67d64afa5a47b05a39 --- /dev/null +++ b/arch/sw_64/include/asm/string.h @@ -0,0 +1,54 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_STRING_H +#define _ASM_SW64_STRING_H + +#ifdef __KERNEL__ + +/* + * GCC of any recent vintage doesn't do stupid things with bcopy. + * EGCS 1.1 knows all about expanding memcpy inline, others don't. + * + * Similarly for a memset with data = 0. + */ + +#define __HAVE_ARCH_MEMCPY +extern void *memcpy(void *dest, const void *src, size_t n); +/* For backward compatibility with modules. Unused otherwise. 
*/ +extern void *__memcpy(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMMOVE +extern void *memmove(void *dest, const void *src, size_t n); + +#define __HAVE_ARCH_MEMSET +extern void *__constant_c_memset(void *s, unsigned long c, size_t n); +extern void *___memset(void *s, int c, size_t n); +extern void *__memset(void *s, int c, size_t n); +extern void *memset(void *s, int c, size_t n); + +#define __HAVE_ARCH_STRCPY +extern char *strcpy(char *dest, const char *src); + +#define __HAVE_ARCH_STRNCPY +extern char *strncpy(char *dest, const char *src, size_t n); + +/* The following routine is like memset except that it writes 16-bit + * aligned values. The DEST and COUNT parameters must be even for + * correct operation. + */ + +#define __HAVE_ARCH_MEMSETW +extern void *__memsetw(void *dest, unsigned short c, size_t count); + +#define memsetw(s, c, n) \ +(__builtin_constant_p(c) \ + ? __constant_c_memset((s), 0x0001000100010001UL * (unsigned short)(c), (n)) \ + : __memsetw((s), (c), (n))) + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +#define __HAVE_ARCH_MEMCPY_FLUSHCACHE +void memcpy_flushcache(void *dst, const void *src, size_t cnt); +#endif + +#endif /* __KERNEL__ */ + +#endif /* _ASM_SW64_STRING_H */ diff --git a/arch/sw_64/include/asm/suspend.h b/arch/sw_64/include/asm/suspend.h new file mode 100644 index 0000000000000000000000000000000000000000..833e27f9d5e14a729a285406234e90fc03afbdfe --- /dev/null +++ b/arch/sw_64/include/asm/suspend.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SUSPEND_H +#define _ASM_SW64_SUSPEND_H + +#include +#include +#include +#define SOFTINF_SLEEP_MAGIC 0x0123456789ABCDEFUL + +#ifdef CONFIG_HIBERNATION +#include +#include +#endif + +struct callee_saved_regs { + unsigned long r9; + unsigned long r10; + unsigned long r11; + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + unsigned long ra; +}; + +struct callee_saved_fpregs { + unsigned long f2[4]; + unsigned long f3[4]; + unsigned long f4[4]; + unsigned long f5[4]; + unsigned long f6[4]; + unsigned long f7[4]; + unsigned long f8[4]; + unsigned long f9[4]; +} __aligned(32); /* 256 bits aligned for simd */ + +struct processor_state { + struct callee_saved_regs regs; + struct callee_saved_fpregs fpregs; + unsigned long fpcr; + unsigned long ktp; +#ifdef CONFIG_HIBERNATION + unsigned long sp; + struct vcpucb vcb; +#endif +}; + +extern void sw64_suspend_deep_sleep(struct processor_state *state); +extern const struct platform_suspend_ops native_suspend_ops; +#endif /* _ASM_SW64_SUSPEND_H */ diff --git a/arch/sw_64/include/asm/sw64_init.h b/arch/sw_64/include/asm/sw64_init.h new file mode 100644 index 0000000000000000000000000000000000000000..86ddd2cb65f839bfa9cc609aa6878740b4c484b3 --- /dev/null +++ b/arch/sw_64/include/asm/sw64_init.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64_INIT_H +#define _ASM_SW64_SW64_INIT_H + +#include +#include + +#include + +struct sw64_early_init_ops { + void (*setup_core_map)(struct cpumask *cpumask); + unsigned long (*get_node_mem)(int nodeid); + void (*get_smp_info)(void); +}; + +struct sw64_pci_init_ops { + int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin); + unsigned long (*get_rc_enable)(unsigned long node); + void (*hose_init)(struct pci_controller *hose); + void (*set_rc_piu)(unsigned long node, unsigned long index); + int (*check_pci_linkup)(unsigned long node, unsigned long index); + void (*set_intx)(unsigned long node, unsigned long index, + unsigned 
long int_conf); +}; + + +struct sw64_chip_init_ops { + struct sw64_early_init_ops early_init; + struct sw64_pci_init_ops pci_init; + void (*fixup)(void); +}; + +struct sw64_chip_ops { + int (*get_cpu_num)(void); + void (*device_interrupt)(unsigned long irq_info); + void (*suspend)(bool wake); + void (*fixup)(void); +}; + +extern void sw64_init_noop(void); +extern void setup_chip_ops(void); +extern struct sw64_chip_ops *sw64_chip; +extern struct sw64_chip_init_ops *sw64_chip_init; +#ifdef CONFIG_PM +extern struct syscore_ops io_syscore_ops; +#endif + +DECLARE_PER_CPU(unsigned long, hard_node_id); + +#endif /* _ASM_SW64_SW64_INIT_H */ diff --git a/arch/sw_64/include/asm/sw64io.h b/arch/sw_64/include/asm/sw64io.h new file mode 100644 index 0000000000000000000000000000000000000000..d52cd8cc86bf24aa3a7b45356bcbbf2651cf7b01 --- /dev/null +++ b/arch/sw_64/include/asm/sw64io.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SW64IO_H +#define _ASM_SW64_SW64IO_H + +#include +#include + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#define MK_RC_CFG(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_RC_CFG) +#define MK_PIU_IOR0(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR0_BASE) +#define MK_PIU_IOR1(nid, idx) \ + (SW64_PCI_IO_BASE((nid), (idx)) | PCI_IOR1_BASE) + +static inline unsigned int +read_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + return readl(addr); +} + +static inline void +write_rc_conf(unsigned long node, unsigned long rc, + unsigned int offset, unsigned int data) +{ + void __iomem *addr; + + addr = __va(MK_RC_CFG(node, rc) | offset); + writel(data, addr); +} + +static inline unsigned long +read_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior0(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR0(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +read_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + return readq(addr); +} + +static inline void +write_piu_ior1(unsigned long node, unsigned long rc, + unsigned int reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(MK_PIU_IOR1(node, rc) + reg); + writeq(data, addr); +} + +static inline unsigned long +sw64_io_read(unsigned long node, unsigned long reg) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + return readq(addr); +} + +static inline void +sw64_io_write(unsigned long node, unsigned long reg, unsigned long data) +{ + void __iomem *addr; + + addr = __va(SW64_IO_BASE(node) | reg); + writeq(data, addr); +} + +#if defined(CONFIG_UNCORE_XUELANG) +#include +#endif + +#if defined(CONFIG_UNCORE_JUNZHANG) +#include +#endif + +#endif /* _ASM_SW64_SW64IO_H */ diff --git a/arch/sw_64/include/asm/switch_to.h b/arch/sw_64/include/asm/switch_to.h new file mode 100644 index 0000000000000000000000000000000000000000..5e2db4b9e26613645995a3dd1ee40b798b460860 --- /dev/null +++ b/arch/sw_64/include/asm/switch_to.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SWITCH_TO_H +#define _ASM_SW64_SWITCH_TO_H + +#include + +extern void __fpstate_save(struct 
task_struct *save_to); +extern void __fpstate_restore(struct task_struct *restore_from); +extern struct task_struct *__switch_to(struct task_struct *prev, + struct task_struct *next); +extern void restore_da_match_after_sched(void); + +static inline void aux_save(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + pcb->tp = rtid(); + __fpstate_save(task); + } +} + +static inline void aux_restore(struct task_struct *task) +{ + struct pcb_struct *pcb; + + if (likely(!(task->flags & PF_KTHREAD))) { + pcb = &task_thread_info(task)->pcb; + wrtp(pcb->tp); + __fpstate_restore(task); + } +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + aux_save(prev); + aux_restore(next); +} + + +#define switch_to(prev, next, last) \ +do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + __switch_to_aux(__prev, __next); \ + (last) = __switch_to(__prev, __next); \ +} while (0) + + +/* TODO: finish_arch_switch has been removed from arch-independent code. */ + +/* + * finish_arch_switch will be called after switch_to + */ +#define finish_arch_post_lock_switch restore_da_match_after_sched + + +#endif /* _ASM_SW64_SWITCH_TO_H */ diff --git a/arch/sw_64/include/asm/syscall.h b/arch/sw_64/include/asm/syscall.h new file mode 100644 index 0000000000000000000000000000000000000000..a821bf68be1643d7f457ad738d9cde9cc1930d77 --- /dev/null +++ b/arch/sw_64/include/asm/syscall.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_SYSCALL_H +#define _ASM_SW64_SYSCALL_H + +#include + +#ifndef __ASSEMBLY__ + +typedef long (*syscall_fn_t)(ulong, ulong, ulong, ulong, ulong, ulong); + +extern syscall_fn_t sys_call_table[]; + +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline long +syscall_get_error(struct task_struct *task, struct pt_regs *regs) +{ + return regs->regs[19] ? 
-regs->regs[0] : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->regs[0]; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + if (error) { + regs->regs[0] = -error; + regs->regs[19] = 1; + } else { + regs->regs[0] = val; + regs->regs[19] = 0; + } +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + *args++ = regs->regs[16]; + *args++ = regs->regs[17]; + *args++ = regs->regs[18]; + *args++ = regs->regs[19]; + *args++ = regs->regs[20]; + *args = regs->regs[21]; +} + +static inline void syscall_set_arguments(struct task_struct *task, + struct pt_regs *regs, + const unsigned long *args) +{ + regs->regs[16] = *args++; + regs->regs[17] = *args++; + regs->regs[18] = *args++; + regs->regs[19] = *args++; + regs->regs[20] = *args++; + regs->regs[21] = *args; +} + +static inline int syscall_get_arch(struct task_struct *task) +{ + return AUDIT_ARCH_SW64; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_SW64_SYSCALL_H */ diff --git a/arch/sw_64/include/asm/tc.h b/arch/sw_64/include/asm/tc.h new file mode 100644 index 0000000000000000000000000000000000000000..aa39c3528e3fa923022365c1db31c1ef2c61e220 --- /dev/null +++ b/arch/sw_64/include/asm/tc.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TC_H +#define _ASM_SW64_TC_H + +static inline unsigned long rdtc(void) +{ + unsigned long ret; + + __asm__ __volatile__ ("rtc %0" : "=r"(ret)); + return ret; +} + +extern void tc_sync_clear(void); +extern void tc_sync_ready(void *ignored); +extern void tc_sync_set(void); +#endif /* _ASM_SW64_TC_H */ diff --git a/arch/sw_64/include/asm/thread_info.h b/arch/sw_64/include/asm/thread_info.h new file mode 100644 index 0000000000000000000000000000000000000000..4f3b837e2e908d7d74d871269a65343c13689633 --- /dev/null +++ b/arch/sw_64/include/asm/thread_info.h @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_THREAD_INFO_H +#define _ASM_SW64_THREAD_INFO_H + +#ifdef __KERNEL__ + +#ifndef __ASSEMBLY__ +#include +#include + +typedef struct { + unsigned long seg; +} mm_segment_t; + + +struct pcb_struct { + unsigned long tp; + unsigned long da_match, da_mask; + unsigned long dv_match, dv_mask; + union { + unsigned long dc_ctl; + unsigned long match_ctl; + }; + unsigned long ia_match, ia_mask; + unsigned long iv_match; + unsigned long ida_match, ida_mask; +}; + +struct thread_info { + struct pcb_struct pcb; /* hmcode state */ + + unsigned int flags; /* low level flags */ + unsigned int ieee_state; /* see fpu.h */ + + mm_segment_t addr_limit; /* thread address space */ + unsigned int cpu; /* current CPU */ + int preempt_count; /* 0 => preemptible, <0 => BUG */ + unsigned int status; /* thread-synchronous flags */ + + int bpt_nsaved; + unsigned long bpt_addr[2]; /* breakpoint handling */ + unsigned int bpt_insn[2]; +#ifdef CONFIG_DYNAMIC_FTRACE + unsigned long dyn_ftrace_addr; +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + unsigned long dyn_ftrace_regs_addr; +#endif +#endif +}; + +static __always_inline u64 rtid(void) +{ + u64 val; + + asm volatile("rtid %0" : "=r" (val) : :); + return val; +} + +/* + * Macros/functions for gaining access to the thread information structure. 
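+ *
+ * (For context: the pcb above mirrors the per-task HMcode state; e.g.
+ * aux_save() in <asm/switch_to.h> snapshots the hardware thread pointer
+ * into pcb.tp via rtid() when switching away from a user task.)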
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.addr_limit	= KERNEL_DS,		\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+}
+
+
+#endif /* __ASSEMBLY__ */
+
+/* Thread information allocation. */
+#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE (2 * PAGE_SIZE)
+
+/*
+ * Thread information flags:
+ * - these are process state flags and are used from assembly
+ * - pending work-to-be-done flags come first and must be assigned to be
+ *   within bits 0 to 7 to fit in an immediate operand.
+ *
+ * TIF_SYSCALL_TRACE is known to be 0 via blbs.
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_SYSCALL_AUDIT	4	/* syscall audit active */
+#define TIF_UPROBE		5	/* uprobe breakpoint or singlestep */
+#define TIF_PATCH_PENDING	6	/* pending live patching update */
+#define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
+#define TIF_DIE_IF_KERNEL	9	/* dik recursion lock */
+#define TIF_SYSCALL_TRACEPOINT	10
+#define TIF_SECCOMP		11	/* secure computing */
+#define TIF_MEMDIE		13	/* is terminating due to OOM killer */
+#define TIF_POLLING_NRFLAG	14	/* idle is polling for TIF_NEED_RESCHED */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_PATCH_PENDING	(1 << TIF_PATCH_PENDING)
+#define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
+#define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
+#define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_UPROBE		(1 << TIF_UPROBE)
+#define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+
+/* Work to do on interrupt/exception return. */
+#define _TIF_WORK_MASK		(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
+				 _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
+				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
+
+/* Work to do on any return to userspace. */
+#define _TIF_ALLWORK_MASK	(_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
+
+#define TS_UAC_NOPRINT		0x0001	/* ! Preserve the following three */
+#define TS_UAC_NOFIX		0x0002	/* ! flags as they match */
+#define TS_UAC_SIGBUS		0x0004	/* ! 
userspace part of 'prctl' */ + +#define SET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + if (value & PR_UNALIGN_NOPRINT) \ + status |= TS_UAC_NOPRINT; \ + if (value & PR_UNALIGN_SIGBUS) \ + status |= TS_UAC_SIGBUS; \ + if (value & PR_NOFIX) /* sw-specific */ \ + status |= TS_UAC_NOFIX; \ + task_thread_info(task)->status = status; \ + 0; }) + +#define GET_UNALIGN_CTL(task, value) ({ \ + __u32 status = task_thread_info(task)->status & ~UAC_BITMASK; \ + __u32 res = 0; \ + if (status & TS_UAC_NOPRINT) \ + res |= PR_UNALIGN_NOPRINT; \ + if (status & TS_UAC_SIGBUS) \ + res |= PR_UNALIGN_SIGBUS; \ + if (status & TS_UAC_NOFIX) \ + res |= PR_NOFIX; \ + put_user(res, (int __user *)(value)); \ + }) + +#endif /* __KERNEL__ */ +#endif /* _ASM_SW64_THREAD_INFO_H */ diff --git a/arch/sw_64/include/asm/timer.h b/arch/sw_64/include/asm/timer.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea9e0a538d01f84360f0d032450062835e1b1d0 --- /dev/null +++ b/arch/sw_64/include/asm/timer.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMER_H +#define _ASM_SW64_TIMER_H + +extern void sw64_setup_clocksource(void); + +extern void sw64_setup_timer(void); + +extern void __init setup_sched_clock(void); + +#endif /* _ASM_SW64_TIMER_H */ diff --git a/arch/sw_64/include/asm/timex.h b/arch/sw_64/include/asm/timex.h new file mode 100644 index 0000000000000000000000000000000000000000..a5760bf8abd484628232265675d51610f2b7a959 --- /dev/null +++ b/arch/sw_64/include/asm/timex.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_TIMEX_H +#define _ASM_SW64_TIMEX_H + +#include + +/* With only one or two oddballs, we use the RTC as the ticker, selecting + * the 32.768kHz reference clock, which nicely divides down to our HZ. + */ +#define CLOCK_TICK_RATE 32768 + +/* + * Standard way to access the cycle counter. 
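+ *
+ * An illustrative use, measuring an elapsed time in cycles; do_something()
+ * stands for whatever is being timed (the counter is free-running, so
+ * only differences are meaningful):
+ *
+ *	cycles_t t0 = get_cycles();
+ *	do_something();
+ *	cycles_t delta = get_cycles() - t0;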
+ */
+
+typedef unsigned long cycles_t;
+
+static inline cycles_t get_cycles(void)
+{
+	return rdtc();
+}
+
+#endif /* _ASM_SW64_TIMEX_H */
diff --git a/arch/sw_64/include/asm/tlb.h b/arch/sw_64/include/asm/tlb.h
new file mode 100644
index 0000000000000000000000000000000000000000..08c8f4f97de13a0e5fdb87e943dc03997631ddf8
--- /dev/null
+++ b/arch/sw_64/include/asm/tlb.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_TLB_H
+#define _ASM_SW64_TLB_H
+
+#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
+
+#include
+
+#define __pte_free_tlb(tlb, pte, address) pte_free((tlb)->mm, pte)
+#define __pmd_free_tlb(tlb, pmd, address) pmd_free((tlb)->mm, pmd)
+#define __pud_free_tlb(tlb, pud, address) pud_free((tlb)->mm, pud)
+
+#endif /* _ASM_SW64_TLB_H */
diff --git a/arch/sw_64/include/asm/tlbflush.h b/arch/sw_64/include/asm/tlbflush.h
new file mode 100644
index 0000000000000000000000000000000000000000..73995d9663a6e793f70db4dd33270894d9fe8672
--- /dev/null
+++ b/arch/sw_64/include/asm/tlbflush.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_TLBFLUSH_H
+#define _ASM_SW64_TLBFLUSH_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static inline void local_flush_tlb_all(void)
+{
+	tbiv();
+}
+
+static inline void local_flush_tlb_mm(struct mm_struct *mm)
+{
+	int cpu;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	cpu = smp_processor_id();
+	if (!asid_valid(mm, cpu)) {
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+		goto out;
+	}
+
+	if (current->mm == mm) {
+		__get_new_mm_context(mm, cpu);
+		wrasid(cpu_asid(cpu, mm));
+	} else {
+		mm->context.asid[cpu] = 0;
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+	}
+out:
+	local_irq_restore(flags);
+}
+
+static inline void
+local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+	int cpu;
+	struct mm_struct *mm;
+
+	cpu = smp_processor_id();
+	mm = vma->vm_mm;
+
+	if (asid_valid(mm, cpu))
+		tbisasid(cpu_asid(cpu, mm), addr);
+	else
+		cpumask_clear_cpu(cpu, mm_cpumask(mm));
+}
+
+/*
+ * This flushes the whole user TLB for now.
+ */
+static inline void
+local_flush_tlb_range(struct vm_area_struct *vma,
+		      unsigned long start, unsigned long end)
+{
+	local_flush_tlb_mm(vma->vm_mm);
+}
+
+/*
+ * There is no way to invalidate kernel pages only, so it has to
+ * invalidate all mappings.
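+ * Hence local_flush_tlb_kernel_range() below simply falls back to
+ * local_flush_tlb_all(), whatever range it is given.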
+/*
+ * There is no way to invalidate kernel pages only, so we have to
+ * invalidate all mappings.
+ */
+static inline void
+local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	local_flush_tlb_all();
+}
+
+#ifdef CONFIG_SMP
+extern void flush_tlb_all(void);
+extern void flush_tlb_mm(struct mm_struct *mm);
+extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+			    unsigned long end);
+extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+#else
+#define flush_tlb_all()			local_flush_tlb_all()
+#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
+#define flush_tlb_page(vma, addr)	local_flush_tlb_page(vma, addr)
+#define flush_tlb_range(vma, start, end)	local_flush_tlb_range(vma, start, end)
+#define flush_tlb_kernel_range(start, end)	local_flush_tlb_kernel_range(start, end)
+
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SW64_TLBFLUSH_H */
diff --git a/arch/sw_64/include/asm/topology.h b/arch/sw_64/include/asm/topology.h
new file mode 100644
index 0000000000000000000000000000000000000000..25ec7b9e943172316fd6f66cc27ee69af914a32e
--- /dev/null
+++ b/arch/sw_64/include/asm/topology.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_TOPOLOGY_H
+#define _ASM_SW64_TOPOLOGY_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu)	(cpu_topology[cpu].package_id)
+#define topology_core_id(cpu)			(cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu)		(&cpu_topology[cpu].core_sibling)
+#define topology_sibling_cpumask(cpu)		(&cpu_topology[cpu].thread_sibling)
+#define topology_llc_cpumask(cpu)		(&cpu_topology[cpu].llc_sibling)
+
+void init_cpu_topology(void);
+void store_cpu_topology(int cpuid);
+void remove_cpu_topology(int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+static inline int rcid_to_thread_id(int rcid)
+{
+	return (rcid & THREAD_ID_MASK) >> THREAD_ID_SHIFT;
+}
+
+static inline int rcid_to_core_id(int rcid)
+{
+	return (rcid & CORE_ID_MASK) >> CORE_ID_SHIFT;
+}
+
+static inline int rcid_to_domain_id(int rcid)
+{
+	return (rcid & DOMAIN_ID_MASK) >> DOMAIN_ID_SHIFT;
+}
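The rcid_to_*() helpers above just mask-and-shift fields out of a packed resource/core ID. A standalone illustration (editorial; the shift/mask values here are invented for the demo, the real THREAD_ID_*/CORE_ID_*/DOMAIN_ID_* constants live elsewhere in the port):

    /* Editorial sketch: decoding a packed CPU id the way rcid_to_*() do. */
    #include <stdio.h>

    #define THREAD_ID_SHIFT	8			/* demo values only */
    #define THREAD_ID_MASK	(0x1 << THREAD_ID_SHIFT)
    #define CORE_ID_SHIFT	0
    #define CORE_ID_MASK	0x1f
    #define DOMAIN_ID_SHIFT	12
    #define DOMAIN_ID_MASK	(0x3 << DOMAIN_ID_SHIFT)

    int main(void)
    {
            int rcid = (1 << 12) | (1 << 8) | 9;	/* domain 1, thread 1, core 9 */

            printf("domain %d thread %d core %d\n",
                   (rcid & DOMAIN_ID_MASK) >> DOMAIN_ID_SHIFT,
                   (rcid & THREAD_ID_MASK) >> THREAD_ID_SHIFT,
                   (rcid & CORE_ID_MASK) >> CORE_ID_SHIFT);
            return 0;
    }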
+
+#ifdef CONFIG_NUMA
+
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+/* Returns a pointer to the cpumask of CPUs on Node 'node'. */
+#define cpumask_of_node(node)	((node) == NUMA_NO_NODE ?	\
+				 cpu_all_mask :			\
+				 node_to_cpumask_map[node])
+#else
+extern const struct cpumask *cpumask_of_node(int node);
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+extern void numa_add_cpu(unsigned int cpu);
+extern void numa_remove_cpu(unsigned int cpu);
+extern void numa_store_cpu_info(unsigned int cpu);
+extern int __node_distance(int from, int to);
+#define node_distance(a, b)	__node_distance(a, b)
+#define parent_node(node)	(node)
+#define cpumask_of_pcibus(bus)	(cpu_online_mask)
+#else /* !CONFIG_NUMA */
+static inline void numa_add_cpu(unsigned int cpu) { }
+static inline void numa_remove_cpu(unsigned int cpu) { }
+static inline void numa_store_cpu_info(unsigned int cpu) { }
+#endif /* CONFIG_NUMA */
+
+extern void get_vt_smp_info(void);
+
+#include
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+
+#endif /* _ASM_SW64_TOPOLOGY_H */
diff --git a/arch/sw_64/include/asm/uaccess.h b/arch/sw_64/include/asm/uaccess.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6b119f7fa78c70af35312f3a4dadfbbbc02f9cf
--- /dev/null
+++ b/arch/sw_64/include/asm/uaccess.h
@@ -0,0 +1,311 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_UACCESS_H
+#define _ASM_SW64_UACCESS_H
+
+#include
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not.  If get_fs() == USER_DS, checking is performed; with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * Or at least it did once upon a time.  Nowadays it is a mask that
+ * defines which bits of the address space are off limits.  This is a
+ * wee bit faster than the above.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define KERNEL_DS	((mm_segment_t) { 0UL })
+#define USER_DS		((mm_segment_t) { -0x10000000000000UL })
+
+#define get_fs()	(current_thread_info()->addr_limit)
+#define get_ds()	(KERNEL_DS)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define uaccess_kernel()	(get_fs().seg == KERNEL_DS.seg)
+
+/*
+ * These are the main single-value transfer routines.  They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * As the sw64 uses the same address space for kernel and user
+ * data, we can just do these as direct assignments.  (Of course, the
+ * exception handling means that it's no longer "just"...)
+ *
+ * Careful to not
+ * (a) re-use the arguments for side effects (sizeof/typeof is ok)
+ * (b) require any knowledge of processes at this stage
+ */
+#define put_user(x, ptr) \
+	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define get_user(x, ptr) \
+	__get_user_check((x), (ptr), sizeof(*(ptr)))
+
+/*
+ * The "__xxx" versions do not do address space checking, useful when
+ * doing multiple accesses to the same area (the programmer has to do the
+ * checks by hand with "access_ok()")
+ */
+#define __put_user(x, ptr) \
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+#define __get_user(x, ptr) \
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+/*
+ * The "ldi %1, 2b-1b(%0)" bits are magic to get the assembler to
+ * encode the bits we need for resolving the exception.  See the
+ * more extensive comments with fixup_inline_exception below for
+ * more information.
+ */ + +extern void __get_user_unknown(void); + +#define __get_user_nocheck(x, ptr, size) \ +({ \ + long __gu_err = 0; \ + unsigned long __gu_val; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __get_user_8(ptr); \ + break; \ + case 2: \ + __get_user_16(ptr); \ + break; \ + case 4: \ + __get_user_32(ptr); \ + break; \ + case 8: \ + __get_user_64(ptr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok(__gu_addr, size)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: \ + __get_user_8(__gu_addr); \ + break; \ + case 2: \ + __get_user_16(__gu_addr); \ + break; \ + case 4: \ + __get_user_32(__gu_addr); \ + break; \ + case 8: \ + __get_user_64(__gu_addr); \ + break; \ + default: \ + __get_user_unknown(); \ + break; \ + } \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ +}) + +struct __large_struct { unsigned long buf[100]; }; +#define __m(x) (*(struct __large_struct __user *)(x)) + +#define __get_user_64(addr) \ + __asm__("1: ldl %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_32(addr) \ + __asm__("1: ldw %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_16(addr) \ + __asm__("1: ldhu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +#define __get_user_8(addr) \ + __asm__("1: ldbu %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0, 2b-1b(%1)\n" \ + ".previous" \ + : "=r"(__gu_val), "=r"(__gu_err) \ + : "m"(__m(addr)), "1"(__gu_err)) + +extern void __put_user_unknown(void); + +#define __put_user_nocheck(x, ptr, size) \ +({ \ + long __pu_err = 0; \ + __chk_user_ptr(ptr); \ + switch (size) { \ + case 1: \ + __put_user_8(x, ptr); \ + break; \ + case 2: \ + __put_user_16(x, ptr); \ + break; \ + case 4: \ + __put_user_32(x, ptr); \ + break; \ + case 8: \ + __put_user_64(x, ptr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + __pu_err; \ +}) + +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok(__pu_addr, size)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: \ + __put_user_8(x, __pu_addr); \ + break; \ + case 2: \ + __put_user_16(x, __pu_addr); \ + break; \ + case 4: \ + __put_user_32(x, __pu_addr); \ + break; \ + case 8: \ + __put_user_64(x, __pu_addr); \ + break; \ + default: \ + __put_user_unknown(); \ + break; \ + } \ + } \ + __pu_err; \ +}) + +/* + * The "__put_user_xx()" macros tell gcc they read from memory + * instead of writing: this is because they do not write to + * any memory gcc knows about, so there are no aliasing issues + */ +#define __put_user_64(x, addr) \ +__asm__ __volatile__("1: stl %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m" (__m(addr)), "rJ" 
(x), "0"(__pu_err)) + +#define __put_user_32(x, addr) \ +__asm__ __volatile__("1: stw %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_16(x, addr) \ +__asm__ __volatile__("1: sth %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +#define __put_user_8(x, addr) \ +__asm__ __volatile__("1: stb %r2, %1\n" \ + "2:\n" \ + ".section __ex_table, \"a\"\n" \ + " .long 1b - .\n" \ + " ldi $31, 2b-1b(%0)\n" \ + ".previous" \ + : "=r"(__pu_err) \ + : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) + +/* + * Complex access routines + */ + +extern long __copy_user(void *to, const void *from, long len); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long len) +{ + return __copy_user(to, (__force const void *)from, len); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long len) +{ + return __copy_user((__force void *)to, from, len); +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER + +extern long __clear_user(void __user *to, long len); + +static inline long +clear_user(void __user *to, long len) +{ + if (__access_ok(to, len)) + len = __clear_user(to, len); + return len; +} + +#define user_addr_max() (uaccess_kernel() ? ~0UL : TASK_SIZE) + +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); + +#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE +struct page; +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len); +extern unsigned long __must_check __copy_user_flushcache(void *to, + const void __user *from, unsigned long n); + +static inline int +__copy_from_user_flushcache(void *dst, const void __user *src, unsigned long size) +{ + kasan_check_write(dst, size); + return __copy_user_flushcache(dst, src, size); +} +#endif + +#include +#endif /* _ASM_SW64_UACCESS_H */ diff --git a/arch/sw_64/include/asm/uncore_io_junzhang.h b/arch/sw_64/include/asm/uncore_io_junzhang.h new file mode 100644 index 0000000000000000000000000000000000000000..37cfe1fd68070c3ec194c181e4f416a790713947 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_junzhang.h @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_JUNZHANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_VT_MEMIO (0xc0000000UL) +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | SPBU_BASE | 0x40000UL) + 
+/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0xfff00000UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define SPBU_BASE (0x3UL << 36) +#define INTPU_BASE (0x3aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x8UL << 10) + +#define PIUCONFIG0_INIT_VAL 0x38016 + +/*-----------------------addr-----------------------*/ +/* INTPU REG */ +enum { + DEVINT_MISS = INTPU_BASE | 0x100UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + SPBUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR0 = INTPU_BASE | 0x1080UL, + IINT_MISS_VECTOR1 = INTPU_BASE | 0x1100UL, + IINT_MISS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + ADR_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* SPBU CSR */ +enum { + SMP_INFO = SPBU_BASE | 0x80UL, + INIT_CTL = SPBU_BASE | 0x680UL, + CORE_ONLINE = SPBU_BASE | 0x780UL, + DLI_RLTD_FAULT = SPBU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = SPBU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = SPBU_BASE | 0xa80UL, + CFG_INFO = SPBU_BASE | 0x1100UL, + IO_START = SPBU_BASE | 0x1300UL, + I2C0_SRST_L = SPBU_BASE | 0x1900UL, + I2C1_SRST_L = SPBU_BASE | 0x1980UL, + I2C2_SRST_L = SPBU_BASE | 0x1a00UL, + MCU_DVC_INT = SPBU_BASE | 0x3000UL, + MCU_DVC_INT_EN = SPBU_BASE | 0x3080UL, + SI_FAULT_STAT = SPBU_BASE | 0x3100UL, + SI_FAULT_STAT_EN = SPBU_BASE | 0x3180UL, + SI_FAULT_INT_EN = SPBU_BASE | 0x3200UL, + ADR_CTL = SPBU_BASE | 0x3600UL, + MC_ONLINE = SPBU_BASE | 0x3780UL, + PIU_TOP0_CONFIG = SPBU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = SPBU_BASE | 0x4d00UL, + SOFT_INFO0 = SPBU_BASE | 0xa000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 
0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_junzhang.h b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h new file mode 100644 index 0000000000000000000000000000000000000000..95a3b5c8053192398cc48764ff790c4d032e7cbd --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_junzhang.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H +#define _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long cfg_info; + + cfg_info = sw64_io_read(0, CFG_INFO); + cfg_info = (cfg_info >> 33) & 0x3; + cpus = 1 << cfg_info; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long total_mem; + + total_mem = sw64_io_read(node, CFG_INFO) >> 3; + total_mem = (total_mem & 0xffff) << 28; + node_mem = total_mem / __get_cpu_nums(); + + return node_mem; +} + +#define __io_read_longtime(node) (0UL) +#define __io_write_longtime(node, data) do { } while (0) +#define __io_write_longtime_start_en(node, data) do { } while (0) + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, FAULT_INT_CONFIG, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_JUNZHANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_ops_xuelang.h b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h new file mode 100644 index 0000000000000000000000000000000000000000..9336e473211d7ff131d54df51a3f820a4eaca4e7 --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_ops_xuelang.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_OPS_XUELANG_H +#define _ASM_SW64_UNCORE_IO_OPS_XUELANG_H + +static inline int __get_cpu_nums(void) +{ + int cpus; + unsigned long trkmode; + + trkmode = sw64_io_read(0, TRKMODE); + trkmode = (trkmode >> 6) & 0x3; + cpus = 1 << trkmode; + + return cpus; +} + +static inline unsigned long __get_node_mem(int node) +{ + unsigned long node_mem; + unsigned long mc_config; + unsigned long mc_online; + unsigned long mc_cap; + unsigned long mc_num; + + mc_config = sw64_io_read(node, MC_CAP_CFG) & 0xf; + mc_cap = (1UL << mc_config) << 28; + mc_online = 
sw64_io_read(node, MC_ONLINE) & 0xff; + mc_num = __kernel_ctpop(mc_online); + node_mem = mc_cap * mc_num; + + return node_mem; +} + +static inline unsigned long +__io_read_longtime(int node) +{ + return sw64_io_read(node, LONG_TIME); +} + +static inline void +__io_write_longtime(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME, data); +} + +static inline void +__io_write_longtime_start_en(int node, unsigned long data) +{ + sw64_io_write(node, LONG_TIME_START_EN, data); +} + +static inline void +__io_write_fault_int_en(int node, unsigned long data) +{ + sw64_io_write(node, DUAL_CG0_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG1_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG2_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG3_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG4_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG5_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG6_FAULT_INTEN, data); + sw64_io_write(node, DUAL_CG7_FAULT_INTEN, data); +} + +#endif /* _ASM_SW64_UNCORE_IO_OPS_XUELANG_H */ diff --git a/arch/sw_64/include/asm/uncore_io_xuelang.h b/arch/sw_64/include/asm/uncore_io_xuelang.h new file mode 100644 index 0000000000000000000000000000000000000000..aeaadec5be16378e0944490319f6225b57d418be --- /dev/null +++ b/arch/sw_64/include/asm/uncore_io_xuelang.h @@ -0,0 +1,323 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNCORE_IO_XUELANG_H +#define _ASM_SW64_UNCORE_IO_XUELANG_H + +#include + +#define IO_BASE (0x1UL << 47) +#define PCI_BASE (0x1UL << 43) +#define PCI_IOR0_BASE (0x2UL << 32) +#define PCI_IOR1_BASE (0x3UL << 32) + +#define PCI_RC_CFG (0x5UL << 32) + +#define PCI_EP_CFG (0x3UL << 33) +#define PCI_LEGACY_IO (0x1UL << 32) +#define PCI_LEGACY_IO_SIZE (0x100000000UL) +#define PCI_MEM_UNPRE 0x0UL +#define PCI_32BIT_MEMIO (0xe0000000UL) +#define PCI_32BIT_MEMIO_SIZE (0x20000000UL) +#define PCI_64BIT_MEMIO (0x1UL << 39) +#define PCI_64BIT_MEMIO_SIZE (0x8000000000UL) + +#define IO_RC_SHIFT 40 +#define IO_NODE_SHIFT 44 +#define IO_MARK_BIT 47 + +#define VT_MAX_CPUS_SHIFT 0 +#define VT_MAX_CPUS_MASK 0x3ff +#define VT_CORES_SHIFT 10 +#define VT_CORES_MASK 0x3ff +#define VT_THREADS_SHIFT 20 +#define VT_THREADS_MASK 0xfff + +#define QEMU_PRINTF_BUFF_BASE (IO_BASE | MCU_BASE | 0x40000UL) + +/* MSIConfig */ +#define MSICONFIG_VALID (0x1UL << 63) +#define MSICONFIG_EN (0x1UL << 62) +#define MSICONFIG_VECTOR_SHIFT 10 + +#define MSIX_MSG_ADDR (0x91abc0UL) + +#define SW64_PCI_IO_BASE(m, n) \ + (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT)) +#define SW64_IO_BASE(x) (IO_BASE | ((x) << IO_NODE_SHIFT)) + +#define SW64_PCI0_BUS 0 +#define PCI0_BUS SW64_PCI0_BUS + +#define MAX_NR_NODES 0x2 +#define MAX_NR_RCS 0x6 + +#define MCU_BASE (0x3UL << 36) +#define CAB0_BASE (0x10UL << 32) +#define INTPU_BASE (0x2aUL << 32) +#define IIC0_BASE (0x31UL << 32) +#define SPI_BASE (0x32UL << 32) +#define UART_BASE (0x33UL << 32) +#define IIC1_BASE (0x34UL << 32) +#define IIC2_BASE (0x35UL << 32) +#define GPIO_BASE (0x36UL << 32) +#define LPC_BASE (0x37UL << 32) +#define LPC_LEGACY_IO (0x1UL << 28 | IO_BASE | LPC_BASE) +#define LPC_MEM_IO (0x2UL << 28 | IO_BASE | LPC_BASE) +#define LPC_FIRMWARE_IO (0x3UL << 28 | IO_BASE | LPC_BASE) +#define DLIA_BASE (0x20UL << 32) +#define DLIB_BASE (0x21UL << 32) +#define DLIC_BASE (0x22UL << 32) +#define DLI_PHY_CTL (0x10UL << 24) +#define PCI_VT_LEGACY_IO (IO_BASE | PCI_BASE | PCI_LEGACY_IO) + +#define PME_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) +#define AER_ENABLE_INTD_CORE0 (0x1UL << 62 | 0x1UL << 10) + 
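SW64_PCI_IO_BASE() above composes an uncore address from the I/O mark bit (bit 47), the node ID field at IO_NODE_SHIFT, the PCI window bit and the root-complex ID field at IO_RC_SHIFT. A quick standalone check of the encoding (editorial; constants mirror the defines in this header):

    /* Editorial sketch: how SW64_PCI_IO_BASE() above builds an address. */
    #include <stdio.h>

    #define IO_BASE		(0x1UL << 47)
    #define PCI_BASE	(0x1UL << 43)
    #define IO_RC_SHIFT	40
    #define IO_NODE_SHIFT	44

    #define SW64_PCI_IO_BASE(m, n) \
            (IO_BASE | ((m) << IO_NODE_SHIFT) | PCI_BASE | ((n) << IO_RC_SHIFT))

    int main(void)
    {
            /* Node 1, root complex 2: bit 47 | 1 << 44 | bit 43 | 2 << 40. */
            printf("%#lx\n", SW64_PCI_IO_BASE(1UL, 2UL));	/* 0x9a0000000000 */
            return 0;
    }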
+#define PIUCONFIG0_INIT_VAL 0x38056 + +/*-----------------------addr-----------------------*/ +/* CAB0 REG */ +enum { + TRKMODE = CAB0_BASE | 0x80UL, +}; + +/* DLIA IO REG */ +enum { + DLIA_BWTEST_PAT = DLIA_BASE | 0x100980UL, + DLIA_PHY_VLDLANE = DLIA_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIB IO REG */ +enum { + DLIB_BWTEST_PAT = DLIB_BASE | 0x100980UL, + DLIB_PHY_VLDLANE = DLIB_BASE | DLI_PHY_CTL | 0x300UL, +}; + +/* DLIC IO REG */ +enum { + DLIC_BWTEST_PAT = DLIC_BASE | 0x100980UL, + DLIC_PHY_VLDLANE = DLIC_BASE | DLI_PHY_CTL | 0x300UL, +}; +/* INTPU REG */ +enum { + LCORE_SLEEPY = INTPU_BASE | 0x0UL, + LCORE_SLEEP = INTPU_BASE | 0x80UL, + DEVICE_MISS = INTPU_BASE | 0x100UL, + LONG_TIME = INTPU_BASE | 0x180UL, + LCORE_IDLE = INTPU_BASE | 0x280UL, + MT_INT_CONFIG = INTPU_BASE | 0x300UL, + DEV_INT_CONFIG = INTPU_BASE | 0x480UL, + FMT_ERR = INTPU_BASE | 0x700UL, + FAULT_INT_CONFIG = INTPU_BASE | 0x780UL, + SERR_CNTTH = INTPU_BASE | 0x880UL, + MCUSERR_CNT = INTPU_BASE | 0x900UL, + IRUSERR_CNT = INTPU_BASE | 0xa80UL, + ERRRPT_EN = INTPU_BASE | 0xb00UL, + IINT_MISS_VECTOR = INTPU_BASE | 0x1100UL, + IINT_MIS = INTPU_BASE | 0x1180UL, + IINT_MISS_RPTEN = INTPU_BASE | 0x1200UL, + DEVINT_MISS_RPTEN = INTPU_BASE | 0x1280UL, + ECCSERR = INTPU_BASE | 0x1300UL, + ECCSERR_RPTEN = INTPU_BASE | 0x1380UL, + ECCMERR = INTPU_BASE | 0x1400UL, + ECCMERR_RPTEN = INTPU_BASE | 0x1480UL, + DEVINT_WKEN = INTPU_BASE | 0x1500UL, + NMI_INT_CONFIG = INTPU_BASE | 0x1580UL, + DEVINTWK_INTEN = INTPU_BASE | 0x1600UL, +}; + +/* MC IO REG */ +enum { + CFGDEC = 0x400UL, + CFGCR = 0x480UL, + INIT_CTRL = 0x580UL, + CFGERR = 0xd00UL, + FSMSTAT = 0xe00UL, + PUB_INTERFACE = 0x1000UL, + POWERCTRL = 0x1080UL, + CFGMR0 = 0x1280UL, + CFGMR1 = 0x1300UL, + CFGMR2 = 0x1380UL, + CFGMR3 = 0x1400UL, + PERF_CTRL = 0x1480UL, + MC_PERF0 = 0x1500UL, + CFGMR4 = 0x1800UL, + CFGMR5 = 0x1880UL, + CFGMR6 = 0x1900UL, + MC_CTRL = 0x1c00UL, + MEMSERR_P = 0x1c80UL, + MEMSERR = 0x1d00UL, +}; + +/* MCU CSR */ +enum { + SMP_INFO = MCU_BASE | 0x80UL, + INIT_CTL = MCU_BASE | 0x680UL, + MT_STATE = MCU_BASE | 0x700UL, + CORE_ONLINE = MCU_BASE | 0x780UL, + MT_INT = MCU_BASE | 0x800UL, + MT_INT_END = MCU_BASE | 0x880UL, + CPU_ID = MCU_BASE | 0x900UL, + DLI_RLTD_FAULT = MCU_BASE | 0x980UL, + DLI_RLTD_FAULT_EN = MCU_BASE | 0xa00UL, + DLI_RLTD_FAULT_INTEN = MCU_BASE | 0xa80UL, + FAULT_SOURCE = MCU_BASE | 0xb00UL, + INT_SOURCE = MCU_BASE | 0xb80UL, + CORE_STATE0 = MCU_BASE | 0xc00UL, + CORE_STATE1 = MCU_BASE | 0xc80UL, + CFG_INFO = MCU_BASE | 0x1100UL, + MC_CAP_CFG = MCU_BASE | 0x1180UL, + IO_START = MCU_BASE | 0x1300UL, + UART_ONLINE = MCU_BASE | 0x1780UL, + I2C0_SRST_L = MCU_BASE | 0x1900UL, + I2C1_SRST_L = MCU_BASE | 0x1980UL, + I2C2_SRST_L = MCU_BASE | 0x1a00UL, + MCU_DVC_INT = MCU_BASE | 0x3000UL, + MCU_DVC_INT_EN = MCU_BASE | 0x3080UL, + SI_FAULT_STAT = MCU_BASE | 0x3100UL, + SI_FAULT_EN = MCU_BASE | 0x3180UL, + SI_FAULT_INT_EN = MCU_BASE | 0x3200UL, + FIFO_SYNSEL = MCU_BASE | 0x3400UL, + CPU_INFO = MCU_BASE | 0x3480UL, + WAKEUP_CTL = MCU_BASE | 0x3500UL, + FLAGREG = MCU_BASE | 0x3580UL, + NMI_CTL = MCU_BASE | 0x3600UL, + PIUPLL_CNT = MCU_BASE | 0x3680UL, + MC_ONLINE = MCU_BASE | 0x3780UL, + FLASH_INFO = MCU_BASE | 0x3800UL, + RTPUSROMCNT = MCU_BASE | 0x3880UL, + CLU_LV1_SEL = MCU_BASE | 0x3a80UL, + CLU_LV2_SEL = MCU_BASE | 0x3b00UL, + CLK_CTL = MCU_BASE | 0x3b80UL, + SLEEP_WAIT_CNT = MCU_BASE | 0x4980UL, + CHIP_ID = MCU_BASE | 0x4b00UL, + PIU_TOP0_CONFIG = MCU_BASE | 0x4c80UL, + PIU_TOP1_CONFIG = MCU_BASE | 0x4d00UL, + LVDS_CTL = MCU_BASE | 0x4d80UL, + 
LPC_DMAREQ_TOTH = MCU_BASE | 0x5100UL, + DLI_ONLINE = MCU_BASE | 0x6180UL, + LPC_DMAREQ_HADR = MCU_BASE | 0x6200UL, + PIU_PHY_SRST_H = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE0 = MCU_BASE | 0x6280UL, + CLK_SEL_PCIE1 = MCU_BASE | 0x6300UL, + CLK_SEL_PCIE2 = MCU_BASE | 0x6380UL, + CLK_SEL_PCIE3 = MCU_BASE | 0x6400UL, + CLK_SEL_PCIE4 = MCU_BASE | 0x6480UL, + CLK_SEL_PCIE5 = MCU_BASE | 0x6500UL, + PERST_N_PCIE0 = MCU_BASE | 0x6680UL, + PERST_N_PCIE1 = MCU_BASE | 0x6700UL, + PERST_N_PCIE2 = MCU_BASE | 0x6780UL, + PERST_N_PCIE3 = MCU_BASE | 0x6800UL, + PERST_N_PCIE4 = MCU_BASE | 0x6880UL, + PERST_N_PCIE5 = MCU_BASE | 0x6900UL, + BUTTON_RST_N_PCIE0 = MCU_BASE | 0x6a80UL, + BUTTON_RST_N_PCIE1 = MCU_BASE | 0x6b00UL, + BUTTON_RST_N_PCIE2 = MCU_BASE | 0x6b80UL, + BUTTON_RST_N_PCIE3 = MCU_BASE | 0x6c00UL, + BUTTON_RST_N_PCIE4 = MCU_BASE | 0x6c80UL, + BUTTON_RST_N_PCIE5 = MCU_BASE | 0x6d00UL, + DUAL_CG0_FAULT = MCU_BASE | 0x6d80UL, + DUAL_CG1_FAULT = MCU_BASE | 0x6e00UL, + DUAL_CG2_FAULT = MCU_BASE | 0x6e80UL, + DUAL_CG3_FAULT = MCU_BASE | 0x6f00UL, + DUAL_CG4_FAULT = MCU_BASE | 0x6f80UL, + DUAL_CG5_FAULT = MCU_BASE | 0x7000UL, + DUAL_CG6_FAULT = MCU_BASE | 0x7080UL, + DUAL_CG7_FAULT = MCU_BASE | 0x7100UL, + DUAL_CG0_FAULT_EN = MCU_BASE | 0x7180UL, + DUAL_CG1_FAULT_EN = MCU_BASE | 0x7200UL, + DUAL_CG2_FAULT_EN = MCU_BASE | 0x7280UL, + DUAL_CG3_FAULT_EN = MCU_BASE | 0x7300UL, + DUAL_CG4_FAULT_EN = MCU_BASE | 0x7380UL, + DUAL_CG5_FAULT_EN = MCU_BASE | 0x7400UL, + DUAL_CG6_FAULT_EN = MCU_BASE | 0x7480UL, + DUAL_CG7_FAULT_EN = MCU_BASE | 0x7500UL, + DUAL_CG0_FAULT_INTEN = MCU_BASE | 0x7580UL, + DUAL_CG1_FAULT_INTEN = MCU_BASE | 0x7600UL, + DUAL_CG2_FAULT_INTEN = MCU_BASE | 0x7680UL, + DUAL_CG3_FAULT_INTEN = MCU_BASE | 0x7700UL, + DUAL_CG4_FAULT_INTEN = MCU_BASE | 0x7780UL, + DUAL_CG5_FAULT_INTEN = MCU_BASE | 0x7800UL, + DUAL_CG6_FAULT_INTEN = MCU_BASE | 0x7880UL, + DUAL_CG7_FAULT_INTEN = MCU_BASE | 0x7900UL, + SOFT_INFO0 = MCU_BASE | 0x7f00UL, + LONG_TIME_START_EN = MCU_BASE | 0x9000UL, +}; + +/*--------------------------offset-----------------------------------*/ +/* PIU IOR0 */ +enum { + PIUCONFIG0 = 0x0UL, + EPDMABAR = 0x80UL, + IOMMUSEGITEM0 = 0x100UL, + IOMMUEXCPT_CTRL = 0x2100UL, + MSIADDR = 0x2180UL, + MSICONFIG0 = 0x2200UL, + INTACONFIG = 0xa200UL, + INTBCONFIG = 0xa280UL, + INTCCONFIG = 0xa300UL, + INTDCONFIG = 0xa380UL, + AERERRINTCONFIG = 0xa400UL, + AERERRMSICONFIG = 0xa480UL, + PMEINTCONFIG = 0xa500UL, + PMEMSICONFIG = 0xa580UL, + HPINTCONFIG = 0xa600UL, + HPMSICONFIG = 0xa680UL, + DTBASEADDR = 0xb000UL, + DTLB_FLUSHALL = 0xb080UL, + DTLB_FLUSHDEV = 0xb100UL, + PTLB_FLUSHALL = 0xb180UL, + PTLB_FLUSHDEV = 0xb200UL, + PTLB_FLUSHVADDR = 0xb280UL, + PCACHE_FLUSHALL = 0xb300UL, + PCACHE_FLUSHDEV = 0xb380UL, + PCACHE_FLUSHPADDR = 0xb400UL, + TIMEOUT_CONFIG = 0xb480UL, + IOMMUEXCPT_STATUS = 0xb500UL, + IOMMUPAGE_PADDR1 = 0xb580UL, + IOMMUPAGE_PADDR2 = 0xb600UL, + IOMMUPAGE_PADDR3 = 0xb680UL, + PTLB_ACCESS = 0xb700UL, + PTLB_ITEM_TAG = 0xb780UL, + PTLB_ITEM_DATA = 0xb800UL, + PCACHE_ACCESS = 0xb880UL, + PCACHE_ITEM_TAG = 0xb900UL, + PCACHE_ITEM_DATA0 = 0xb980UL, +}; + +/* PIU IOR1 */ +enum { + PIUCONFIG1 = 0x0UL, + ERRENABLE = 0x880UL, + RCDEBUGINF1 = 0xc80UL, + DCACONTROL = 0x1a00UL, + DEVICEID0 = 0x1a80UL, +}; + +/* RC */ +enum { + RC_VENDOR_ID = 0x0UL, + RC_COMMAND = 0x80UL, + RC_REVISION_ID = 0x100UL, + RC_PRIMARY_BUS = 0x300UL, + RC_MSI_CONTROL = 0xa00UL, + RC_EXP_DEVCAP = 0xe80UL, + RC_EXP_DEVCTL = 0xf00UL, + RC_SLOT_CTRL = 0x1100UL, + RC_LINK_STAT = 0x1000UL, + RC_CONTROL = 0X1180UL, + RC_STATUS = 
0X1200UL, + RC_EXP_DEVCTL2 = 0x1300UL, + RC_PORT_LINK_CTL = 0xe200UL, + RC_ORDER_RULE_CTL = 0x11680UL, + RC_MISC_CONTROL_1 = 0x11780UL, + RC_PHY_INT_REG = 0x80000UL, + RC_PHY_EXT_GEN1 = 0x82400UL, + RC_PHY_EXT_GEN2 = 0x82480UL, +}; +/* GPIO */ +enum { + GPIO_SWPORTA_DR = GPIO_BASE | 0x0UL, + GPIO_SWPORTA_DDR = GPIO_BASE | 0x200UL, +}; +/*--------------------------------------------------------------------------*/ +#endif /* _ASM_SW64_UNCORE_IO_XUELANG_H */ diff --git a/arch/sw_64/include/asm/unistd.h b/arch/sw_64/include/asm/unistd.h new file mode 100644 index 0000000000000000000000000000000000000000..6d1b8d1e201167d56c5e6ca8e0ecff89160b7af1 --- /dev/null +++ b/arch/sw_64/include/asm/unistd.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_UNISTD_H +#define _ASM_SW64_UNISTD_H + +#include + +#define NR_SYSCALLS __NR_syscalls +#define NR_syscalls NR_SYSCALLS + +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_OLD_READDIR +#define __ARCH_WANT_STAT64 +#define __ARCH_WANT_SYS_GETHOSTNAME +#define __ARCH_WANT_SYS_FADVISE64 +#define __ARCH_WANT_SYS_GETPGRP +#define __ARCH_WANT_SYS_OLD_GETRLIMIT +#define __ARCH_WANT_SYS_OLDUMOUNT +#define __ARCH_WANT_SYS_SIGPENDING +#define __ARCH_WANT_SYS_UTIME +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE +#define __ARCH_WANT_SYS_SOCKETCALL +#define __ARCH_WANT_SYS_SIGPROCMASK +#define __ARCH_WANT_SYS_CLONE3 + +#endif /* _ASM_SW64_UNISTD_H */ diff --git a/arch/sw_64/include/asm/uprobes.h b/arch/sw_64/include/asm/uprobes.h new file mode 100644 index 0000000000000000000000000000000000000000..fcd2026c3622e20a781107c70d414f075d1bf588 --- /dev/null +++ b/arch/sw_64/include/asm/uprobes.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ +#ifndef _ASM_SW64_UPROBES_H +#define _ASM_SW64_UPROBES_H + +#include +#include +#include + +/* + * We want this to be defined as union sw64_instruction but that makes the + * generic code blow up. 
+ */ +typedef u32 uprobe_opcode_t; + +#define MAX_UINSN_BYTES SW64_INSN_SIZE +#define UPROBE_XOL_SLOT_BYTES SW64_INSN_SIZE + +#define UPROBE_BRK_UPROBE 0x000d000d /* break 13 */ +#define UPROBE_BRK_UPROBE_XOL 0x000e000d /* break 14 */ + +#define UPROBE_SWBP_INSN UPROBE_BRK_UPROBE +#define UPROBE_SWBP_INSN_SIZE MAX_UINSN_BYTES + +struct arch_uprobe { + u32 insn; + u32 ixol[2]; +}; + +struct arch_uprobe_task { + unsigned long saved_trap_nr; +}; + +#ifdef CONFIG_UPROBES +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc); +#else +static inline void +sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) {} +#endif + +#endif /* _ASM_SW64_UPROBES_H */ diff --git a/arch/sw_64/include/asm/vcpu.h b/arch/sw_64/include/asm/vcpu.h new file mode 100644 index 0000000000000000000000000000000000000000..c4e3caacbc70c7409a3539317d781d8997eb5d6c --- /dev/null +++ b/arch/sw_64/include/asm/vcpu.h @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_SW64_VCPU_H +#define _ASM_SW64_VCPU_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_SUBARCH_C3B + +struct vcpucb { + unsigned long go_flag; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr; + unsigned long soft_tid; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_irq_vector; + unsigned long pri_base; + unsigned long stack_pc_dfault; + unsigned long guest_p20; + unsigned long guest_dfault_double; + unsigned long guest_irqs_pending; + unsigned long guest_hm_r30; + unsigned long migration_mark; + unsigned long guest_longtime; + unsigned long guest_longtime_offset; + unsigned long reserved[3]; +}; + +#else + +struct vcpucb { + unsigned long ktp; + unsigned long pcbb; + unsigned long ksp; + unsigned long usp; + unsigned long kgp; + unsigned long ent_arith; + unsigned long ent_if; + unsigned long ent_int; + unsigned long ent_mm; + unsigned long ent_sys; + unsigned long ent_una; + unsigned long stack_pc; + unsigned long new_a0; + unsigned long new_a1; + unsigned long new_a2; + unsigned long soft_cid; + unsigned long csr_save; + unsigned long wakeup_magic; + unsigned long host_vcpucb; + unsigned long upcr; + unsigned long vpcr; + unsigned long dtb_vpcr; + unsigned long dtb_upcr; + unsigned long guest_ksp; + unsigned long guest_usp; + unsigned long vcpu_irq_disabled; + unsigned long vcpu_irq; + unsigned long ptbr_usr; + unsigned long ptbr_sys; + unsigned long soft_tid; + unsigned long int_stat0; + unsigned long int_stat1; + unsigned long int_stat2; + unsigned long int_stat3; + unsigned long reset_entry; + unsigned long pvcpu; + unsigned long exit_reason; + unsigned long ipaddr; + unsigned long vcpu_pc_save; + unsigned long shtclock_offset; + unsigned long reserved[8]; +}; +#endif + +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_SW64_VCPU_H */ diff --git a/arch/sw_64/include/asm/vdso.h 
b/arch/sw_64/include/asm/vdso.h
new file mode 100644
index 0000000000000000000000000000000000000000..7a2e23c648f3d48c50ab72709e9d2091aee9410c
--- /dev/null
+++ b/arch/sw_64/include/asm/vdso.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SW64 Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see .
+ */
+#ifndef _ASM_SW64_VDSO_H
+#define _ASM_SW64_VDSO_H
+
+#ifdef __KERNEL__
+
+/*
+ * Default link address for the vDSO.
+ * Since we randomise the VDSO mapping, there's little point in trying
+ * to prelink this.
+ */
+#define VDSO_LBASE	0x0
+
+#ifndef __ASSEMBLY__
+
+#include
+#include
+#include
+#define VDSO_SYMBOL(base, name) \
+({ \
+	extern const unsigned long __vdso_##name; \
+	((unsigned long)(base) + __vdso_##name); \
+})
+
+struct vdso_data {
+	u64 xtime_sec;
+	u64 xtime_nsec;
+	u64 wall_to_mono_sec;
+	u64 wall_to_mono_nsec;
+	u32 cs_shift;
+	u32 cs_mult;
+	u64 cs_cycle_last;
+	u64 cs_mask;
+	s32 tz_minuteswest;
+	s32 tz_dsttime;
+	u32 seq_count;
+};
+
+static inline unsigned long get_vdso_base(void)
+{
+	unsigned long addr, tmp;
+	__asm__ __volatile__(
+	"	br	%1, 1f\n"
+	"1:	ldi	%0, 0(%1)\n"
+	: "=r" (addr), "=&r" (tmp)
+	::);
+
+	addr &= ~(PAGE_SIZE - 1);
+	return addr;
+}
+
+static inline const struct vdso_data *get_vdso_data(void)
+{
+	return (const struct vdso_data *)(get_vdso_base() - PAGE_SIZE);
+}
+
+static inline u32 vdso_data_read_begin(const struct vdso_data *data)
+{
+	u32 seq;
+
+	while (true) {
+		seq = READ_ONCE(data->seq_count);
+		if (likely(!(seq & 1))) {
+			/* Paired with smp_wmb() in vdso_data_write_*(). */
+			smp_rmb();
+			return seq;
+		}
+
+		cpu_relax();
+	}
+}
+
+static inline bool vdso_data_read_retry(const struct vdso_data *data,
+					u32 start_seq)
+{
+	/* Paired with smp_wmb() in vdso_data_write_*(). */
+	smp_rmb();
+	return unlikely(data->seq_count != start_seq);
+}
+
+static inline void vdso_data_write_begin(struct vdso_data *data)
+{
+	++data->seq_count;
+
+	/* Ensure sequence update is written before other data page values. */
+	smp_wmb();
+}
+
+static inline void vdso_data_write_end(struct vdso_data *data)
+{
+	/* Ensure data values are written before updating sequence again. */
+	smp_wmb();
+	++data->seq_count;
+}
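The seq_count protocol above is a classic seqlock: the writer makes the count odd while updating the data page, and readers retry until they observe the same even value on both sides of their reads. A kernel-style sketch of the reader loop (editorial; demo_read_xtime_ns is an invented name and the conversion math is only illustrative):

    /* Editorial sketch: how a vDSO time function would use the helpers above. */
    static inline u64 demo_read_xtime_ns(const struct vdso_data *data)
    {
            u64 sec, nsec;
            u32 seq;

            do {
                    seq = vdso_data_read_begin(data);	/* spins until even */
                    sec = data->xtime_sec;
                    nsec = data->xtime_nsec;
            } while (vdso_data_read_retry(data, seq));	/* writer raced: retry */

            return sec * 1000000000ULL + nsec;
    }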
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_SW64_VDSO_H */
diff --git a/arch/sw_64/include/asm/vmalloc.h b/arch/sw_64/include/asm/vmalloc.h
new file mode 100644
index 0000000000000000000000000000000000000000..a76d1133d6c6f1851615c9546c4ca36ec3f927ee
--- /dev/null
+++ b/arch/sw_64/include/asm/vmalloc.h
@@ -0,0 +1,5 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_VMALLOC_H
+#define _ASM_SW64_VMALLOC_H
+
+#endif /* _ASM_SW64_VMALLOC_H */
diff --git a/arch/sw_64/include/asm/word-at-a-time.h b/arch/sw_64/include/asm/word-at-a-time.h
new file mode 100644
index 0000000000000000000000000000000000000000..623efbec4429713d0ad20f116266b8f87cc4fc1d
--- /dev/null
+++ b/arch/sw_64/include/asm/word-at-a-time.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_WORD_AT_A_TIME_H
+#define _ASM_SW64_WORD_AT_A_TIME_H
+
+#include
+
+/*
+ * word-at-a-time interface for SW64.
+ */
+
+/*
+ * We do not use the word_at_a_time struct on SW64, but it needs to be
+ * implemented to humour the generic code.
+ */
+struct word_at_a_time {
+	const unsigned long unused;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { 0 }
+
+/* Return nonzero if val has a zero */
+static inline unsigned long has_zero(unsigned long val, unsigned long *bits,
+				     const struct word_at_a_time *c)
+{
+	unsigned long zero_locations = __kernel_cmpgeb(0, val);
+	*bits = zero_locations;
+	return zero_locations;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits,
+					   const struct word_at_a_time *c)
+{
+	return bits;
+}
+
+#define create_zero_mask(bits) (bits)
+
+static inline unsigned long find_zero(unsigned long bits)
+{
+	return __kernel_cttz(bits);
+}
+
+#define zero_bytemask(mask) ((2ul << (find_zero(mask) * 8)) - 1)
+
+#endif /* _ASM_SW64_WORD_AT_A_TIME_H */
diff --git a/arch/sw_64/include/asm/xchg.h b/arch/sw_64/include/asm/xchg.h
new file mode 100644
index 0000000000000000000000000000000000000000..38f067d5ed04330ad206fb10673f29f6f71d5001
--- /dev/null
+++ b/arch/sw_64/include/asm/xchg.h
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SW64_XCHG_H
+#define _ASM_SW64_XCHG_H
+
+#ifndef _ASM_SW64_CMPXCHG_H
+#error Do not include xchg.h directly. Use cmpxchg.h
+#endif
+/*
+ * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
+ * except that the local versions do not have the expensive memory
+ * barrier.  So this file is included twice from asm/cmpxchg.h.
+ */
+
+#if defined(CONFIG_SUBARCH_C3B)
+/*
+ * Atomic exchange.
+ * Since it can be used to implement critical sections
+ * it must clobber "memory" (also for interrupts in UP).
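+ *
+ * Editorial usage sketch (callers reach the ____xchg variants below
+ * through the generic xchg() wrapper from asm/cmpxchg.h; 'flag' is a
+ * made-up shared int):
+ *
+ *	old = xchg(&flag, 1);	- atomically store 1, return old value
+ *	if (old == 0)
+ *		... we are the one that set it ...
+ *	xchg(&flag, 0);		- release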
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " ldi %0, 1\n" + " wr_f %0\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " rd_f %2\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " ldi %1, 1\n" + " wr_f %1\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " rd_f %1\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: br 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
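+ *
+ * Editorial sketch of the usual cmpxchg() retry loop layered on the
+ * helpers below ('v' is a made-up shared long; cmpxchg() is the
+ * generic wrapper from asm/cmpxchg.h):
+ *
+ *	seen = READ_ONCE(v);
+ *	for (;;) {
+ *		old = cmpxchg(&v, seen, seen + 1);
+ *		if (old == seen)
+ *			break;		- our update won
+ *		seen = old;		- raced; retry against the new value
+ *	}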
+ */ + +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " wr_f %3\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " rd_f %2\n" + " beq %3, 2f\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " wr_f %1\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " rd_f %4\n" + " beq %1, 2f\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: br 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#elif defined(CONFIG_SUBARCH_C4) +/* + * Atomic exchange. + * Since it can be used to implement critical sections + * it must clobber "memory" (also for interrupts in UP). 
+ */ + +static inline unsigned long +____xchg(_u8, volatile char *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslb %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlb %2, %4, %0\n" + " masklb %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u16, volatile short *m, unsigned long val) +{ + unsigned long ret, tmp, addr64; + + __asm__ __volatile__( + " andnot %4, 7, %3\n" + " inslh %1, %4, %1\n" + "1: lldl %2, 0(%3)\n" + " extlh %2, %4, %0\n" + " masklh %2, %4, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%3)\n" + " beq %2, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (ret), "=&r" (val), "=&r" (tmp), "=&r" (addr64) + : "r" ((long)m), "1" (val) : "memory"); + + return ret; +} + +static inline unsigned long +____xchg(_u32, volatile int *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldw %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstw %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +static inline unsigned long +____xchg(_u64, volatile long *m, unsigned long val) +{ + unsigned long dummy, addr; + + __asm__ __volatile__( + " ldi %3, %5\n" + "1: lldl %0, 0(%3)\n" + " bis $31, %4, %1\n" + " lstl %1, 0(%3)\n" + " beq %1, 2f\n" + ".subsection 2\n" + "2: lbr 1b\n" + ".previous" + : "=&r" (val), "=&r" (dummy), "=m" (*m), "=&r"(addr) + : "rI" (val), "m" (*m) : "memory"); + + return val; +} + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + * + * The memory barrier should be placed in SMP only when we actually + * make the change. If we don't change anything (so if the returned + * prev is equal to old) then we aren't acquiring anything new and + * we don't need any memory barrier as far I can tell. 
+ */ +static inline unsigned long +____cmpxchg(_u8, volatile char *m, unsigned char old, unsigned char new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslb %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlb %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklb %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u16, volatile short *m, unsigned short old, unsigned short new) +{ + unsigned long prev, tmp, cmp, addr64; + + __asm__ __volatile__( + " andnot %5, 7, %4\n" + " inslh %1, %5, %1\n" + "1: lldl %2, 0(%4)\n" + " extlh %2, %5, %0\n" + " cmpeq %0, %6, %3\n" + " beq %3, 2f\n" + " masklh %2, %5, %2\n" + " or %1, %2, %2\n" + " lstl %2, 0(%4)\n" + " beq %2, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r" (prev), "=&r" (new), "=&r" (tmp), "=&r" (cmp), "=&r" (addr64) + : "r" ((long)m), "Ir" (old), "1" (new) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u32, volatile int *m, int old, int new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldw %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstw %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +static inline unsigned long +____cmpxchg(_u64, volatile long *m, unsigned long old, unsigned long new) +{ + unsigned long prev, cmp, addr, tmp; + + __asm__ __volatile__( + " ldi %3, %7\n" + "1: lldl %0, 0(%3)\n" + " cmpeq %0, %5, %1\n" + " beq %1, 2f\n" + " bis $31, %6, %4\n" + " lstl %4, 0(%3)\n" + " beq %4, 3f\n" + "2:\n" + ".subsection 2\n" + "3: lbr 1b\n" + ".previous" + : "=&r"(prev), "=&r"(cmp), "=m"(*m), "=&r"(addr), "=&r"(tmp) + : "r"((long) old), "r"(new), "m"(*m) : "memory"); + + return prev; +} + +#endif + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid xchg(). + */ +extern void __xchg_called_with_bad_pointer(void); + +static __always_inline unsigned long +____xchg(, volatile void *ptr, unsigned long x, int size) +{ + switch (size) { + case 1: + return ____xchg(_u8, ptr, x); + case 2: + return ____xchg(_u16, ptr, x); + case 4: + return ____xchg(_u32, ptr, x); + case 8: + return ____xchg(_u64, ptr, x); + } + __xchg_called_with_bad_pointer(); + return x; +} + +/* This function doesn't exist, so you'll get a linker error + * if something tries to do an invalid cmpxchg(). 
+ */ +extern void __cmpxchg_called_with_bad_pointer(void); + +static __always_inline unsigned long ____cmpxchg(, volatile void *ptr, + unsigned long old, + unsigned long new, int size) +{ + switch (size) { + case 1: + return ____cmpxchg(_u8, ptr, old, new); + case 2: + return ____cmpxchg(_u16, ptr, old, new); + case 4: + return ____cmpxchg(_u32, ptr, old, new); + case 8: + return ____cmpxchg(_u64, ptr, old, new); + } + __cmpxchg_called_with_bad_pointer(); + return old; +} + +#endif /* _ASM_SW64_XCHG_H */ diff --git a/arch/sw_64/include/asm/xor.h b/arch/sw_64/include/asm/xor.h new file mode 100644 index 0000000000000000000000000000000000000000..0aff8804f503e3f50f57d625b2454142e932d0b9 --- /dev/null +++ b/arch/sw_64/include/asm/xor.h @@ -0,0 +1,857 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized RAID-5 checksumming functions. + */ + +#ifndef _ASM_SW64_XOR_H +#define _ASM_SW64_XOR_H + +extern void xor_sw64_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +extern void xor_sw64_prefetch_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +extern void xor_sw64_prefetch_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +extern void xor_sw64_prefetch_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +extern void xor_sw64_prefetch_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +asm(" \n\ + .text \n\ + .align 3 \n\ + .ent xor_sw64_2 \n\ +xor_sw64_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + \n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + xor $0, $1, $0 # 7 cycles from $1 load \n\ + \n\ + ldl $27, 56($18) \n\ + xor $2, $3, $2 \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + \n\ + ret \n\ + .end xor_sw64_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_3 \n\ +xor_sw64_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ 
+ ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 6 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + xor $0, $1, $1 # 4 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + stl $2, 40($17) \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + \n\ + stl $5, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $20, 56($17) \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_4 \n\ +xor_sw64_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + \n\ + ldl $25, 48($19) \n\ + xor $3, $5, $5 \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + ldl $3, 56($20) \n\ + \n\ + stl $5, 32($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + xor $7, $22, $22 \n\ + xor $23, $24, $24 # 5 cycles from $24 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $25, $27, $27 # 5 cycles from $27 load \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 5 cycles from $1 load \n\ + \n\ + stl $27, 48($17) \n\ + xor $2, $3, $3 # 4 cycles from $3 
load \n\ + xor $1, $3, $3 \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_5 \n\ +xor_sw64_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + ldl $4, 24($20) \n\ + xor $28, $0, $0 # 7 cycles from $0 load \n\ + \n\ + ldl $5, 24($21) \n\ + xor $27, $0, $0 \n\ + ldl $6, 32($17) \n\ + ldl $7, 32($18) \n\ + \n\ + stl $0, 16($17) \n\ + xor $1, $2, $2 # 6 cycles from $2 load \n\ + ldl $22, 32($19) \n\ + xor $3, $4, $4 # 4 cycles from $4 load \n\ + \n\ + ldl $23, 32($20) \n\ + xor $2, $4, $4 \n\ + ldl $24, 32($21) \n\ + ldl $25, 40($17) \n\ + \n\ + ldl $27, 40($18) \n\ + ldl $28, 40($19) \n\ + ldl $0, 40($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $5, 24($17) \n\ + xor $6, $7, $7 # 7 cycles from $7 load \n\ + ldl $1, 40($21) \n\ + ldl $2, 48($17) \n\ + \n\ + ldl $3, 48($18) \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + ldl $4, 48($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + ldl $5, 48($20) \n\ + xor $22, $24, $24 \n\ + ldl $6, 48($21) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + \n\ + stl $24, 32($17) \n\ + xor $27, $28, $28 # 8 cycles from $28 load \n\ + ldl $7, 56($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + \n\ + ldl $22, 56($18) \n\ + ldl $23, 56($19) \n\ + ldl $24, 56($20) \n\ + ldl $25, 56($21) \n\ + \n\ + xor $28, $1, $1 \n\ + xor $2, $3, $3 # 9 cycles from $3 load \n\ + xor $3, $4, $4 # 9 cycles from $4 load \n\ + xor $5, $6, $6 # 8 cycles from $6 load \n\ + \n\ + stl $1, 40($17) \n\ + xor $4, $6, $6 \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + \n\ + stl $6, 48($17) \n\ + xor $22, $24, $24 \n\ + subl $16, 1, $16 \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $25, 56($17) \n\ + addl $21, 64, $21 \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + \n\ + addl $18, 64, $18 \n\ + addl $17, 64, $17 \n\ + bgt $16, 5b \n\ + ret \n\ + .end xor_sw64_5 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_2 \n\ +xor_sw64_prefetch_2: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + .align 4 \n\ +2: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 8($17) \n\ + ldl $3, 8($18) \n\ + \n\ + ldl $4, 16($17) \n\ + ldl $5, 16($18) \n\ + ldl $6, 24($17) \n\ + ldl $7, 24($18) \n\ + 
\n\ + ldl $19, 32($17) \n\ + ldl $20, 32($18) \n\ + ldl $21, 40($17) \n\ + ldl $22, 40($18) \n\ + \n\ + ldl $23, 48($17) \n\ + ldl $24, 48($18) \n\ + ldl $25, 56($17) \n\ + ldl $27, 56($18) \n\ + \n\ + fillde 256($17) \n\ + xor $0, $1, $0 # 8 cycles from $1 load \n\ + fillde 256($18) \n\ + xor $2, $3, $2 \n\ + \n\ + stl $0, 0($17) \n\ + xor $4, $5, $4 \n\ + stl $2, 8($17) \n\ + xor $6, $7, $6 \n\ + \n\ + stl $4, 16($17) \n\ + xor $19, $20, $19 \n\ + stl $6, 24($17) \n\ + xor $21, $22, $21 \n\ + \n\ + stl $19, 32($17) \n\ + xor $23, $24, $23 \n\ + stl $21, 40($17) \n\ + xor $25, $27, $25 \n\ + \n\ + stl $23, 48($17) \n\ + subl $16, 1, $16 \n\ + stl $25, 56($17) \n\ + addl $17, 64, $17 \n\ + \n\ + addl $18, 64, $18 \n\ + bgt $16, 2b \n\ + ret \n\ + .end xor_sw64_prefetch_2 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_3 \n\ +xor_sw64_prefetch_3: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + .align 4 \n\ +3: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 8($17) \n\ + \n\ + ldl $4, 8($18) \n\ + ldl $6, 16($17) \n\ + ldl $7, 16($18) \n\ + ldl $21, 24($17) \n\ + \n\ + ldl $22, 24($18) \n\ + ldl $24, 32($17) \n\ + ldl $25, 32($18) \n\ + ldl $5, 8($19) \n\ + \n\ + ldl $20, 16($19) \n\ + ldl $23, 24($19) \n\ + ldl $27, 32($19) \n\ + \n\ + xor $0, $1, $1 # 8 cycles from $0 load \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + xor $6, $7, $7 # 6 cycles from $7 load \n\ + xor $21, $22, $22 # 5 cycles from $22 load \n\ + \n\ + xor $1, $2, $2 # 9 cycles from $2 load \n\ + xor $24, $25, $25 # 5 cycles from $25 load \n\ + stl $2, 0($17) \n\ + xor $4, $5, $5 # 6 cycles from $5 load \n\ + \n\ + stl $5, 8($17) \n\ + xor $7, $20, $20 # 7 cycles from $20 load \n\ + stl $20, 16($17) \n\ + xor $22, $23, $23 # 7 cycles from $23 load \n\ + \n\ + stl $23, 24($17) \n\ + xor $25, $27, $27 # 7 cycles from $27 load \n\ + stl $27, 32($17) \n\ + \n\ + ldl $0, 40($17) \n\ + ldl $1, 40($18) \n\ + ldl $3, 48($17) \n\ + ldl $4, 48($18) \n\ + \n\ + ldl $6, 56($17) \n\ + ldl $7, 56($18) \n\ + ldl $2, 40($19) \n\ + ldl $5, 48($19) \n\ + \n\ + ldl $20, 56($19) \n\ + fillde 256($17) \n\ + fillde 256($18) \n\ + fillde 256($19) \n\ + \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + xor $3, $4, $4 # 5 cycles from $4 load \n\ + xor $6, $7, $7 # 5 cycles from $7 load \n\ + xor $1, $2, $2 # 4 cycles from $2 load \n\ + \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + xor $7, $20, $20 # 4 cycles from $20 load \n\ + stl $2, 40($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $5, 48($17) \n\ + addl $19, 64, $19 \n\ + stl $20, 56($17) \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 3b \n\ + ret \n\ + .end xor_sw64_prefetch_3 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_4 \n\ +xor_sw64_prefetch_4: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + .align 4 \n\ +4: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ 
+ ldl $4, 8($17) \n\ + ldl $5, 8($18) \n\ + ldl $6, 8($19) \n\ + ldl $7, 8($20) \n\ + \n\ + ldl $21, 16($17) \n\ + ldl $22, 16($18) \n\ + ldl $23, 16($19) \n\ + ldl $24, 16($20) \n\ + \n\ + ldl $25, 24($17) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $27, 24($18) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 24($19) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($20) \n\ + xor $4, $5, $5 # 7 cycles from $5 load \n\ + \n\ + stl $3, 0($17) \n\ + xor $6, $7, $7 \n\ + xor $21, $22, $22 # 7 cycles from $22 load \n\ + xor $5, $7, $7 \n\ + \n\ + stl $7, 8($17) \n\ + xor $23, $24, $24 # 7 cycles from $24 load \n\ + ldl $2, 32($17) \n\ + xor $22, $24, $24 \n\ + \n\ + ldl $3, 32($18) \n\ + ldl $4, 32($19) \n\ + ldl $5, 32($20) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + \n\ + ldl $6, 40($17) \n\ + ldl $7, 40($18) \n\ + ldl $21, 40($19) \n\ + ldl $22, 40($20) \n\ + \n\ + stl $24, 16($17) \n\ + xor $0, $1, $1 # 9 cycles from $1 load \n\ + xor $2, $3, $3 # 5 cycles from $3 load \n\ + xor $27, $1, $1 \n\ + \n\ + stl $1, 24($17) \n\ + xor $4, $5, $5 # 5 cycles from $5 load \n\ + ldl $23, 48($17) \n\ + xor $3, $5, $5 \n\ + \n\ + ldl $24, 48($18) \n\ + ldl $25, 48($19) \n\ + ldl $27, 48($20) \n\ + ldl $0, 56($17) \n\ + \n\ + ldl $1, 56($18) \n\ + ldl $2, 56($19) \n\ + ldl $3, 56($20) \n\ + xor $6, $7, $7 # 8 cycles from $6 load \n\ + \n\ + fillde 256($17) \n\ + xor $21, $22, $22 # 8 cycles from $22 load \n\ + fillde 256($18) \n\ + xor $7, $22, $22 \n\ + \n\ + fillde 256($19) \n\ + xor $23, $24, $24 # 6 cycles from $24 load \n\ + fillde 256($20) \n\ + xor $25, $27, $27 # 6 cycles from $27 load \n\ + \n\ + stl $5, 32($17) \n\ + xor $24, $27, $27 \n\ + xor $0, $1, $1 # 7 cycles from $1 load \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + stl $22, 40($17) \n\ + xor $1, $3, $3 \n\ + stl $27, 48($17) \n\ + subl $16, 1, $16 \n\ + \n\ + stl $3, 56($17) \n\ + addl $20, 64, $20 \n\ + addl $19, 64, $19 \n\ + addl $18, 64, $18 \n\ + \n\ + addl $17, 64, $17 \n\ + bgt $16, 4b \n\ + ret \n\ + .end xor_sw64_prefetch_4 \n\ + \n\ + .align 3 \n\ + .ent xor_sw64_prefetch_5 \n\ +xor_sw64_prefetch_5: \n\ + .prologue 0 \n\ + srl $16, 6, $16 \n\ + \n\ + fillde 0($17) \n\ + fillde 0($18) \n\ + fillde 0($19) \n\ + fillde 0($20) \n\ + fillde 0($21) \n\ + \n\ + fillde 64($17) \n\ + fillde 64($18) \n\ + fillde 64($19) \n\ + fillde 64($20) \n\ + fillde 64($21) \n\ + \n\ + fillde 128($17) \n\ + fillde 128($18) \n\ + fillde 128($19) \n\ + fillde 128($20) \n\ + fillde 128($21) \n\ + \n\ + fillde 192($17) \n\ + fillde 192($18) \n\ + fillde 192($19) \n\ + fillde 192($20) \n\ + fillde 192($21) \n\ + .align 4 \n\ +5: \n\ + ldl $0, 0($17) \n\ + ldl $1, 0($18) \n\ + ldl $2, 0($19) \n\ + ldl $3, 0($20) \n\ + \n\ + ldl $4, 0($21) \n\ + ldl $5, 8($17) \n\ + ldl $6, 8($18) \n\ + ldl $7, 8($19) \n\ + \n\ + ldl $22, 8($20) \n\ + ldl $23, 8($21) \n\ + ldl $24, 16($17) \n\ + ldl $25, 16($18) \n\ + \n\ + ldl $27, 16($19) \n\ + xor $0, $1, $1 # 6 cycles from $1 load \n\ + ldl $28, 16($20) \n\ + xor $2, $3, $3 # 6 cycles from $3 load \n\ + \n\ + ldl $0, 16($21) \n\ + xor $1, $3, $3 \n\ + ldl $1, 24($17) \n\ + xor $3, $4, $4 # 7 cycles from $4 load \n\ + \n\ + stl $4, 0($17) \n\ + xor $5, $6, $6 # 7 cycles from $6 load \n\ + xor $7, $22, $22 # 7 cycles from $22 load \n\ + xor $6, $23, $23 # 7 cycles from $23 load \n\ + \n\ + ldl $2, 24($18) \n\ + xor $22, $23, $23 \n\ + ldl $3, 24($19) \n\ + xor $24, $25, $25 # 8 cycles from $25 load \n\ + \n\ + stl $23, 8($17) \n\ + xor $25, $27, $27 # 8 cycles from $27 load \n\ + 
ldl $4, 24($20) \n\
+	xor $28, $0, $0		# 7 cycles from $0 load \n\
+	\n\
+	ldl $5, 24($21) \n\
+	xor $27, $0, $0 \n\
+	ldl $6, 32($17) \n\
+	ldl $7, 32($18) \n\
+	\n\
+	stl $0, 16($17) \n\
+	xor $1, $2, $2		# 6 cycles from $2 load \n\
+	ldl $22, 32($19) \n\
+	xor $3, $4, $4		# 4 cycles from $4 load \n\
+	\n\
+	ldl $23, 32($20) \n\
+	xor $2, $4, $4 \n\
+	ldl $24, 32($21) \n\
+	ldl $25, 40($17) \n\
+	\n\
+	ldl $27, 40($18) \n\
+	ldl $28, 40($19) \n\
+	ldl $0, 40($20) \n\
+	xor $4, $5, $5		# 7 cycles from $5 load \n\
+	\n\
+	stl $5, 24($17) \n\
+	xor $6, $7, $7		# 7 cycles from $7 load \n\
+	ldl $1, 40($21) \n\
+	ldl $2, 48($17) \n\
+	\n\
+	ldl $3, 48($18) \n\
+	xor $7, $22, $22	# 7 cycles from $22 load \n\
+	ldl $4, 48($19) \n\
+	xor $23, $24, $24	# 6 cycles from $24 load \n\
+	\n\
+	ldl $5, 48($20) \n\
+	xor $22, $24, $24 \n\
+	ldl $6, 48($21) \n\
+	xor $25, $27, $27	# 7 cycles from $27 load \n\
+	\n\
+	stl $24, 32($17) \n\
+	xor $27, $28, $28	# 8 cycles from $28 load \n\
+	ldl $7, 56($17) \n\
+	xor $0, $1, $1		# 6 cycles from $1 load \n\
+	\n\
+	ldl $22, 56($18) \n\
+	ldl $23, 56($19) \n\
+	ldl $24, 56($20) \n\
+	ldl $25, 56($21) \n\
+	\n\
+	fillde 256($17) \n\
+	xor $28, $1, $1 \n\
+	fillde 256($18) \n\
+	xor $2, $3, $3		# 9 cycles from $3 load \n\
+	\n\
+	fillde 256($19) \n\
+	xor $3, $4, $4		# 9 cycles from $4 load \n\
+	fillde 256($20) \n\
+	xor $5, $6, $6		# 8 cycles from $6 load \n\
+	\n\
+	stl $1, 40($17) \n\
+	xor $4, $6, $6 \n\
+	xor $7, $22, $22	# 7 cycles from $22 load \n\
+	xor $23, $24, $24	# 6 cycles from $24 load \n\
+	\n\
+	stl $6, 48($17) \n\
+	xor $22, $24, $24 \n\
+	fillde 256($21) \n\
+	xor $24, $25, $25	# 8 cycles from $25 load \n\
+	\n\
+	stl $25, 56($17) \n\
+	subl $16, 1, $16 \n\
+	addl $21, 64, $21 \n\
+	addl $20, 64, $20 \n\
+	\n\
+	addl $19, 64, $19 \n\
+	addl $18, 64, $18 \n\
+	addl $17, 64, $17 \n\
+	bgt $16, 5b \n\
+	\n\
+	ret \n\
+	.end xor_sw64_prefetch_5 \n\
+");
+
+static struct xor_block_template xor_block_sw64 = {
+	.name = "sw64",
+	.do_2 = xor_sw64_2,
+	.do_3 = xor_sw64_3,
+	.do_4 = xor_sw64_4,
+	.do_5 = xor_sw64_5,
+};
+
+static struct xor_block_template xor_block_sw64_prefetch = {
+	.name = "sw64 prefetch",
+	.do_2 = xor_sw64_prefetch_2,
+	.do_3 = xor_sw64_prefetch_3,
+	.do_4 = xor_sw64_prefetch_4,
+	.do_5 = xor_sw64_prefetch_5,
+};
+
+/* For grins, also test the generic routines. */
+#include <asm-generic/xor.h>
+
+#undef XOR_TRY_TEMPLATES
+#define XOR_TRY_TEMPLATES \
+	do { \
+		xor_speed(&xor_block_8regs); \
+		xor_speed(&xor_block_32regs); \
+		xor_speed(&xor_block_sw64); \
+		xor_speed(&xor_block_sw64_prefetch); \
+	} while (0)
+
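Each of the hand-scheduled routines above is semantically just a word-wise XOR over 64-byte cache lines; the unrolling and the interleaved load/xor/store scheduling only serve to hide load latency. A minimal portable sketch of what xor_sw64_2 computes, assuming (as the xor_blocks() caller is expected to guarantee) that bytes is a non-zero multiple of 64::

    /* Reference semantics of xor_sw64_2: p1 ^= p2 over 'bytes' bytes,
     * eight 64-bit words per 64-byte line, mirroring the
     * "srl $16, 6, $16" trip-count setup in the assembly.
     */
    static void xor_reference_2(unsigned long bytes,
                                unsigned long *restrict p1,
                                const unsigned long *restrict p2)
    {
            unsigned long lines = bytes / 64;

            while (lines--) {
                    for (int i = 0; i < 8; i++)
                            p1[i] ^= p2[i];
                    p1 += 8;
                    p2 += 8;
            }
    }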
+/* Force the use of sw64_prefetch as it is significantly
+ * faster in the cold cache case.
+ */
+#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sw64_prefetch)
+
+#endif /* _ASM_SW64_XOR_H */
diff --git a/arch/sw_64/include/uapi/asm/Kbuild b/arch/sw_64/include/uapi/asm/Kbuild
new file mode 100644
index 0000000000000000000000000000000000000000..15700040f13870d24902a9cb9ed60961b6144cca
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/Kbuild
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+# UAPI Header export list
+
+generic-y += kvm_para.h
+generated-y += unistd_64.h
diff --git a/arch/sw_64/include/uapi/asm/auxvec.h b/arch/sw_64/include/uapi/asm/auxvec.h
new file mode 100644
index 0000000000000000000000000000000000000000..309a8294be7a839fac7c55fe5959d8b1ad1404fc
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/auxvec.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_AUXVEC_H
+#define _UAPI_ASM_SW64_AUXVEC_H
+
+/* VDSO location. */
+#define AT_SYSINFO_EHDR 33
+
+/* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 1
+
+#endif /* _UAPI_ASM_SW64_AUXVEC_H */
diff --git a/arch/sw_64/include/uapi/asm/bitsperlong.h b/arch/sw_64/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000000000000000000000000000000000000..712c823e23d82cc177f74a77fed021a68e35a941
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_BITSPERLONG_H
+#define _UAPI_ASM_SW64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_SW64_BITSPERLONG_H */
diff --git a/arch/sw_64/include/uapi/asm/bootparam.h b/arch/sw_64/include/uapi/asm/bootparam.h
new file mode 100644
index 0000000000000000000000000000000000000000..6ce75d65e86e293e541a21a15e3f0a68afc96293
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/bootparam.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_BOOTPARAM_H
+#define _UAPI_ASM_SW64_BOOTPARAM_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+struct boot_params {
+	__u64 initrd_start;		/* logical address of initrd */
+	__u64 initrd_size;		/* size of initrd */
+	__u64 dtb_start;		/* logical address of dtb */
+	__u64 efi_systab;		/* logical address of EFI system table */
+	__u64 efi_memmap;		/* logical address of EFI memory map */
+	__u64 efi_memmap_size;		/* size of EFI memory map */
+	__u64 efi_memdesc_size;		/* size of an EFI memory map descriptor */
+	__u64 efi_memdesc_version;	/* memory descriptor version */
+	__u64 cmdline;			/* logical address of cmdline */
+};
+#endif
+
+#endif /* _UAPI_ASM_SW64_BOOTPARAM_H */
diff --git a/arch/sw_64/include/uapi/asm/bpf_perf_event.h b/arch/sw_64/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 0000000000000000000000000000000000000000..52f6f1e555f162ef7668965386cc758125726224
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_BPF_PERF_EVENT_H
+#define _UAPI_ASM_SW64_BPF_PERF_EVENT_H
+
+#include <asm/ptrace.h>
+
+typedef struct user_pt_regs bpf_user_pt_regs_t;
+
+#endif /* _UAPI_ASM_SW64_BPF_PERF_EVENT_H */
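The AT_SYSINFO_EHDR entry exported through auxvec.h above is how userspace locates the vDSO mapping. A hedged userspace sketch, assuming glibc's getauxval(3) is available::

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
            /* Address of the vDSO ELF header, or 0 if none is mapped. */
            unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

            if (vdso)
                    printf("vDSO ELF header mapped at %#lx\n", vdso);
            return 0;
    }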
diff --git a/arch/sw_64/include/uapi/asm/byteorder.h b/arch/sw_64/include/uapi/asm/byteorder.h
new file mode 100644
index 0000000000000000000000000000000000000000..ededdd045e96b2dc915be110f6d278a5bbc58654
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/byteorder.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_BYTEORDER_H
+#define _UAPI_ASM_SW64_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI_ASM_SW64_BYTEORDER_H */
diff --git a/arch/sw_64/include/uapi/asm/compiler.h b/arch/sw_64/include/uapi/asm/compiler.h
new file mode 100644
index 0000000000000000000000000000000000000000..64786df0f2668734957147e7ba30dbb52feb8dd1
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/compiler.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_COMPILER_H
+#define _UAPI_ASM_SW64_COMPILER_H
+
+/*
+ * Herein are macros we use when describing various patterns we want to
+ * convey to GCC. In all cases we can get better schedules out of the
+ * compiler if we hide as little as possible inside inline assembly.
+ * However, we want to be able to know what we'll get out before giving
+ * up inline assembly. Thus these tests and macros.
+ */
+
+#define __kernel_inslb(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("inslb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_inslh(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("inslh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_insll(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("insll %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_inshw(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("inshw %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_extlb(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("extlb %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_extlh(val, shift) \
+({ \
+	unsigned long __kir; \
+	__asm__("extlh %2, %1, %0" : "=r"(__kir) : "rI"(shift), "r"(val));\
+	__kir; \
+})
+
+#define __kernel_cmpgeb(a, b) \
+({ \
+	unsigned long __kir; \
+	__asm__("cmpgeb %r2, %1, %0" : "=r"(__kir) : "rI"(b), "rJ"(a)); \
+	__kir; \
+})
+
+#define __kernel_cttz(x) \
+({ \
+	unsigned long __kir; \
+	__asm__("cttz %1, %0" : "=r"(__kir) : "r"(x)); \
+	__kir; \
+})
+
+#define __kernel_ctlz(x) \
+({ \
+	unsigned long __kir; \
+	__asm__("ctlz %1, %0" : "=r"(__kir) : "r"(x)); \
+	__kir; \
+})
+
+#define __kernel_ctpop(x) \
+({ \
+	unsigned long __kir; \
+	__asm__("ctpop %1, %0" : "=r"(__kir) : "r"(x)); \
+	__kir; \
+})
+
+#endif /* _UAPI_ASM_SW64_COMPILER_H */
diff --git a/arch/sw_64/include/uapi/asm/errno.h b/arch/sw_64/include/uapi/asm/errno.h
new file mode 100644
index 0000000000000000000000000000000000000000..969ee99ee86c7a72c125a903e438a2373cf36c4a
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_ERRNO_H
+#define _UAPI_ASM_SW64_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef EAGAIN /* 11 in errno-base.h */
+
+#define EDEADLK 11 /* Resource deadlock would occur */
+
+#define EAGAIN 35 /* Try again */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define EINPROGRESS 36 /* Operation now in progress */
+#define EALREADY 37 /* Operation already in progress */
+#define ENOTSOCK 38 /* Socket operation on non-socket */
+#define EDESTADDRREQ 39 /* Destination address required */
+#define EMSGSIZE 40 /* Message too long */
+#define EPROTOTYPE 41 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 42 /* Protocol not available */
+#define EPROTONOSUPPORT 43 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 44 /* Socket type not supported */
+#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 46
/* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale NFS file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _UAPI_ASM_SW64_ERRNO_H */ diff --git a/arch/sw_64/include/uapi/asm/fcntl.h b/arch/sw_64/include/uapi/asm/fcntl.h new file mode 100644 index 0000000000000000000000000000000000000000..be2daae2cc4dc1a1f7d9a8b653ade486e7329db9 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/fcntl.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_FCNTL_H +#define _UAPI_ASM_SW64_FCNTL_H + +#define O_CREAT 01000 /* not fcntl */ +#define O_TRUNC 02000 /* not fcntl */ +#define O_EXCL 04000 /* not fcntl */ +#define O_NOCTTY 010000 /* not fcntl */ + +#define O_NONBLOCK 00004 +#define O_APPEND 00010 +#define O_DSYNC 040000 /* used to be O_SYNC, see below */ +#define O_DIRECTORY 0100000 /* must be a directory */ +#define O_NOFOLLOW 0200000 /* don't follow links */ +#define O_LARGEFILE 0400000 /* will be set by the kernel on every open */ +#define O_DIRECT 02000000 /* direct disk access */ +#define 
O_NOATIME 04000000
+#define O_CLOEXEC 010000000 /* set close_on_exec */
+/*
+ * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using
+ * the O_SYNC flag. We continue to use the existing numerical value
+ * for O_DSYNC semantics now, but using the correct symbolic name for it.
+ * This new value is used to request true Posix O_SYNC semantics. It is
+ * defined in this strange way to make sure applications compiled against
+ * new headers get at least O_DSYNC semantics on older kernels.
+ *
+ * This has the nice side-effect that we can simply test for O_DSYNC
+ * wherever we do not care if O_DSYNC or O_SYNC is used.
+ *
+ * Note: __O_SYNC must never be used directly.
+ */
+#define __O_SYNC 020000000
+#define O_SYNC (__O_SYNC|O_DSYNC)
+
+#define O_PATH 040000000
+#define __O_TMPFILE 0100000000
+
+#define F_GETLK 7
+#define F_SETLK 8
+#define F_SETLKW 9
+
+#define F_SETOWN 5 /* for sockets. */
+#define F_GETOWN 6 /* for sockets. */
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+
+/* for posix fcntl() and lockf() */
+#define F_RDLCK 1
+#define F_WRLCK 2
+#define F_UNLCK 8
+
+/* for old implementation of bsd flock () */
+#define F_EXLCK 16 /* or 3 */
+#define F_SHLCK 32 /* or 4 */
+
+#include <asm-generic/fcntl.h>
+
+#endif /* _UAPI_ASM_SW64_FCNTL_H */
diff --git a/arch/sw_64/include/uapi/asm/fpu.h b/arch/sw_64/include/uapi/asm/fpu.h
new file mode 100644
index 0000000000000000000000000000000000000000..8945816c542b07ac363b11fc4f9150313ebb23ad
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/fpu.h
@@ -0,0 +1,233 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_FPU_H
+#define _UAPI_ASM_SW64_FPU_H
+
+/*
+ * SW-64 floating-point control register defines:
+ */
+#define FPCR_DNOD (1UL << 47) /* denorm INV trap disable */
+#ifdef CONFIG_SUBARCH_C3B
+#define FPCR_DNZ (1UL << 48) /* denorms to zero */
+#else
+#define FPCR_DNOE (1UL << 48) /* hardware denormal support */
+#endif
+#define FPCR_INVD (1UL << 49) /* invalid op disable (opt.) */
+#define FPCR_DZED (1UL << 50) /* division by zero disable (opt.) */
+#define FPCR_OVFD (1UL << 51) /* overflow disable (optional) */
+#define FPCR_INV (1UL << 52) /* invalid operation */
+#define FPCR_DZE (1UL << 53) /* division by zero */
+#define FPCR_OVF (1UL << 54) /* overflow */
+#define FPCR_UNF (1UL << 55) /* underflow */
+#define FPCR_INE (1UL << 56) /* inexact */
+#define FPCR_IOV (1UL << 57) /* integer overflow */
+#define FPCR_UNDZ (1UL << 60) /* underflow to zero (opt.) */
+#define FPCR_UNFD (1UL << 61) /* underflow disable (opt.) */
+#define FPCR_INED (1UL << 62) /* inexact disable (opt.) */
+#define FPCR_SUM (1UL << 63) /* summary bit */
+
+#define FPCR_DYN_SHIFT 58 /* first dynamic rounding mode bit */
+#define FPCR_DYN_CHOPPED (0x0UL << FPCR_DYN_SHIFT) /* towards 0 */
+#define FPCR_DYN_MINUS (0x1UL << FPCR_DYN_SHIFT) /* towards -INF */
+#define FPCR_DYN_NORMAL (0x2UL << FPCR_DYN_SHIFT) /* towards nearest */
+#define FPCR_DYN_PLUS (0x3UL << FPCR_DYN_SHIFT) /* towards +INF */
+#define FPCR_DYN_MASK (0x3UL << FPCR_DYN_SHIFT)
+
+#define FPCR_MASK 0xffff800000000000L
+
+#ifdef CONFIG_SUBARCH_C3B
+#define FPCR_INIT FPCR_DYN_NORMAL
+#else
+#define FPCR_INIT (FPCR_DYN_NORMAL | FPCR_DNOE)
+#endif
+
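The dynamic rounding-mode field and the trap-disable bits above combine by plain OR. A hedged sketch of composing and decoding an FPCR value with these macros (the variable names are illustrative)::

    /* Round to nearest, with underflowed results flushed to zero. */
    unsigned long fpcr = FPCR_DYN_NORMAL | FPCR_UNDZ | FPCR_UNFD;

    /* Recover the 2-bit dynamic rounding mode (0..3). */
    unsigned long rm = (fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT;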
+/* status bits coming from hardware fpcr, defined by fire3 */
+#define FPCR_STATUS_INV0 (1UL << 52)
+#define FPCR_STATUS_DZE0 (1UL << 53)
+#define FPCR_STATUS_OVF0 (1UL << 54)
+#define FPCR_STATUS_UNF0 (1UL << 55)
+#define FPCR_STATUS_INE0 (1UL << 56)
+#define FPCR_STATUS_OVI0 (1UL << 57)
+
+#define FPCR_STATUS_INV1 (1UL << 36)
+#define FPCR_STATUS_DZE1 (1UL << 37)
+#define FPCR_STATUS_OVF1 (1UL << 38)
+#define FPCR_STATUS_UNF1 (1UL << 39)
+#define FPCR_STATUS_INE1 (1UL << 40)
+#define FPCR_STATUS_OVI1 (1UL << 41)
+
+#define FPCR_STATUS_INV2 (1UL << 20)
+#define FPCR_STATUS_DZE2 (1UL << 21)
+#define FPCR_STATUS_OVF2 (1UL << 22)
+#define FPCR_STATUS_UNF2 (1UL << 23)
+#define FPCR_STATUS_INE2 (1UL << 24)
+#define FPCR_STATUS_OVI2 (1UL << 25)
+
+#define FPCR_STATUS_INV3 (1UL << 4)
+#define FPCR_STATUS_DZE3 (1UL << 5)
+#define FPCR_STATUS_OVF3 (1UL << 6)
+#define FPCR_STATUS_UNF3 (1UL << 7)
+#define FPCR_STATUS_INE3 (1UL << 8)
+#define FPCR_STATUS_OVI3 (1UL << 9)
+
+#define FPCR_STATUS_MASK0 (FPCR_STATUS_INV0 | FPCR_STATUS_DZE0 | \
+			   FPCR_STATUS_OVF0 | FPCR_STATUS_UNF0 | \
+			   FPCR_STATUS_INE0 | FPCR_STATUS_OVI0)
+
+#define FPCR_STATUS_MASK1 (FPCR_STATUS_INV1 | FPCR_STATUS_DZE1 | \
+			   FPCR_STATUS_OVF1 | FPCR_STATUS_UNF1 | \
+			   FPCR_STATUS_INE1 | FPCR_STATUS_OVI1)
+
+#define FPCR_STATUS_MASK2 (FPCR_STATUS_INV2 | FPCR_STATUS_DZE2 | \
+			   FPCR_STATUS_OVF2 | FPCR_STATUS_UNF2 | \
+			   FPCR_STATUS_INE2 | FPCR_STATUS_OVI2)
+
+#define FPCR_STATUS_MASK3 (FPCR_STATUS_INV3 | FPCR_STATUS_DZE3 | \
+			   FPCR_STATUS_OVF3 | FPCR_STATUS_UNF3 | \
+			   FPCR_STATUS_INE3 | FPCR_STATUS_OVI3)
+
+
+/*
+ * IEEE trap enables are implemented in software. These per-thread
+ * bits are stored in the "ieee_state" field of "struct thread_info".
+ * Thus, the bits are defined so as not to conflict with the
+ * floating-point enable bit (which is architected).
+ */
+#define IEEE_TRAP_ENABLE_INV (1UL << 1) /* invalid op */
+#define IEEE_TRAP_ENABLE_DZE (1UL << 2) /* division by zero */
+#define IEEE_TRAP_ENABLE_OVF (1UL << 3) /* overflow */
+#define IEEE_TRAP_ENABLE_UNF (1UL << 4) /* underflow */
+#define IEEE_TRAP_ENABLE_INE (1UL << 5) /* inexact */
+#define IEEE_TRAP_ENABLE_DNO (1UL << 6) /* denorm */
+#define IEEE_TRAP_ENABLE_MASK (IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE |\
+			       IEEE_TRAP_ENABLE_OVF | IEEE_TRAP_ENABLE_UNF |\
+			       IEEE_TRAP_ENABLE_INE | IEEE_TRAP_ENABLE_DNO)
+
+/* Denorm and Underflow flushing */
+#define IEEE_MAP_DMZ (1UL << 12) /* Map denorm inputs to zero */
+#define IEEE_MAP_UMZ (1UL << 13) /* Map underflowed outputs to zero */
+
+#define IEEE_MAP_MASK (IEEE_MAP_DMZ | IEEE_MAP_UMZ)
+
+/* status bits coming from fpcr: */
+#define IEEE_STATUS_INV (1UL << 17)
+#define IEEE_STATUS_DZE (1UL << 18)
+#define IEEE_STATUS_OVF (1UL << 19)
+#define IEEE_STATUS_UNF (1UL << 20)
+#define IEEE_STATUS_INE (1UL << 21)
+#define IEEE_STATUS_DNO (1UL << 22)
+
+
+#define IEEE_STATUS_MASK (IEEE_STATUS_INV | IEEE_STATUS_DZE | \
+			  IEEE_STATUS_OVF | IEEE_STATUS_UNF | \
+			  IEEE_STATUS_INE | IEEE_STATUS_DNO)
+
+#define IEEE_SW_MASK (IEEE_TRAP_ENABLE_MASK | \
+		      IEEE_STATUS_MASK | IEEE_MAP_MASK)
+
+#define IEEE_CURRENT_RM_SHIFT 32
+#define IEEE_CURRENT_RM_MASK (3UL << IEEE_CURRENT_RM_SHIFT)
+
+#define IEEE_STATUS_TO_EXCSUM_SHIFT 16
+
+#define IEEE_INHERIT (1UL << 63) /* inherit on thread create? */
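These software bits live in a per-thread word, so enabling traps and inspecting accrued status are plain bit operations. A hedged sketch (the variable name swcr is illustrative)::

    /* Trap on invalid operation and divide-by-zero; flush
     * underflowed results to zero rather than trapping.
     */
    unsigned long swcr = IEEE_TRAP_ENABLE_INV | IEEE_TRAP_ENABLE_DZE
                       | IEEE_MAP_UMZ;

    /* Test and clear any accrued exception status. */
    if (swcr & IEEE_STATUS_MASK)
            swcr &= ~IEEE_STATUS_MASK;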
+
+/* ieee_state expanded to support SIMD, added by fire3 */
+
+#define IEEE_STATUS_INV0 (1UL << 17)
+#define IEEE_STATUS_DZE0 (1UL << 18)
+#define IEEE_STATUS_OVF0 (1UL << 19)
+#define IEEE_STATUS_UNF0 (1UL << 20)
+#define IEEE_STATUS_INE0 (1UL << 21)
+#define IEEE_STATUS_DNO0 (1UL << 22)
+#define IEEE_STATUS_MASK0 (IEEE_STATUS_INV0 | IEEE_STATUS_DZE0 | \
+			   IEEE_STATUS_OVF0 | IEEE_STATUS_UNF0 | \
+			   IEEE_STATUS_INE0 | IEEE_STATUS_DNO0)
+
+#define IEEE_STATUS0_TO_EXCSUM_SHIFT 16
+
+#define IEEE_STATUS_INV1 (1UL << 23)
+#define IEEE_STATUS_DZE1 (1UL << 24)
+#define IEEE_STATUS_OVF1 (1UL << 25)
+#define IEEE_STATUS_UNF1 (1UL << 26)
+#define IEEE_STATUS_INE1 (1UL << 27)
+#define IEEE_STATUS_DNO1 (1UL << 28)
+#define IEEE_STATUS_MASK1 (IEEE_STATUS_INV1 | IEEE_STATUS_DZE1 | \
+			   IEEE_STATUS_OVF1 | IEEE_STATUS_UNF1 | \
+			   IEEE_STATUS_INE1 | IEEE_STATUS_DNO1)
+
+#define IEEE_STATUS1_TO_EXCSUM_SHIFT 22
+
+#define IEEE_STATUS_INV2 (1UL << 34)
+#define IEEE_STATUS_DZE2 (1UL << 35)
+#define IEEE_STATUS_OVF2 (1UL << 36)
+#define IEEE_STATUS_UNF2 (1UL << 37)
+#define IEEE_STATUS_INE2 (1UL << 38)
+#define IEEE_STATUS_DNO2 (1UL << 39)
+#define IEEE_STATUS_MASK2 (IEEE_STATUS_INV2 | IEEE_STATUS_DZE2 | \
+			   IEEE_STATUS_OVF2 | IEEE_STATUS_UNF2 | \
+			   IEEE_STATUS_INE2 | IEEE_STATUS_DNO2)
+
+#define IEEE_STATUS2_TO_EXCSUM_SHIFT 33
+
+#define IEEE_STATUS_INV3 (1UL << 40)
+#define IEEE_STATUS_DZE3 (1UL << 41)
+#define IEEE_STATUS_OVF3 (1UL << 42)
+#define IEEE_STATUS_UNF3 (1UL << 43)
+#define IEEE_STATUS_INE3 (1UL << 44)
+#define IEEE_STATUS_DNO3 (1UL << 45)
+#define IEEE_STATUS_MASK3 (IEEE_STATUS_INV3 | IEEE_STATUS_DZE3 | \
+			   IEEE_STATUS_OVF3 | IEEE_STATUS_UNF3 | \
+			   IEEE_STATUS_INE3 | IEEE_STATUS_DNO3)
+
+#define IEEE_STATUS3_TO_EXCSUM_SHIFT 39
+
+
+/*
+ * Convert the software IEEE trap enable and status bits into the
+ * hardware fpcr format.
+ *
+ * Digital Unix engineers receive my thanks for not defining the
+ * software bits identical to the hardware bits. The chip designers
+ * receive my thanks for making all the not-implemented fpcr bits
+ * RAZ forcing us to use system calls to read/write this value.
+ */
+static inline unsigned long
+ieee_swcr_to_fpcr(unsigned long sw)
+{
+	unsigned long fp;
+
+	fp = (sw & IEEE_STATUS_MASK0) << 35;
+	fp |= (sw & IEEE_STATUS_MASK1) << 13;
+	fp |= (sw & IEEE_STATUS_MASK2) >> 14;
+	fp |= (sw & IEEE_STATUS_MASK3) >> 36;
+
+	fp |= (sw & IEEE_MAP_DMZ) << 36;
+	fp |= (sw & IEEE_STATUS_MASK0 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK1 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK2 ? FPCR_SUM : 0);
+	fp |= (sw & IEEE_STATUS_MASK3 ? FPCR_SUM : 0);
+	fp |= (~sw & (IEEE_TRAP_ENABLE_INV
+		      | IEEE_TRAP_ENABLE_DZE
+		      | IEEE_TRAP_ENABLE_OVF)) << 48;
+	fp |= (~sw & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE)) << 57;
+	fp |= (sw & IEEE_MAP_UMZ ?
FPCR_UNDZ | FPCR_UNFD : 0); + fp |= (~sw & IEEE_TRAP_ENABLE_DNO) << 41; + return fp; +} + +static inline unsigned long +ieee_fpcr_to_swcr(unsigned long fp) +{ + unsigned long sw; + + sw = (fp >> 35) & IEEE_STATUS_MASK; + sw |= (fp >> 36) & IEEE_MAP_DMZ; + sw |= (~fp >> 48) & (IEEE_TRAP_ENABLE_INV + | IEEE_TRAP_ENABLE_DZE + | IEEE_TRAP_ENABLE_OVF); + sw |= (~fp >> 57) & (IEEE_TRAP_ENABLE_UNF | IEEE_TRAP_ENABLE_INE); + sw |= (fp >> 47) & IEEE_MAP_UMZ; + sw |= (~fp >> 41) & IEEE_TRAP_ENABLE_DNO; + return sw; +} +#endif /* _UAPI_ASM_SW64_FPU_H */ diff --git a/arch/sw_64/include/uapi/asm/gentrap.h b/arch/sw_64/include/uapi/asm/gentrap.h new file mode 100644 index 0000000000000000000000000000000000000000..3786b8b52add336464589167bf99296d59e74c65 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/gentrap.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_GENTRAP_H +#define _UAPI_ASM_SW64_GENTRAP_H + +/* + * Definitions for gentrap causes. They are generated by user-level + * programs and therefore should be compatible with the corresponding + * legacy definitions. + */ +#define GEN_INTOVF -1 /* integer overflow */ +#define GEN_INTDIV -2 /* integer division by zero */ +#define GEN_FLTOVF -3 /* fp overflow */ +#define GEN_FLTDIV -4 /* fp division by zero */ +#define GEN_FLTUND -5 /* fp underflow */ +#define GEN_FLTINV -6 /* invalid fp operand */ +#define GEN_FLTINE -7 /* inexact fp operand */ +#define GEN_DECOVF -8 /* decimal overflow (for COBOL??) */ +#define GEN_DECDIV -9 /* decimal division by zero */ +#define GEN_DECINV -10 /* invalid decimal operand */ +#define GEN_ROPRAND -11 /* reserved operand */ +#define GEN_ASSERTERR -12 /* assertion error */ +#define GEN_NULPTRERR -13 /* null pointer error */ +#define GEN_STKOVF -14 /* stack overflow */ +#define GEN_STRLENERR -15 /* string length error */ +#define GEN_SUBSTRERR -16 /* substring error */ +#define GEN_RANGERR -17 /* range error */ +#define GEN_SUBRNG -18 +#define GEN_SUBRNG1 -19 +#define GEN_SUBRNG2 -20 +#define GEN_SUBRNG3 -21 /* these report range errors for */ +#define GEN_SUBRNG4 -22 /* subscripting (indexing) at levels 0..7 */ +#define GEN_SUBRNG5 -23 +#define GEN_SUBRNG6 -24 +#define GEN_SUBRNG7 -25 + +/* the remaining codes (-26..-1023) are reserved. 
*/
+
+#endif /* _UAPI_ASM_SW64_GENTRAP_H */
diff --git a/arch/sw_64/include/uapi/asm/hmcall.h b/arch/sw_64/include/uapi/asm/hmcall.h
new file mode 100644
index 0000000000000000000000000000000000000000..6867fb7b4d244ee325b1d6294d1cae7453246af2
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/hmcall.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_HMCALL_H
+#define _UAPI_ASM_SW64_HMCALL_H
+
+/* hmcall may be used in user mode */
+
+#define HMC_bpt 0x80
+#define HMC_callsys 0x83
+#define HMC_imb 0x86
+#define HMC_rdtp 0x9E
+#define HMC_wrtp 0x9F
+#define HMC_rdunique HMC_rdtp
+#define HMC_wrunique HMC_wrtp
+#define HMC_gentrap 0xAA
+#define HMC_wrperfmon 0xB0
+
+#endif /* _UAPI_ASM_SW64_HMCALL_H */
diff --git a/arch/sw_64/include/uapi/asm/ioctl.h b/arch/sw_64/include/uapi/asm/ioctl.h
new file mode 100644
index 0000000000000000000000000000000000000000..fb5267b034fca832f44e0c0794b5d0248f0e86b8
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ioctl.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_IOCTL_H
+#define _UAPI_ASM_SW64_IOCTL_H
+
+#define _IOC_SIZEBITS 13
+#define _IOC_DIRBITS 3
+
+/*
+ * Direction bits _IOC_NONE could be 0, but legacy version gives it a bit.
+ * And this turns out useful to catch old ioctl numbers in header files for
+ * us.
+ */
+#define _IOC_NONE 1U
+#define _IOC_READ 2U
+#define _IOC_WRITE 4U
+
+#include <asm-generic/ioctl.h>
+
+#endif /* _UAPI_ASM_SW64_IOCTL_H */
diff --git a/arch/sw_64/include/uapi/asm/ioctls.h b/arch/sw_64/include/uapi/asm/ioctls.h
new file mode 100644
index 0000000000000000000000000000000000000000..36a7fc205aa787fe3d44b2bdf51fd4df47466e4b
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ioctls.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_IOCTLS_H
+#define _UAPI_ASM_SW64_IOCTLS_H
+
+#include <asm/ioctl.h>
+
+#define FIOCLEX _IO('f', 1)
+#define FIONCLEX _IO('f', 2)
+#define FIOASYNC _IOW('f', 125, int)
+#define FIONBIO _IOW('f', 126, int)
+#define FIONREAD _IOR('f', 127, int)
+#define TIOCINQ FIONREAD
+#define FIOQSIZE _IOR('f', 128, loff_t)
+
+#define TIOCGETP _IOR('t', 8, struct sgttyb)
+#define TIOCSETP _IOW('t', 9, struct sgttyb)
+#define TIOCSETN _IOW('t', 10, struct sgttyb) /* TIOCSETP wo flush */
+
+#define TIOCSETC _IOW('t', 17, struct tchars)
+#define TIOCGETC _IOR('t', 18, struct tchars)
+#define TCGETS _IOR('t', 19, struct termios)
+#define TCSETS _IOW('t', 20, struct termios)
+#define TCSETSW _IOW('t', 21, struct termios)
+#define TCSETSF _IOW('t', 22, struct termios)
+
+#define TCGETA _IOR('t', 23, struct termio)
+#define TCSETA _IOW('t', 24, struct termio)
+#define TCSETAW _IOW('t', 25, struct termio)
+#define TCSETAF _IOW('t', 28, struct termio)
+
+#define TCSBRK _IO('t', 29)
+#define TCXONC _IO('t', 30)
+#define TCFLSH _IO('t', 31)
+
+#define TCGETS2 _IOR('T', 42, struct termios2)
+#define TCSETS2 _IOW('T', 43, struct termios2)
+#define TCSETSW2 _IOW('T', 44, struct termios2)
+#define TCSETSF2 _IOW('T', 45, struct termios2)
+
+#define TIOCSWINSZ _IOW('t', 103, struct winsize)
+#define TIOCGWINSZ _IOR('t', 104, struct winsize)
+#define TIOCSTART _IO('t', 110) /* start output, like ^Q */
+#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */
+#define TIOCOUTQ _IOR('t', 115, int) /* output queue size */
+
+#define TIOCGLTC _IOR('t', 116, struct ltchars)
+#define TIOCSLTC _IOW('t', 117, struct ltchars)
+#define TIOCSPGRP _IOW('t', 118, int)
+#define TIOCGPGRP _IOR('t', 119, int)
+
+#define
TIOCEXCL 0x540C +#define TIOCNXCL 0x540D +#define TIOCSCTTY 0x540E + +#define TIOCSTI 0x5412 +#define TIOCMGET 0x5415 +#define TIOCMBIS 0x5416 +#define TIOCMBIC 0x5417 +#define TIOCMSET 0x5418 +#define TIOCM_LE 0x001 +#define TIOCM_DTR 0x002 +#define TIOCM_RTS 0x004 +#define TIOCM_ST 0x008 +#define TIOCM_SR 0x010 +#define TIOCM_CTS 0x020 +#define TIOCM_CAR 0x040 +#define TIOCM_RNG 0x080 +#define TIOCM_DSR 0x100 +#define TIOCM_CD TIOCM_CAR +#define TIOCM_RI TIOCM_RNG +#define TIOCM_OUT1 0x2000 +#define TIOCM_OUT2 0x4000 +#define TIOCM_LOOP 0x8000 + +#define TIOCGSOFTCAR 0x5419 +#define TIOCSSOFTCAR 0x541A +#define TIOCLINUX 0x541C +#define TIOCCONS 0x541D +#define TIOCGSERIAL 0x541E +#define TIOCSSERIAL 0x541F +#define TIOCPKT 0x5420 +#define TIOCPKT_DATA 0 +#define TIOCPKT_FLUSHREAD 1 +#define TIOCPKT_FLUSHWRITE 2 +#define TIOCPKT_STOP 4 +#define TIOCPKT_START 8 +#define TIOCPKT_NOSTOP 16 +#define TIOCPKT_DOSTOP 32 +#define TIOCPKT_IOCTL 64 + + +#define TIOCNOTTY 0x5422 +#define TIOCSETD 0x5423 +#define TIOCGETD 0x5424 +#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TIOCSBRK 0x5427 /* BSD compatibility */ +#define TIOCCBRK 0x5428 /* BSD compatibility */ +#define TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TIOCGRS485 _IOR('T', 0x2E, struct serial_rs485) +#define TIOCSRS485 _IOWR('T', 0x2F, struct serial_rs485) +#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */ +#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */ +#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */ +#define TIOCVHANGUP 0x5437 +#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */ +#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */ +#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */ +#define TIOCGPTPEER _IO('T', 0x41) /* Safely open the slave */ +#define TIOCGISO7816 _IOR('T', 0x42, struct serial_iso7816) +#define TIOCSISO7816 _IOWR('T', 0x43, struct serial_iso7816) + +#define TIOCSERCONFIG 0x5453 +#define TIOCSERGWILD 0x5454 +#define TIOCSERSWILD 0x5455 +#define TIOCGLCKTRMIOS 0x5456 +#define TIOCSLCKTRMIOS 0x5457 +#define TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TIOCSERGETLSR 0x5459 /* Get line status register */ +/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ + +#endif /* _UAPI_ASM_SW64_IOCTLS_H */ diff --git a/arch/sw_64/include/uapi/asm/kvm.h b/arch/sw_64/include/uapi/asm/kvm.h new file mode 100644 index 0000000000000000000000000000000000000000..2253475deaa5a12ca3c14637573f79ea4baf02c7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/kvm.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_KVM_H +#define _UAPI_ASM_SW64_KVM_H + +/* + * KVM SW specific structures and definitions. 
+ */ +#define SWVM_IRQS 256 +#define IRQ_PENDING_INTX_SHIFT 16 +#define IRQ_PENDING_MSI_VECTORS_SHIFT 17 + +enum SW64_KVM_IRQ { + SW64_KVM_IRQ_IPI = 27, + SW64_KVM_IRQ_TIMER = 9, + SW64_KVM_IRQ_KBD = 29, + SW64_KVM_IRQ_MOUSE = 30, +}; + +#define SWVM_VM_TYPE_DEFAULT 0 +#define SWVM_VM_TYPE_PHYVCPU 1 +#define __KVM_HAVE_IRQ_LINE + +#define SWVM_NUM_NUMA_MEMBANKS 1 +#define KVM_NR_IRQCHIPS 1 +/* + * for KVM_GET_REGS and KVM_SET_REGS + */ +struct kvm_regs { + unsigned long r0; + unsigned long r1; + unsigned long r2; + unsigned long r3; + + unsigned long r4; + unsigned long r5; + unsigned long r6; + unsigned long r7; + + unsigned long r8; + unsigned long r9; + unsigned long r10; + unsigned long r11; + + unsigned long r12; + unsigned long r13; + unsigned long r14; + unsigned long r15; + + unsigned long r19; + unsigned long r20; + unsigned long r21; + unsigned long r22; + + unsigned long r23; + unsigned long r24; + unsigned long r25; + unsigned long r26; + + unsigned long r27; + unsigned long r28; + unsigned long __padding0; + unsigned long fpcr; + + unsigned long fp[124]; + /* These are saved by HMcode: */ + unsigned long ps; + unsigned long pc; + unsigned long gp; + unsigned long r16; + unsigned long r17; + unsigned long r18; +}; + + +/* + * return stack for __sw64_vcpu_run + */ +struct vcpu_run_ret_stack { + unsigned long ra; + unsigned long r0; +}; + +struct host_int_args { + unsigned long r18; + unsigned long r17; + unsigned long r16; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { +}; + +struct hcall_args { + unsigned long arg0, arg1, arg2; +}; + +struct phyvcpu_hcall_args { + unsigned long call; + struct hcall_args args; +}; + +struct kvm_debug_exit_arch { + unsigned long epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct swvm_mem_bank { + unsigned long guest_phys_addr; + unsigned long host_phys_addr; + unsigned long host_addr; + unsigned long size; +}; + +struct swvm_mem { + struct swvm_mem_bank membank[SWVM_NUM_NUMA_MEMBANKS]; +}; + +#endif /* _UAPI_ASM_SW64_KVM_H */ diff --git a/arch/sw_64/include/uapi/asm/mman.h b/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..c83c4b50662a44c5f849990355ac763d4f210329 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,90 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_MMAN_H +#define _UAPI_ASM_SW64_MMAN_H + +#include + +#define PROT_READ 0x1 /* page can be read */ +#define PROT_WRITE 0x2 /* page can be written */ +#define PROT_EXEC 0x4 /* page can be executed */ +#define PROT_SEM 0x8 /* page may be used for atomic ops */ +#define PROT_NONE 0x0 /* page can not be accessed */ +#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */ +#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */ + +#define MAP_TYPE 0x0f /* Mask for type of mapping */ +#define MAP_FIXED 0x100 /* Interpret addr exactly */ +#define MAP_ANONYMOUS 0x10 /* don't use a file */ + +/* not used by linux, may be deprecated */ +#define _MAP_HASSEMAPHORE 0x0200 +#define _MAP_INHERIT 0x0400 +#define _MAP_UNALIGNED 0x0800 + +/* These are linux-specific */ +#define MAP_GROWSDOWN 0x01000 /* stack-like segment */ +#define MAP_DENYWRITE 0x02000 /* ETXTBSY */ +#define MAP_EXECUTABLE 0x04000 /* mark it as an executable */ +#define 
MAP_LOCKED 0x08000 /* lock the mapping */
+#define MAP_NORESERVE 0x10000 /* don't check for reservations */
+#define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x40000 /* do not block on IO */
+#define MAP_STACK 0x80000 /* give out an address that is best suited for process/thread stacks */
+#define MAP_HUGETLB 0x100000 /* create a huge page mapping */
+#define MAP_FIXED_NOREPLACE 0x200000 /* MAP_FIXED which doesn't unmap underlying mapping */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_SYNC 2 /* synchronous memory sync */
+#define MS_INVALIDATE 4 /* invalidate the caches */
+
+#define MCL_CURRENT 8192 /* lock all currently mapped pages */
+#define MCL_FUTURE 16384 /* lock all additions to address space */
+#define MCL_ONFAULT 32768 /* lock all pages that are faulted in */
+
+#define MLOCK_ONFAULT 0x01 /* Lock pages in range after they are faulted in, do not prefault */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_SPACEAVAIL 5 /* ensure resources are available */
+#define MADV_DONTNEED 6 /* don't need these pages */
+
+/* common/generic parameters */
+#define MADV_FREE 8 /* free pages only if memory pressure */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+#define MADV_MERGEABLE 12 /* KSM may merge identical pages */
+#define MADV_UNMERGEABLE 13 /* KSM may not merge identical pages */
+
+#define MADV_HUGEPAGE 14 /* Worth backing with hugepages */
+#define MADV_NOHUGEPAGE 15 /* Not worth backing with hugepages */
+
+#define MADV_DONTDUMP 16 /* Explicitly exclude from the core dump,
+			    overrides the coredump filter bits */
+#define MADV_DODUMP 17 /* Clear the MADV_DONTDUMP flag */
+
+#define MADV_WIPEONFORK 18 /* Zero memory on fork, child only */
+#define MADV_KEEPONFORK 19 /* Undo MADV_WIPEONFORK */
+
+#define MADV_COLD 20 /* deactivate these pages */
+#define MADV_PAGEOUT 21 /* reclaim these pages */
+
+#define MADV_POPULATE_READ 22 /* populate (prefault) page tables readable */
+#define MADV_POPULATE_WRITE 23 /* populate (prefault) page tables writable */
+
+#define MADV_DONTNEED_LOCKED 24 /* like DONTNEED, but drop locked pages too */
+
+#define MADV_COLLAPSE 25 /* Synchronous hugepage collapse */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+
+#define PKEY_DISABLE_ACCESS 0x1
+#define PKEY_DISABLE_WRITE 0x2
+#define PKEY_ACCESS_MASK (PKEY_DISABLE_ACCESS | PKEY_DISABLE_WRITE)
+
+#endif /* _UAPI_ASM_SW64_MMAN_H */
diff --git a/arch/sw_64/include/uapi/asm/param.h b/arch/sw_64/include/uapi/asm/param.h
new file mode 100644
index 0000000000000000000000000000000000000000..d38e8202dd97e8b4867e70bb3853da9440a1a0c1
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/param.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_PARAM_H
+#define _UAPI_ASM_SW64_PARAM_H
+
+#define EXEC_PAGESIZE 8192
+
+#include <asm-generic/param.h>
+
+#endif /* _UAPI_ASM_SW64_PARAM_H */
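The MAP_FIXED_NOREPLACE flag defined in mman.h above gives a fixed-address mapping that fails with EEXIST instead of silently clobbering an existing mapping; note that a base page here is EXEC_PAGESIZE (8 KiB) rather than the 4 KiB common on x86. A hedged userspace sketch (the hint address is invented for the example)::

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            void *hint = (void *)0x10000000UL;      /* hypothetical address */
            void *p = mmap(hint, 8192, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                           -1, 0);

            if (p == MAP_FAILED)
                    perror("mmap");         /* EEXIST if 'hint' is occupied */
            return 0;
    }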
diff --git a/arch/sw_64/include/uapi/asm/perf_regs.h b/arch/sw_64/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..871ad4663d1dbd29cd23395b977615323c67d81e
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_SW64_PERF_REGS_H
+#define _UAPI_ASM_SW64_PERF_REGS_H
+
+enum perf_event_sw64_regs {
+	PERF_REG_SW64_R0,
+	PERF_REG_SW64_R1,
+	PERF_REG_SW64_R2,
+	PERF_REG_SW64_R3,
+	PERF_REG_SW64_R4,
+	PERF_REG_SW64_R5,
+	PERF_REG_SW64_R6,
+	PERF_REG_SW64_R7,
+	PERF_REG_SW64_R8,
+	PERF_REG_SW64_R9,
+	PERF_REG_SW64_R10,
+	PERF_REG_SW64_R11,
+	PERF_REG_SW64_R12,
+	PERF_REG_SW64_R13,
+	PERF_REG_SW64_R14,
+	PERF_REG_SW64_R15,
+	PERF_REG_SW64_R16,
+	PERF_REG_SW64_R17,
+	PERF_REG_SW64_R18,
+	PERF_REG_SW64_R19,
+	PERF_REG_SW64_R20,
+	PERF_REG_SW64_R21,
+	PERF_REG_SW64_R22,
+	PERF_REG_SW64_R23,
+	PERF_REG_SW64_R24,
+	PERF_REG_SW64_R25,
+	PERF_REG_SW64_R26,
+	PERF_REG_SW64_R27,
+	PERF_REG_SW64_R28,
+	PERF_REG_SW64_GP,
+	PERF_REG_SW64_SP,
+	PERF_REG_SW64_PC,
+	PERF_REG_SW64_MAX,
+};
+#endif /* _UAPI_ASM_SW64_PERF_REGS_H */
diff --git a/arch/sw_64/include/uapi/asm/ptrace.h b/arch/sw_64/include/uapi/asm/ptrace.h
new file mode 100644
index 0000000000000000000000000000000000000000..3fd53450e418bcedc8faf45f37cb0e0bc8dca8ad
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ptrace.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_PTRACE_H
+#define _UAPI_ASM_SW64_PTRACE_H
+
+#include <linux/types.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * User structures for general purpose, floating point and debug registers.
+ */
+struct user_pt_regs {
+	__u64 regs[31];
+	__u64 pc;
+	__u64 pstate;
+};
+
+/* 256 bits aligned for simd */
+struct fpreg {
+	__u64 v[4] __attribute__((aligned(32)));
+};
+
+struct user_fpsimd_state {
+	struct fpreg fp[31];
+	__u64 fpcr;
+	__u64 __reserved[3];
+};
+#endif
+
+/* PTRACE_ATTACH is 16 */
+/* PTRACE_DETACH is 17 */
+
+#define PT_REG_BASE 0
+#define PT_REG_END 30
+#define PT_FPREG_BASE 32
+#define PT_FPREG_END 62
+#define PT_FPCR 63
+#define PT_PC 64
+#define PT_TP 65
+#define PT_UNIQUE PT_TP
+#define PT_VECREG_BASE 67
+#define PT_VECREG_END 161
+#define PT_F31_V1 98
+#define PT_F31_V2 130
+#define PT_DA_MATCH 163
+#define PT_DA_MASK 164
+#define PT_DV_MATCH 165
+#define PT_DV_MASK 166
+#define PT_DC_CTL 167
+#define PT_MATCH_CTL 167
+#define PT_IA_MATCH 168
+#define PT_IA_MASK 169
+#define PT_IV_MATCH 170
+#define PT_IDA_MATCH 171
+#define PT_IDA_MASK 172
+
+#endif /* _UAPI_ASM_SW64_PTRACE_H */
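A debugger consumes the user_pt_regs layout above through the regset interface rather than legacy peek offsets. A hedged debugger-side sketch (error handling omitted; on the target this would build against the uapi asm/ptrace.h)::

    #include <elf.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <asm/ptrace.h>

    static void read_gprs(pid_t pid, struct user_pt_regs *regs)
    {
            struct iovec iov = {
                    .iov_base = regs,
                    .iov_len = sizeof(*regs),
            };

            /* Fills regs[0..30], pc and pstate for the stopped tracee. */
            ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
    }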
diff --git a/arch/sw_64/include/uapi/asm/regdef.h b/arch/sw_64/include/uapi/asm/regdef.h
new file mode 100644
index 0000000000000000000000000000000000000000..7460a987c7267b85ffbe5238cf258944e7c0309c
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/regdef.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_REGDEF_H
+#define _UAPI_ASM_SW64_REGDEF_H
+
+#define v0 $0 /* function return value */
+
+#define t0 $1 /* temporary registers (caller-saved) */
+#define t1 $2
+#define t2 $3
+#define t3 $4
+#define t4 $5
+#define t5 $6
+#define t6 $7
+#define t7 $8
+
+#define s0 $9 /* saved-registers (callee-saved registers) */
+#define s1 $10
+#define s2 $11
+#define s3 $12
+#define s4 $13
+#define s5 $14
+#define s6 $15
+#define fp s6 /* frame-pointer (s6 in frame-less procedures) */
+
+#define a0 $16 /* argument registers (caller-saved) */
+#define a1 $17
+#define a2 $18
+#define a3 $19
+#define a4 $20
+#define a5 $21
+
+#define t8 $22 /* more temps (caller-saved) */
+#define t9 $23
+#define t10 $24
+#define t11 $25
+#define ra $26 /* return address register */
+#define t12 $27
+
+#define pv t12 /* procedure-variable register */
+#define AT $at /* assembler temporary */
+#define gp $29 /* global pointer */
+#define sp $30 /* stack pointer */
+#define zero $31 /* reads as zero, writes are noops */
+
+#endif /* _UAPI_ASM_SW64_REGDEF_H */
diff --git a/arch/sw_64/include/uapi/asm/resource.h b/arch/sw_64/include/uapi/asm/resource.h
new file mode 100644
index 0000000000000000000000000000000000000000..2e1ce8f6ee64cdf82e04eed3a5a571489040bb97
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/resource.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_RESOURCE_H
+#define _UAPI_ASM_SW64_RESOURCE_H
+
+/*
+ * SW-64/Linux-specific ordering of these four resource limit IDs,
+ * the rest comes from the generic header:
+ */
+#define RLIMIT_NOFILE 6 /* max number of open files */
+#define RLIMIT_AS 7 /* address space limit */
+#define RLIMIT_NPROC 8 /* max number of processes */
+#define RLIMIT_MEMLOCK 9 /* max locked-in-memory address space */
+
+#include <asm-generic/resource.h>
+
+#endif /* _UAPI_ASM_SW64_RESOURCE_H */
diff --git a/arch/sw_64/include/uapi/asm/setup.h b/arch/sw_64/include/uapi/asm/setup.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6cca45250491ca64bfc6a6c473da19f01984bb9
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/setup.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SETUP_H
+#define _UAPI_ASM_SW64_SETUP_H
+
+#define COMMAND_LINE_SIZE 2048
+
+#endif /* _UAPI_ASM_SW64_SETUP_H */
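COMMAND_LINE_SIZE above caps the kernel command line, so userspace can size a buffer for /proc/cmdline against the same constant. A hedged sketch (the constant is mirrored locally rather than pulled from a header)::

    #include <stdio.h>

    #define COMMAND_LINE_SIZE 2048  /* mirrors uapi/asm/setup.h */

    int main(void)
    {
            char buf[COMMAND_LINE_SIZE];
            FILE *f = fopen("/proc/cmdline", "r");

            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            fputs(buf, stdout);
                    fclose(f);
            }
            return 0;
    }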
+ */
+struct sigcontext {
+	long sc_onstack;
+	long sc_mask;
+	long sc_pc;
+	long sc_ps;
+	long sc_regs[32];
+	long sc_ownedfp;
+	long sc_fpregs[128]; /* SIMD-FP */
+	unsigned long sc_fpcr;
+	/* TODO: the following are unused; to be removed and synced with libc */
+	unsigned long sc_fp_control;
+	unsigned long sc_reserved1, sc_reserved2;
+	unsigned long sc_ssize;
+	char *sc_sbase;
+	unsigned long sc_traparg_a0;
+	unsigned long sc_traparg_a1;
+	unsigned long sc_traparg_a2;
+	unsigned long sc_fp_trap_pc;
+	unsigned long sc_fp_trigger_sum;
+	unsigned long sc_fp_trigger_inst;
+};
+
+
+#endif /* _UAPI_ASM_SW64_SIGCONTEXT_H */
diff --git a/arch/sw_64/include/uapi/asm/siginfo.h b/arch/sw_64/include/uapi/asm/siginfo.h
new file mode 100644
index 0000000000000000000000000000000000000000..f47fb917c9b2858ec3fbc9fd01517daf9edf1864
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/siginfo.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGINFO_H
+#define _UAPI_ASM_SW64_SIGINFO_H
+
+#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int))
+
+#include <asm-generic/siginfo.h>
+
+
+#endif /* _UAPI_ASM_SW64_SIGINFO_H */
diff --git a/arch/sw_64/include/uapi/asm/signal.h b/arch/sw_64/include/uapi/asm/signal.h
new file mode 100644
index 0000000000000000000000000000000000000000..0d7a935fe37c7827866f467c3e205e8d7b2a890a
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/signal.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SIGNAL_H
+#define _UAPI_ASM_SW64_SIGNAL_H
+
+#include <linux/types.h>
+
+/* Avoid too many header ordering problems. */
+struct siginfo;
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+#define NSIG 32
+typedef unsigned long sigset_t;
+
+#endif /* __KERNEL__ */
+
+
+/*
+ * Linux/sw64 has different signal numbers than Linux/i386.
+ */
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGEMT 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGBUS 10
+#define SIGSEGV 11
+#define SIGSYS 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGURG 16
+#define SIGSTOP 17
+#define SIGTSTP 18
+#define SIGCONT 19
+#define SIGCHLD 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGIO 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGINFO 29
+#define SIGUSR1 30
+#define SIGUSR2 31
+
+#define SIGPOLL SIGIO
+#define SIGPWR SIGINFO
+#define SIGIOT SIGABRT
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#define SIGRTMAX _NSIG
+
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
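+ *
+ * A minimal installation sketch (illustrative userspace code, assuming
+ * the usual libc sigaction(2) wrapper; handler is a placeholder):
+ *
+ *	struct sigaction act = { 0 };
+ *
+ *	act.sa_sigaction = handler;
+ *	act.sa_flags = SA_SIGINFO | SA_RESTART | SA_ONSTACK;
+ *	sigaction(SIGSEGV, &act, NULL);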
+ */
+
+#define SA_ONSTACK 0x00000001
+#define SA_RESTART 0x00000002
+#define SA_NOCLDSTOP 0x00000004
+#define SA_NODEFER 0x00000008
+#define SA_RESETHAND 0x00000010
+#define SA_NOCLDWAIT 0x00000020
+#define SA_SIGINFO 0x00000040
+
+#define SA_ONESHOT SA_RESETHAND
+#define SA_NOMASK SA_NODEFER
+
+#define MINSIGSTKSZ 4096
+#define SIGSTKSZ 16384
+
+#define SIG_BLOCK 1 /* for blocking signals */
+#define SIG_UNBLOCK 2 /* for unblocking signals */
+#define SIG_SETMASK 3 /* for setting the signal mask */
+
+#include <asm-generic/signal-defs.h>
+
+#ifndef __KERNEL__
+/* Here we must cater to libcs that poke about in kernel headers. */
+
+struct sigaction {
+	union {
+		__sighandler_t _sa_handler;
+		void (*_sa_sigaction)(int sig, struct siginfo *info, void *ucontext);
+	} _u;
+	sigset_t sa_mask;
+	int sa_flags;
+};
+
+#define sa_handler _u._sa_handler
+#define sa_sigaction _u._sa_sigaction
+
+#endif /* __KERNEL__ */
+
+typedef struct sigaltstack {
+	void __user *ss_sp;
+	int ss_flags;
+	size_t ss_size;
+} stack_t;
+
+#endif /* _UAPI_ASM_SW64_SIGNAL_H */
diff --git a/arch/sw_64/include/uapi/asm/socket.h b/arch/sw_64/include/uapi/asm/socket.h
new file mode 100644
index 0000000000000000000000000000000000000000..1094d11fff5b7c7112935bc0f93353e7490c3dc1
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/socket.h
@@ -0,0 +1,161 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_SOCKET_H
+#define _UAPI_ASM_SW64_SOCKET_H
+
+#include <asm/sockios.h>
+#include <asm/bitsperlong.h>
+
+/* For setsockopt(2) */
+/*
+ * Note: we only bother about making the SOL_SOCKET options
+ * the same as legacy, as that's all that "normal" programs are
+ * likely to set. We don't necessarily want to be binary
+ * compatible with _everything_.
+ */
+#define SOL_SOCKET 0xffff
+
+#define SO_DEBUG 0x0001
+#define SO_REUSEADDR 0x0004
+#define SO_KEEPALIVE 0x0008
+#define SO_DONTROUTE 0x0010
+#define SO_BROADCAST 0x0020
+#define SO_LINGER 0x0080
+#define SO_OOBINLINE 0x0100
+#define SO_REUSEPORT 0x0200
+
+#define SO_TYPE 0x1008
+#define SO_ERROR 0x1007
+#define SO_SNDBUF 0x1001
+#define SO_RCVBUF 0x1002
+#define SO_SNDBUFFORCE 0x100a
+#define SO_RCVBUFFORCE 0x100b
+#define SO_RCVLOWAT 0x1010
+#define SO_SNDLOWAT 0x1011
+#define SO_RCVTIMEO_OLD 0x1012
+#define SO_SNDTIMEO_OLD 0x1013
+#define SO_ACCEPTCONN 0x1014
+#define SO_PROTOCOL 0x1028
+#define SO_DOMAIN 0x1029
+
+/* linux-specific, might as well be the same as on i386 */
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_BSDCOMPAT 14
+
+#define SO_PASSCRED 17
+#define SO_PEERCRED 18
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+#define SO_GET_FILTER SO_ATTACH_FILTER
+
+#define SO_PEERNAME 28
+
+#define SO_PEERSEC 30
+#define SO_PASSSEC 34
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 19
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 20
+#define SO_SECURITY_ENCRYPTION_NETWORK 21
+
+#define SO_MARK 36
+
+#define SO_RXQ_OVFL 40
+
+#define SO_WIFI_STATUS 41
+#define SCM_WIFI_STATUS SO_WIFI_STATUS
+#define SO_PEEK_OFF 42
+
+/* Instruct lower device to use last 4-bytes of skb data as FCS */
+#define SO_NOFCS 43
+
+#define SO_LOCK_FILTER 44
+#define SO_SELECT_ERR_QUEUE 45
+#define SO_BUSY_POLL 46
+#define SO_MAX_PACING_RATE 47
+#define SO_BPF_EXTENSIONS 48
+#define SO_INCOMING_CPU 49
+#define SO_ATTACH_BPF 50
+#define SO_DETACH_BPF SO_DETACH_FILTER
+
+#define SO_ATTACH_REUSEPORT_CBPF 51
+#define SO_ATTACH_REUSEPORT_EBPF 52
+
+#define SO_CNX_ADVICE 53
+
+#define 
SCM_TIMESTAMPING_OPT_STATS 54 + +#define SO_MEMINFO 55 + +#define SO_INCOMING_NAPI_ID 56 + +#define SO_COOKIE 57 + +#define SCM_TIMESTAMPING_PKTINFO 58 + +#define SO_PEERGROUPS 59 + +#define SO_ZEROCOPY 60 + +#define SO_TXTIME 61 +#define SCM_TXTIME SO_TXTIME + +#define SO_BINDTOIFINDEX 62 + +#define SO_TIMESTAMP_OLD 29 +#define SO_TIMESTAMPNS_OLD 35 +#define SO_TIMESTAMPING_OLD 37 + +#define SO_TIMESTAMP_NEW 63 +#define SO_TIMESTAMPNS_NEW 64 +#define SO_TIMESTAMPING_NEW 65 + +#define SO_RCVTIMEO_NEW 66 +#define SO_SNDTIMEO_NEW 67 + +#define SO_DETACH_REUSEPORT_BPF 68 + +#define SO_PREFER_BUSY_POLL 69 +#define SO_BUSY_POLL_BUDGET 70 + +#define SO_NETNS_COOKIE 71 + +#define SO_BUF_LOCK 72 + +#define SO_RESERVE_MEM 73 + +#define SO_TXREHASH 74 + +#define SO_RCVMARK 75 + +#define SO_PASSPIDFD 76 +#define SO_PEERPIDFD 77 + +#if !defined(__KERNEL__) + +#if __BITS_PER_LONG == 64 +#define SO_TIMESTAMP SO_TIMESTAMP_OLD +#define SO_TIMESTAMPNS SO_TIMESTAMPNS_OLD +#define SO_TIMESTAMPING SO_TIMESTAMPING_OLD + +#define SO_RCVTIMEO SO_RCVTIMEO_OLD +#define SO_SNDTIMEO SO_SNDTIMEO_OLD +#else +#define SO_TIMESTAMP (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMP_OLD : SO_TIMESTAMP_NEW) +#define SO_TIMESTAMPNS (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPNS_OLD : SO_TIMESTAMPNS_NEW) +#define SO_TIMESTAMPING (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_TIMESTAMPING_OLD : SO_TIMESTAMPING_NEW) + +#define SO_RCVTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_RCVTIMEO_OLD : SO_RCVTIMEO_NEW) +#define SO_SNDTIMEO (sizeof(time_t) == sizeof(__kernel_long_t) ? SO_SNDTIMEO_OLD : SO_SNDTIMEO_NEW) +#endif + +#define SCM_TIMESTAMP SO_TIMESTAMP +#define SCM_TIMESTAMPNS SO_TIMESTAMPNS +#define SCM_TIMESTAMPING SO_TIMESTAMPING + +#endif + +#endif /* _UAPI_ASM_SW64_SOCKET_H */ diff --git a/arch/sw_64/include/uapi/asm/sockios.h b/arch/sw_64/include/uapi/asm/sockios.h new file mode 100644 index 0000000000000000000000000000000000000000..88e89dcf8300ea66394003769c351ed91c14cd55 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sockios.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SOCKIOS_H +#define _UAPI_ASM_SW64_SOCKIOS_H + +/* Socket-level I/O control calls. */ + +#define FIOGETOWN _IOR('f', 123, int) +#define FIOSETOWN _IOW('f', 124, int) + +#define SIOCATMARK _IOR('s', 7, int) +#define SIOCSPGRP _IOW('s', 8, pid_t) +#define SIOCGPGRP _IOR('s', 9, pid_t) + +#define SIOCGSTAMP_OLD 0x8906 /* Get stamp (timeval) */ +#define SIOCGSTAMPNS_OLD 0x8907 /* Get stamp (timespec) */ + +#endif /* _UAPI_ASM_SW64_SOCKIOS_H */ diff --git a/arch/sw_64/include/uapi/asm/stat.h b/arch/sw_64/include/uapi/asm/stat.h new file mode 100644 index 0000000000000000000000000000000000000000..677a75f1cf5bfc05b81034278ebac4b25442f2d7 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/stat.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_STAT_H +#define _UAPI_ASM_SW64_STAT_H + +struct stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + long st_size; + unsigned long st_atime; + unsigned long st_mtime; + unsigned long st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +/* The stat64 structure increases the size of dev_t, blkcnt_t, adds + * nanosecond resolution times, and padding for expansion. 
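+ *
+ * For example, a userspace reader reconstructs a full-resolution
+ * timestamp by pairing the second and nanosecond fields (sketch; st is
+ * a struct stat64 filled in by the kernel):
+ *
+ *	struct timespec ts = {
+ *		.tv_sec = st.st_atime,
+ *		.tv_nsec = st.st_atime_nsec,
+ *	};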
+ */ + +struct stat64 { + unsigned long st_dev; + unsigned long st_ino; + unsigned long st_rdev; + long st_size; + unsigned long st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + unsigned long st_atime; + unsigned long st_atime_nsec; + unsigned long st_mtime; + unsigned long st_mtime_nsec; + unsigned long st_ctime; + unsigned long st_ctime_nsec; + long __unused[3]; +}; + +#endif /* _UAPI_ASM_SW64_STAT_H */ diff --git a/arch/sw_64/include/uapi/asm/swab.h b/arch/sw_64/include/uapi/asm/swab.h new file mode 100644 index 0000000000000000000000000000000000000000..275661b346ac202afed612cee1f75bc1a0b6209e --- /dev/null +++ b/arch/sw_64/include/uapi/asm/swab.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_SWAB_H +#define _UAPI_ASM_SW64_SWAB_H + +#include +#include +#include + +#ifdef __GNUC__ + +static inline __attribute_const__ __u32 __arch_swab32(__u32 x) +{ + /* + * Unfortunately, we can't use the 6 instruction sequence + * on sw64 since the latency of the UNPKBW is 3, which is + * pretty hard to hide. Just in case a future implementation + * has a lower latency, here's the sequence (also by Mike Burrows) + * + * UNPKBW a0, v0 v0: 00AA00BB00CC00DD + * SLL v0, 24, a0 a0: BB00CC00DD000000 + * BIS v0, a0, a0 a0: BBAACCBBDDCC00DD + * EXTWL a0, 6, v0 v0: 000000000000BBAA + * ZAP a0, 0xf3, a0 a0: 00000000DDCC0000 + * ADDL a0, v0, v0 v0: ssssssssDDCCBBAA + */ + + __u64 t0, t1, t2, t3; + + t0 = __kernel_inshw(x, 7); /* t0 : 0000000000AABBCC */ + t1 = __kernel_inslh(x, 3); /* t1 : 000000CCDD000000 */ + t1 |= t0; /* t1 : 000000CCDDAABBCC */ + t2 = t1 >> 16; /* t2 : 0000000000CCDDAA */ + t0 = t1 & 0xFF00FF00; /* t0 : 00000000DD00BB00 */ + t3 = t2 & 0x00FF00FF; /* t3 : 0000000000CC00AA */ + t1 = t0 + t3; /* t1 : ssssssssDDCCBBAA */ + + return t1; +} +#define __arch_swab32 __arch_swab32 + +#endif /* __GNUC__ */ + +#endif /* _UAPI_ASM_SW64_SWAB_H */ diff --git a/arch/sw_64/include/uapi/asm/sysinfo.h b/arch/sw_64/include/uapi/asm/sysinfo.h new file mode 100644 index 0000000000000000000000000000000000000000..667405c3447cd841cda75d7e8cf624166db70d2c --- /dev/null +++ b/arch/sw_64/include/uapi/asm/sysinfo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * include/asm/sysinfo.h + */ + +#ifndef _UAPI_ASM_SW64_SYSINFO_H +#define _UAPI_ASM_SW64_SYSINFO_H + +#define GSI_IEEE_FP_CONTROL 45 + +#define SSI_IEEE_FP_CONTROL 14 +#define SSI_IEEE_RAISE_EXCEPTION 1001 /* linux specific */ + +#define UAC_BITMASK 7 +#define UAC_NOPRINT 1 +#define UAC_NOFIX 2 +#define UAC_SIGBUS 4 +#define PR_NOFIX 4 /* do not fix up unaligned accesses */ + +#endif /* _UAPI_ASM_SW64_SYSINFO_H */ diff --git a/arch/sw_64/include/uapi/asm/termbits.h b/arch/sw_64/include/uapi/asm/termbits.h new file mode 100644 index 0000000000000000000000000000000000000000..a71aaf33c26cc9f046e65e97d694ab0ef6ee9b06 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termbits.h @@ -0,0 +1,167 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMBITS_H +#define _UAPI_ASM_SW64_TERMBITS_H + +#include + +typedef unsigned int tcflag_t; + +/* + * termios type and macro definitions. Be careful about adding stuff + * to this file since it's used in GNU libc and there are strict rules + * concerning namespace pollution. 
+ */ + +#define NCCS 19 +struct termios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has identical termios and termios2 */ + +struct termios2 { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* SW64 has matching termios and ktermios */ + +struct ktermios { + tcflag_t c_iflag; /* input mode flags */ + tcflag_t c_oflag; /* output mode flags */ + tcflag_t c_cflag; /* control mode flags */ + tcflag_t c_lflag; /* local mode flags */ + cc_t c_cc[NCCS]; /* control characters */ + cc_t c_line; /* line discipline (== c_cc[19]) */ + speed_t c_ispeed; /* input speed */ + speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define VEOF 0 +#define VEOL 1 +#define VEOL2 2 +#define VERASE 3 +#define VWERASE 4 +#define VKILL 5 +#define VREPRINT 6 +#define VSWTC 7 +#define VINTR 8 +#define VQUIT 9 +#define VSUSP 10 +#define VSTART 12 +#define VSTOP 13 +#define VLNEXT 14 +#define VDISCARD 15 +#define VMIN 16 +#define VTIME 17 + +/* c_iflag bits */ +#define IXON 0x0200 +#define IXOFF 0x0400 +#define IUCLC 0x1000 +#define IMAXBEL 0x2000 +#define IUTF8 0x4000 + +/* c_oflag bits */ +#define ONLCR 0x00002 +#define OLCUC 0x00004 +#define NLDLY 0x00300 +#define NL0 0x00000 +#define NL1 0x00100 +#define NL2 0x00200 +#define NL3 0x00300 +#define TABDLY 0x00c00 +#define TAB0 0x00000 +#define TAB1 0x00400 +#define TAB2 0x00800 +#define TAB3 0x00c00 +#define CRDLY 0x03000 +#define CR0 0x00000 +#define CR1 0x01000 +#define CR2 0x02000 +#define CR3 0x03000 +#define FFDLY 0x04000 +#define FF0 0x00000 +#define FF1 0x04000 +#define BSDLY 0x08000 +#define BS0 0x00000 +#define BS1 0x08000 +#define VTDLY 0x10000 +#define VT0 0x00000 +#define VT1 0x10000 +/* + * Should be equivalent to TAB3, see description of TAB3 in + * POSIX.1-2008, Ch. 
11.2.3 "Output Modes" + */ +#define XTABS TAB3 + +/* c_cflag bit meaning */ +#define CBAUD 0x0000001f +#define CBAUDEX 0x00000000 +#define BOTHER 0x0000001f +#define B57600 0x00000010 +#define B115200 0x00000011 +#define B230400 0x00000012 +#define B460800 0x00000013 +#define B500000 0x00000014 +#define B576000 0x00000015 +#define B921600 0x00000016 +#define B1000000 0x00000017 +#define B1152000 0x00000018 +#define B1500000 0x00000019 +#define B2000000 0x0000001a +#define B2500000 0x0000001b +#define B3000000 0x0000001c +#define B3500000 0x0000001d +#define B4000000 0x0000001e +#define CSIZE 0x00000300 +#define CS5 0x00000000 +#define CS6 0x00000100 +#define CS7 0x00000200 +#define CS8 0x00000300 +#define CSTOPB 0x00000400 +#define CREAD 0x00000800 +#define PARENB 0x00001000 +#define PARODD 0x00002000 +#define HUPCL 0x00004000 +#define CLOCAL 0x00008000 +#define CIBAUD 0x001f0000 + +/* c_lflag bits */ +#define ISIG 0x00000080 +#define ICANON 0x00000100 +#define XCASE 0x00004000 +#define ECHO 0x00000008 +#define ECHOE 0x00000002 +#define ECHOK 0x00000004 +#define ECHONL 0x00000010 +#define NOFLSH 0x80000000 +#define TOSTOP 0x00400000 +#define ECHOCTL 0x00000040 +#define ECHOPRT 0x00000020 +#define ECHOKE 0x00000001 +#define FLUSHO 0x00800000 +#define PENDIN 0x20000000 +#define IEXTEN 0x00000400 +#define EXTPROC 0x10000000 + +/* Values for the OPTIONAL_ACTIONS argument to `tcsetattr'. */ +#define TCSANOW 0 +#define TCSADRAIN 1 +#define TCSAFLUSH 2 + +#endif /* _UAPI_ASM_SW64_TERMBITS_H */ diff --git a/arch/sw_64/include/uapi/asm/termios.h b/arch/sw_64/include/uapi/asm/termios.h new file mode 100644 index 0000000000000000000000000000000000000000..62f4b40551b241ff1ca7fd9c1c25e65415b976c4 --- /dev/null +++ b/arch/sw_64/include/uapi/asm/termios.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI_ASM_SW64_TERMIOS_H +#define _UAPI_ASM_SW64_TERMIOS_H + +#include +#include + +struct sgttyb { + char sg_ispeed; + char sg_ospeed; + char sg_erase; + char sg_kill; + short sg_flags; +}; + +struct tchars { + char t_intrc; + char t_quitc; + char t_startc; + char t_stopc; + char t_eofc; + char t_brkc; +}; + +struct ltchars { + char t_suspc; + char t_dsuspc; + char t_rprntc; + char t_flushc; + char t_werasc; + char t_lnextc; +}; + +struct winsize { + unsigned short ws_row; + unsigned short ws_col; + unsigned short ws_xpixel; + unsigned short ws_ypixel; +}; + +#define NCC 8 +struct termio { + unsigned short c_iflag; /* input mode flags */ + unsigned short c_oflag; /* output mode flags */ + unsigned short c_cflag; /* control mode flags */ + unsigned short c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[NCC]; /* control characters */ +}; + +/* + * c_cc characters in the termio structure. Oh, how I love being + * backwardly compatible. Notice that character 4 and 5 are + * interpreted differently depending on whether ICANON is set in + * c_lflag. If it's set, they are used as _VEOF and _VEOL, otherwise + * as _VMIN and V_TIME. This is for compatibility with sysV)... 
+ */
+#define _VINTR 0
+#define _VQUIT 1
+#define _VERASE 2
+#define _VKILL 3
+#define _VEOF 4
+#define _VMIN 4
+#define _VEOL 5
+#define _VTIME 5
+#define _VEOL2 6
+#define _VSWTC 7
+
+
+#endif /* _UAPI_ASM_SW64_TERMIOS_H */
diff --git a/arch/sw_64/include/uapi/asm/ucontext.h b/arch/sw_64/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000000000000000000000000000000000000..c5d6e24e3e5fb0d8e8b9d9db8ab4c7e1a53f4c17
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/ucontext.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_UCONTEXT_H
+#define _UAPI_ASM_SW64_UCONTEXT_H
+
+struct ucontext {
+	unsigned long uc_flags;
+	struct ucontext *uc_link;
+	old_sigset_t uc_old_sigmask;
+	stack_t uc_stack;
+	struct sigcontext uc_mcontext;
+	sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* _UAPI_ASM_SW64_UCONTEXT_H */
diff --git a/arch/sw_64/include/uapi/asm/unistd.h b/arch/sw_64/include/uapi/asm/unistd.h
new file mode 100644
index 0000000000000000000000000000000000000000..be844b2be9d5591b183e6fc54ae2d682a83c89d0
--- /dev/null
+++ b/arch/sw_64/include/uapi/asm/unistd.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_ASM_SW64_UNISTD_H
+#define _UAPI_ASM_SW64_UNISTD_H
+
+/*
+ * These are traditionally the names used for generic system calls.
+ */
+#define __NR_umount __NR_umount2
+
+#include <asm-generic/unistd.h>
+
+#endif /* _UAPI_ASM_SW64_UNISTD_H */
diff --git a/arch/sw_64/kernel/.gitignore b/arch/sw_64/kernel/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..46c9537c5551f777409b6933590304ab07cad4f5
--- /dev/null
+++ b/arch/sw_64/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+vmlinux.lds
diff --git a/arch/sw_64/kernel/Makefile b/arch/sw_64/kernel/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..abf27ad19a942ba4444c1ec15cda7779f2c76ca6
--- /dev/null
+++ b/arch/sw_64/kernel/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
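+#
+# A new optional object would follow the usual kbuild pattern, e.g.
+# (illustrative only; CONFIG_FOO and foo.o are placeholders):
+#
+#	obj-$(CONFIG_FOO)	+= foo.o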
+#
+
+extra-y := vmlinux.lds
+asflags-y := $(KBUILD_CFLAGS)
+ccflags-y := -Wno-sign-compare
+
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_insn.o = -pg
+CFLAGS_REMOVE_printk.o = -pg
+endif
+
+obj-y := entry.o fpu.o traps.o process.o sys_sw64.o irq.o \
+	irq_sw64.o signal.o setup.o ptrace.o time.o \
+	systbls.o dup_print.o chip_setup.o \
+	insn.o early_init.o topology.o cacheinfo.o \
+	vdso.o vdso/ hmcall.o stacktrace.o idle.o reset.o \
+	head.o termios.o
+
+obj-$(CONFIG_SUBARCH_C3B) += tc.o
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_PM) += pm.o
+obj-$(CONFIG_SUSPEND) += suspend_asm.o suspend.o
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o
+obj-$(CONFIG_HIBERNATION) += hibernate_asm.o hibernate.o
+obj-$(CONFIG_AUDIT) += audit.o
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+obj-$(CONFIG_DEBUG_FS) += segvdbg.o unaligned.o
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_DEBUG_MATCH) += match.o
+
+ifndef CONFIG_PCI
+obj-y += pci-noop.o
+endif
+
+# Core logic support
+obj-$(CONFIG_SW64_CPUAUTOPLUG) += cpuautoplug.o
+
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
+obj-$(CONFIG_KPROBES) += kprobes/
+obj-$(CONFIG_UPROBES) += uprobes.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
diff --git a/arch/sw_64/kernel/acpi.c b/arch/sw_64/kernel/acpi.c
new file mode 100644
index 0000000000000000000000000000000000000000..9779d4bdea0d4ca8946bf03aa2e1125b7c7b6a82
--- /dev/null
+++ b/arch/sw_64/kernel/acpi.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+
+#include
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+#include
+#endif
+
+int acpi_disabled = 1;
+EXPORT_SYMBOL(acpi_disabled);
+
+int acpi_noirq = 1; /* skip ACPI IRQ initialization */
+int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
+EXPORT_SYMBOL(acpi_pci_disabled);
+
+static bool param_acpi_on __initdata;
+static bool param_acpi_off __initdata;
+
+int acpi_strict;
+u64 arch_acpi_wakeup_start;
+u64 acpi_saved_sp_s3;
+
+#define MAX_LOCAL_APIC 256
+
+#define PREFIX "ACPI: "
+/*
+ * The default interrupt routing model is PIC (8259). This gets
+ * overridden if IOAPICs are enumerated (below).
+ */
+enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_IOSAPIC;
+void __iomem *__init __acpi_map_table(unsigned long phys, unsigned long size)
+{
+	if (!phys || !size)
+		return NULL;
+
+	return early_ioremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+	if (!map || !size)
+		return;
+
+	early_iounmap(map, size);
+}
+/*
+ * The following __acpi_xx functions should be implemented for the specific CPU.
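+ *
+ * For example, a driver resolves a GSI to a Linux IRQ through the helper
+ * below before requesting it (sketch; my_handler and "mydev" are
+ * placeholders, the remaining request_irq() arguments are the usual ones):
+ *
+ *	unsigned int irq;
+ *
+ *	if (!acpi_gsi_to_irq(gsi, &irq))
+ *		ret = request_irq(irq, my_handler, 0, "mydev", dev);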
+ */
+int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
+{
+	if (irqp != NULL)
+		*irqp = acpi_register_gsi(NULL, gsi, -1, -1);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
+
+int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
+{
+	if (gsi)
+		*gsi = isa_irq;
+
+	return 0;
+}
+
+int (*acpi_suspend_lowlevel)(void);
+
+/*
+ * success: return IRQ number (>=0)
+ * failure: return < 0
+ */
+static struct irq_domain *irq_default_domain;
+int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
+{
+	u32 irq;
+
+	irq = irq_find_mapping(irq_default_domain, gsi);
+
+	return irq;
+}
+EXPORT_SYMBOL_GPL(acpi_register_gsi);
+
+void acpi_unregister_gsi(u32 gsi)
+{
+
+}
+EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
+
+/*
+ * ACPI based hotplug support for CPU
+ */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+/* wrapper to silence section mismatch warning */
+int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_map_lsapic);
+
+int acpi_unmap_lsapic(int cpu)
+{
+	return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_lsapic);
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
+
+u8 acpi_checksum(u8 *table, u32 length)
+{
+	u8 ret = 0;
+
+	while (length--) {
+		ret += *table;
+		table++;
+	}
+	return -ret;
+}
+
+static int __init parse_acpi(char *arg)
+{
+	if (!arg)
+		return -EINVAL;
+
+	/* disable both ACPI table parsing and interpreter */
+	if (strcmp(arg, "off") == 0)
+		param_acpi_off = true;
+	else if (strcmp(arg, "on") == 0) /* prefer ACPI over device tree */
+		param_acpi_on = true;
+	else
+		return -EINVAL; /* Core will printk when we return error. */
+
+	return 0;
+}
+early_param("acpi", parse_acpi);
+
+/*
+ * __acpi_acquire_global_lock will always return -1, indicating that the
+ * lock has been acquired.
+ *
+ * __acpi_release_global_lock will always return 0, indicating that no
+ * acquiring request is pending.
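+ *
+ * The ACPICA core consumes these through the generic pattern (sketch):
+ *
+ *	acquired = __acpi_acquire_global_lock(&facs->global_lock);
+ *	... critical section ...
+ *	pending = __acpi_release_global_lock(&facs->global_lock);
+ *
+ * so on SW64 the acquire always succeeds and the release never reports
+ * a pending waiter.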
+ */ +int __acpi_acquire_global_lock(unsigned int *lock) +{ + return -1; +} + +int __acpi_release_global_lock(unsigned int *lock) +{ + return 0; +} + +#ifdef CONFIG_ACPI_NUMA +static int rcid_to_cpu(int physical_id) +{ + int i; + + for (i = 0; i < NR_CPUS; ++i) { + if (__cpu_to_rcid[i] == physical_id) + return i; + } + + /* physical id not found */ + return -1; +} + +/* Callback for Proximity Domain -> CPUID mapping */ +void __init +acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) +{ + int pxm, node; + int cpu; // logical core id + + if (srat_disabled()) + return; + if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) { + bad_srat(); + return; + } + if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0) + return; + pxm = pa->proximity_domain_lo; + if (acpi_srat_revision >= 2) { + pxm |= (pa->proximity_domain_hi[0] << 8); + pxm |= (pa->proximity_domain_hi[1] << 16); + pxm |= (pa->proximity_domain_hi[2] << 24); + } + + node = acpi_map_pxm_to_node(pxm); + if (node < 0) { + pr_err("SRAT: Too many proximity domains %x\n", pxm); + bad_srat(); + return; + } + + if (pa->apic_id >= CONFIG_NR_CPUS) { + pr_err("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n", pxm, pa->apic_id, node); + return; + } + + /* Record the mapping from logical core id to node id */ + cpu = rcid_to_cpu(pa->apic_id); + if (cpu < 0) { + pr_err("SRAT: Can not find the logical id for physical Core 0x%02x\n", pa->apic_id); + return; + } + + early_map_cpu_to_node(cpu, node); + + node_set(node, numa_nodes_parsed); + pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +static inline int save_add_info(void) { return 1; } +#else +static inline int save_add_info(void) { return 0; } +#endif + +#endif + +void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) +{ +} + +#ifdef CONFIG_ACPI_HOTPLUG_CPU +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +{ +#ifdef CONFIG_ACPI_NUMA + int nid; + + nid = acpi_get_node(handle); + if (nid != NUMA_NO_NODE) { + set_cpuid_to_node(cpu, nid); + node_set(nid, numa_nodes_parsed); + } +#endif + return 0; +} + +int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, + int *pcpu) +{ + int cpu; + struct acpi_madt_local_apic *processor; + + processor = kzalloc(sizeof(struct acpi_madt_local_apic), GFP_KERNEL); + processor->id = physid; + processor->processor_id = acpi_id; + processor->lapic_flags = ACPI_MADT_ENABLED; + + cpu = set_processor_mask(processor); + if (cpu < 0) { + pr_info(PREFIX "Unable to map lapic to logical cpu number\n"); + return cpu; + } + + acpi_map_cpu2node(handle, cpu, physid); + + *pcpu = cpu; + return 0; +} +EXPORT_SYMBOL(acpi_map_cpu); + +int acpi_unmap_cpu(int cpu) +{ +#ifdef CONFIG_ACPI_NUMA + set_cpuid_to_node(cpu, NUMA_NO_NODE); +#endif + set_cpu_present(cpu, false); + num_processors--; + + pr_info("cpu%d hot remove!\n", cpu); + + return 0; +} +EXPORT_SYMBOL(acpi_unmap_cpu); +#endif /* CONFIG_ACPI_HOTPLUG_CPU */ + +void __init acpi_boot_table_init(void) +{ + /** + * ACPI is disabled by default. + * ACPI is only enabled when firmware passes ACPI table + * and sets boot parameter "acpi=on". 
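+	 *
+	 * That is, ACPI is exercised only when booting with a command line
+	 * such as (illustrative):
+	 *
+	 *	console=ttyS0 acpi=on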
+ */ + if (param_acpi_on) + enable_acpi(); + + /* + * If acpi_disabled, bail out + */ + if (!acpi_disabled) { + pr_warn("Currently, ACPI is an experimental feature!\n"); + if (acpi_table_init()) { + pr_err("Failed to init ACPI tables\n"); + disable_acpi(); + } else + pr_info("Successfully parsed ACPI table\n"); + } +} diff --git a/arch/sw_64/kernel/asm-offsets.c b/arch/sw_64/kernel/asm-offsets.c new file mode 100644 index 0000000000000000000000000000000000000000..41310a8a7af12a6f1a4fabb8ec52a396a7ef6403 --- /dev/null +++ b/arch/sw_64/kernel/asm-offsets.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Generate definitions needed by assembly language modules. + * This code generates raw asm output which is post-processed to extract + * and format the required data. + */ + +#define GENERATING_ASM_OFFSETS /* asm/smp.h */ +#include +#include +#include +#include + +#include +#include + +#include "traps.c" +#include "signal.c" + +void foo(void) +{ + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + BLANK(); + + DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked)); + DEFINE(TASK_CRED, offsetof(struct task_struct, cred)); + DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent)); + DEFINE(TASK_GROUP_LEADER, offsetof(struct task_struct, group_leader)); + DEFINE(TASK_TGID, offsetof(struct task_struct, tgid)); + DEFINE(TASK_STACK, offsetof(struct task_struct, stack)); +#ifdef CONFIG_SMP + DEFINE(TASK_CPU, offsetof(struct task_struct, thread_info.cpu)); +#endif + BLANK(); + + OFFSET(PSTATE_REGS, processor_state, regs); + OFFSET(PSTATE_FPREGS, processor_state, fpregs); + OFFSET(PSTATE_FPCR, processor_state, fpcr); + OFFSET(PSTATE_KTP, processor_state, ktp); +#ifdef CONFIG_HIBERNATION + OFFSET(PSTATE_SP, processor_state, sp); +#endif + OFFSET(PBE_ADDR, pbe, address); + OFFSET(PBE_ORIG_ADDR, pbe, orig_address); + OFFSET(PBE_NEXT, pbe, next); + OFFSET(CALLEE_R9, callee_saved_regs, r9); + OFFSET(CALLEE_R10, callee_saved_regs, r10); + OFFSET(CALLEE_R11, callee_saved_regs, r11); + OFFSET(CALLEE_R12, callee_saved_regs, r12); + OFFSET(CALLEE_R13, callee_saved_regs, r13); + OFFSET(CALLEE_R14, callee_saved_regs, r14); + OFFSET(CALLEE_R15, callee_saved_regs, r15); + OFFSET(CALLEE_RA, callee_saved_regs, ra); + OFFSET(CALLEE_F2, callee_saved_fpregs, f2); + OFFSET(CALLEE_F3, callee_saved_fpregs, f3); + OFFSET(CALLEE_F4, callee_saved_fpregs, f4); + OFFSET(CALLEE_F5, callee_saved_fpregs, f5); + OFFSET(CALLEE_F6, callee_saved_fpregs, f6); + OFFSET(CALLEE_F7, callee_saved_fpregs, f7); + OFFSET(CALLEE_F8, callee_saved_fpregs, f8); + OFFSET(CALLEE_F9, callee_saved_fpregs, f9); + BLANK(); + DEFINE(CRED_UID, offsetof(struct cred, uid)); + DEFINE(CRED_EUID, offsetof(struct cred, euid)); + DEFINE(CRED_GID, offsetof(struct cred, gid)); + DEFINE(CRED_EGID, offsetof(struct cred, egid)); + BLANK(); + + DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs)); + DEFINE(PT_REGS_R0, offsetof(struct pt_regs, regs[0])); + DEFINE(PT_REGS_R1, offsetof(struct pt_regs, regs[1])); + DEFINE(PT_REGS_R2, offsetof(struct pt_regs, regs[2])); + DEFINE(PT_REGS_R3, offsetof(struct pt_regs, regs[3])); + DEFINE(PT_REGS_R4, offsetof(struct pt_regs, regs[4])); + DEFINE(PT_REGS_R5, offsetof(struct pt_regs, regs[5])); + DEFINE(PT_REGS_R6, offsetof(struct pt_regs, regs[6])); + DEFINE(PT_REGS_R7, offsetof(struct pt_regs, regs[7])); + DEFINE(PT_REGS_R8, offsetof(struct pt_regs, regs[8])); + DEFINE(PT_REGS_R9, offsetof(struct pt_regs, regs[9])); + DEFINE(PT_REGS_R10, offsetof(struct 
pt_regs, regs[10])); + DEFINE(PT_REGS_R11, offsetof(struct pt_regs, regs[11])); + DEFINE(PT_REGS_R12, offsetof(struct pt_regs, regs[12])); + DEFINE(PT_REGS_R13, offsetof(struct pt_regs, regs[13])); + DEFINE(PT_REGS_R14, offsetof(struct pt_regs, regs[14])); + DEFINE(PT_REGS_R15, offsetof(struct pt_regs, regs[15])); + DEFINE(PT_REGS_R16, offsetof(struct pt_regs, regs[16])); + DEFINE(PT_REGS_R17, offsetof(struct pt_regs, regs[17])); + DEFINE(PT_REGS_R18, offsetof(struct pt_regs, regs[18])); + DEFINE(PT_REGS_R19, offsetof(struct pt_regs, regs[19])); + DEFINE(PT_REGS_R20, offsetof(struct pt_regs, regs[20])); + DEFINE(PT_REGS_R21, offsetof(struct pt_regs, regs[21])); + DEFINE(PT_REGS_R22, offsetof(struct pt_regs, regs[22])); + DEFINE(PT_REGS_R23, offsetof(struct pt_regs, regs[23])); + DEFINE(PT_REGS_R24, offsetof(struct pt_regs, regs[24])); + DEFINE(PT_REGS_R25, offsetof(struct pt_regs, regs[25])); + DEFINE(PT_REGS_R26, offsetof(struct pt_regs, regs[26])); + DEFINE(PT_REGS_R27, offsetof(struct pt_regs, regs[27])); + DEFINE(PT_REGS_R28, offsetof(struct pt_regs, regs[28])); + DEFINE(PT_REGS_GP, offsetof(struct pt_regs, regs[29])); + DEFINE(PT_REGS_SP, offsetof(struct pt_regs, regs[30])); + DEFINE(PT_REGS_PC, offsetof(struct pt_regs, pc)); + DEFINE(PT_REGS_PS, offsetof(struct pt_regs, ps)); + DEFINE(PT_REGS_ORIG_R0, offsetof(struct pt_regs, orig_r0)); + DEFINE(PT_REGS_ORIG_R19, offsetof(struct pt_regs, orig_r19)); + DEFINE(PT_REGS_HM_PS, offsetof(struct pt_regs, hm_ps)); + DEFINE(PT_REGS_HM_PC, offsetof(struct pt_regs, hm_pc)); + DEFINE(PT_REGS_HM_GP, offsetof(struct pt_regs, hm_gp)); + DEFINE(PT_REGS_HM_R16, offsetof(struct pt_regs, hm_r16)); + DEFINE(PT_REGS_HM_R17, offsetof(struct pt_regs, hm_r17)); + DEFINE(PT_REGS_HM_R18, offsetof(struct pt_regs, hm_r18)); + BLANK(); + + DEFINE(KVM_REGS_SIZE, sizeof(struct kvm_regs)); + DEFINE(KVM_REGS_R0, offsetof(struct kvm_regs, r0)); + DEFINE(KVM_REGS_R1, offsetof(struct kvm_regs, r1)); + DEFINE(KVM_REGS_R2, offsetof(struct kvm_regs, r2)); + DEFINE(KVM_REGS_R3, offsetof(struct kvm_regs, r3)); + DEFINE(KVM_REGS_R4, offsetof(struct kvm_regs, r4)); + DEFINE(KVM_REGS_R5, offsetof(struct kvm_regs, r5)); + DEFINE(KVM_REGS_R6, offsetof(struct kvm_regs, r6)); + DEFINE(KVM_REGS_R7, offsetof(struct kvm_regs, r7)); + DEFINE(KVM_REGS_R8, offsetof(struct kvm_regs, r8)); + DEFINE(KVM_REGS_R9, offsetof(struct kvm_regs, r9)); + DEFINE(KVM_REGS_R10, offsetof(struct kvm_regs, r10)); + DEFINE(KVM_REGS_R11, offsetof(struct kvm_regs, r11)); + DEFINE(KVM_REGS_R12, offsetof(struct kvm_regs, r12)); + DEFINE(KVM_REGS_R13, offsetof(struct kvm_regs, r13)); + DEFINE(KVM_REGS_R14, offsetof(struct kvm_regs, r14)); + DEFINE(KVM_REGS_R15, offsetof(struct kvm_regs, r15)); + DEFINE(KVM_REGS_R19, offsetof(struct kvm_regs, r19)); + DEFINE(KVM_REGS_R20, offsetof(struct kvm_regs, r20)); + DEFINE(KVM_REGS_R21, offsetof(struct kvm_regs, r21)); + DEFINE(KVM_REGS_R22, offsetof(struct kvm_regs, r22)); + DEFINE(KVM_REGS_R23, offsetof(struct kvm_regs, r23)); + DEFINE(KVM_REGS_R24, offsetof(struct kvm_regs, r24)); + DEFINE(KVM_REGS_R25, offsetof(struct kvm_regs, r25)); + DEFINE(KVM_REGS_R26, offsetof(struct kvm_regs, r26)); + DEFINE(KVM_REGS_R27, offsetof(struct kvm_regs, r27)); + DEFINE(KVM_REGS_R28, offsetof(struct kvm_regs, r28)); + DEFINE(KVM_REGS_FPCR, offsetof(struct kvm_regs, fpcr)); + DEFINE(KVM_REGS_F0, offsetof(struct kvm_regs, fp[0 * 4])); + DEFINE(KVM_REGS_F1, offsetof(struct kvm_regs, fp[1 * 4])); + DEFINE(KVM_REGS_F2, offsetof(struct kvm_regs, fp[2 * 4])); + DEFINE(KVM_REGS_F3, 
offsetof(struct kvm_regs, fp[3 * 4])); + DEFINE(KVM_REGS_F4, offsetof(struct kvm_regs, fp[4 * 4])); + DEFINE(KVM_REGS_F5, offsetof(struct kvm_regs, fp[5 * 4])); + DEFINE(KVM_REGS_F6, offsetof(struct kvm_regs, fp[6 * 4])); + DEFINE(KVM_REGS_F7, offsetof(struct kvm_regs, fp[7 * 4])); + DEFINE(KVM_REGS_F8, offsetof(struct kvm_regs, fp[8 * 4])); + DEFINE(KVM_REGS_F9, offsetof(struct kvm_regs, fp[9 * 4])); + DEFINE(KVM_REGS_F10, offsetof(struct kvm_regs, fp[10 * 4])); + DEFINE(KVM_REGS_F11, offsetof(struct kvm_regs, fp[11 * 4])); + DEFINE(KVM_REGS_F12, offsetof(struct kvm_regs, fp[12 * 4])); + DEFINE(KVM_REGS_F13, offsetof(struct kvm_regs, fp[13 * 4])); + DEFINE(KVM_REGS_F14, offsetof(struct kvm_regs, fp[14 * 4])); + DEFINE(KVM_REGS_F15, offsetof(struct kvm_regs, fp[15 * 4])); + DEFINE(KVM_REGS_F16, offsetof(struct kvm_regs, fp[16 * 4])); + DEFINE(KVM_REGS_F17, offsetof(struct kvm_regs, fp[17 * 4])); + DEFINE(KVM_REGS_F18, offsetof(struct kvm_regs, fp[18 * 4])); + DEFINE(KVM_REGS_F19, offsetof(struct kvm_regs, fp[19 * 4])); + DEFINE(KVM_REGS_F20, offsetof(struct kvm_regs, fp[20 * 4])); + DEFINE(KVM_REGS_F21, offsetof(struct kvm_regs, fp[21 * 4])); + DEFINE(KVM_REGS_F22, offsetof(struct kvm_regs, fp[22 * 4])); + DEFINE(KVM_REGS_F23, offsetof(struct kvm_regs, fp[23 * 4])); + DEFINE(KVM_REGS_F24, offsetof(struct kvm_regs, fp[24 * 4])); + DEFINE(KVM_REGS_F25, offsetof(struct kvm_regs, fp[25 * 4])); + DEFINE(KVM_REGS_F26, offsetof(struct kvm_regs, fp[26 * 4])); + DEFINE(KVM_REGS_F27, offsetof(struct kvm_regs, fp[27 * 4])); + DEFINE(KVM_REGS_F28, offsetof(struct kvm_regs, fp[28 * 4])); + DEFINE(KVM_REGS_F29, offsetof(struct kvm_regs, fp[29 * 4])); + DEFINE(KVM_REGS_F30, offsetof(struct kvm_regs, fp[30 * 4])); + DEFINE(KVM_REGS_PS, offsetof(struct kvm_regs, ps)); + DEFINE(KVM_REGS_PC, offsetof(struct kvm_regs, pc)); + DEFINE(KVM_REGS_GP, offsetof(struct kvm_regs, gp)); + DEFINE(KVM_REGS_R16, offsetof(struct kvm_regs, r16)); + DEFINE(KVM_REGS_R17, offsetof(struct kvm_regs, r17)); + DEFINE(KVM_REGS_R18, offsetof(struct kvm_regs, r18)); + BLANK(); + + DEFINE(VCPU_RET_SIZE, sizeof(struct vcpu_run_ret_stack)); + DEFINE(VCPU_RET_RA, offsetof(struct vcpu_run_ret_stack, ra)); + DEFINE(VCPU_RET_R0, offsetof(struct vcpu_run_ret_stack, r0)); + BLANK(); + + DEFINE(HOST_INT_SIZE, sizeof(struct host_int_args)); + DEFINE(HOST_INT_R18, offsetof(struct host_int_args, r18)); + DEFINE(HOST_INT_R17, offsetof(struct host_int_args, r17)); + DEFINE(HOST_INT_R16, offsetof(struct host_int_args, r16)); + BLANK(); + + OFFSET(TASK_THREAD, task_struct, thread); + OFFSET(TASK_THREAD_F0, task_struct, thread.fpstate.fp[0]); + OFFSET(TASK_THREAD_F1, task_struct, thread.fpstate.fp[1]); + OFFSET(TASK_THREAD_F2, task_struct, thread.fpstate.fp[2]); + OFFSET(TASK_THREAD_F3, task_struct, thread.fpstate.fp[3]); + OFFSET(TASK_THREAD_F4, task_struct, thread.fpstate.fp[4]); + OFFSET(TASK_THREAD_F5, task_struct, thread.fpstate.fp[5]); + OFFSET(TASK_THREAD_F6, task_struct, thread.fpstate.fp[6]); + OFFSET(TASK_THREAD_F7, task_struct, thread.fpstate.fp[7]); + OFFSET(TASK_THREAD_F8, task_struct, thread.fpstate.fp[8]); + OFFSET(TASK_THREAD_F9, task_struct, thread.fpstate.fp[9]); + OFFSET(TASK_THREAD_F10, task_struct, thread.fpstate.fp[10]); + OFFSET(TASK_THREAD_F11, task_struct, thread.fpstate.fp[11]); + OFFSET(TASK_THREAD_F12, task_struct, thread.fpstate.fp[12]); + OFFSET(TASK_THREAD_F13, task_struct, thread.fpstate.fp[13]); + OFFSET(TASK_THREAD_F14, task_struct, thread.fpstate.fp[14]); + OFFSET(TASK_THREAD_F15, task_struct, thread.fpstate.fp[15]); 
+ OFFSET(TASK_THREAD_F16, task_struct, thread.fpstate.fp[16]); + OFFSET(TASK_THREAD_F17, task_struct, thread.fpstate.fp[17]); + OFFSET(TASK_THREAD_F18, task_struct, thread.fpstate.fp[18]); + OFFSET(TASK_THREAD_F19, task_struct, thread.fpstate.fp[19]); + OFFSET(TASK_THREAD_F20, task_struct, thread.fpstate.fp[20]); + OFFSET(TASK_THREAD_F21, task_struct, thread.fpstate.fp[21]); + OFFSET(TASK_THREAD_F22, task_struct, thread.fpstate.fp[22]); + OFFSET(TASK_THREAD_F23, task_struct, thread.fpstate.fp[23]); + OFFSET(TASK_THREAD_F24, task_struct, thread.fpstate.fp[24]); + OFFSET(TASK_THREAD_F25, task_struct, thread.fpstate.fp[25]); + OFFSET(TASK_THREAD_F26, task_struct, thread.fpstate.fp[26]); + OFFSET(TASK_THREAD_F27, task_struct, thread.fpstate.fp[27]); + OFFSET(TASK_THREAD_F28, task_struct, thread.fpstate.fp[28]); + OFFSET(TASK_THREAD_F29, task_struct, thread.fpstate.fp[29]); + OFFSET(TASK_THREAD_F30, task_struct, thread.fpstate.fp[30]); + OFFSET(TASK_THREAD_FPCR, task_struct, thread.fpstate.fpcr); + BLANK(); + OFFSET(TASK_THREAD_RA, task_struct, thread.ra); + OFFSET(TASK_THREAD_SP, task_struct, thread.sp); + OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]); + OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]); + OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]); + OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]); + OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]); + OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]); + OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]); + BLANK(); + DEFINE(ASM_THREAD_SIZE, THREAD_SIZE); + BLANK(); + DEFINE(RT_SIGFRAME_SIZE, sizeof(struct rt_sigframe)); + OFFSET(RT_SIGFRAME_MCTX, rt_sigframe, uc.uc_mcontext); +} diff --git a/arch/sw_64/kernel/audit.c b/arch/sw_64/kernel/audit.c new file mode 100644 index 0000000000000000000000000000000000000000..dcf58deee3e2018e0efa6cb355e8802aef04de35 --- /dev/null +++ b/arch/sw_64/kernel/audit.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +static unsigned int dir_class[] = { +#include +~0U +}; + +static unsigned int read_class[] = { +#include +~0U +}; + +static unsigned int write_class[] = { +#include +~0U +}; + +static unsigned int chattr_class[] = { +#include +~0U +}; + +static unsigned int signal_class[] = { +#include +~0U +}; + +int audit_classify_arch(int arch) +{ + return 0; +} + +int audit_classify_syscall(int abi, unsigned int syscall) +{ + switch (syscall) { + case __NR_open: + return 2; + case __NR_openat: + return 3; + case __NR_execve: + return 5; + default: + return 0; + } +} + +static int __init audit_classes_init(void) +{ + audit_register_class(AUDIT_CLASS_WRITE, write_class); + audit_register_class(AUDIT_CLASS_READ, read_class); + audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); + audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); + audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); + return 0; +} + +device_initcall(audit_classes_init); diff --git a/arch/sw_64/kernel/cacheinfo.c b/arch/sw_64/kernel/cacheinfo.c new file mode 100644 index 0000000000000000000000000000000000000000..e340c53690a9e486269465512e9ddf494afd1fe0 --- /dev/null +++ b/arch/sw_64/kernel/cacheinfo.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 cacheinfo support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include + +#include + +/* Populates leaf and increments to next leaf */ +#define populate_cache(cache, leaf, c_level, c_type, c_id) \ +do { \ + leaf->id = c_id; \ + leaf->attributes = CACHE_ID; \ + leaf->type = c_type; \ + leaf->level = c_level; \ + leaf->coherency_line_size = c->cache.linesz; \ + leaf->number_of_sets = c->cache.sets; \ + leaf->ways_of_associativity = c->cache.ways; \ + leaf->size = c->cache.size; \ + leaf++; \ +} while (0) + +int init_cache_level(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + int levels = 0, leaves = 0; + + /* + * If Dcache is not set, we assume the cache structures + * are not properly initialized. + */ + if (c->dcache.size) + levels += 1; + else + return -ENOENT; + + + leaves += (c->icache.size) ? 2 : 1; + + if (c->scache.size) { + levels++; + leaves++; + } + + if (c->tcache.size) { + levels++; + leaves++; + } + + this_cpu_ci->num_levels = levels; + this_cpu_ci->num_leaves = leaves; + return 0; +} + +int populate_cache_leaves(unsigned int cpu) +{ + struct cpuinfo_sw64 *c = &cpu_data[cpu]; + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; + struct cpu_topology *topo = &cpu_topology[cpu]; + + if (c->icache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA, cpu); + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST, cpu); + + } else { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->scache.size) { + cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); + populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED, cpu); + } + + if (c->tcache.size) { + cpumask_copy(&this_leaf->shared_cpu_map, topology_llc_cpumask(cpu)); + populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED, topo->package_id); + } + + this_cpu_ci->cpu_map_populated = true; + + return 0; +} diff --git a/arch/sw_64/kernel/chip_setup.c b/arch/sw_64/kernel/chip_setup.c new file mode 100644 index 0000000000000000000000000000000000000000..b8c359db2ef67b1272600987621860cbefbfc47f --- /dev/null +++ b/arch/sw_64/kernel/chip_setup.c @@ -0,0 +1,245 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include + +struct sw64_chip_ops *sw64_chip; +struct sw64_chip_init_ops *sw64_chip_init; + +static int get_cpu_nums(void) +{ + if (is_guest_or_emul()) + return 1; + + return __get_cpu_nums(); +} + +static unsigned long __init get_node_mem(int nodeid) +{ + + if (is_guest_or_emul()) + return *(unsigned long *)MMSIZE & MMSIZE_MASK; + + return __get_node_mem(nodeid); +} + +static void __init setup_core_map(struct cpumask *cpumask) +{ + int i, j, cpu_num, cpuid, max_cores_per_cpu; + unsigned long coreonline; + + cpu_num = get_cpu_nums(); + cpuid = 0; + for (i = 0; i < cpu_num; i++) { + coreonline = sw64_io_read(i, CORE_ONLINE); + max_cores_per_cpu = MAX_CORES_PER_CPU; + + if (is_guest_or_emul()) + max_cores_per_cpu = 64; + + for (j = 0; j < max_cores_per_cpu; j++) { + if 
(coreonline & (1UL << j)) { + __cpu_to_rcid[cpuid] = (i << DOMAIN_ID_SHIFT) | (j << CORE_ID_SHIFT); + cpuid++; + } + } + } + + if (is_in_host() && core_is_ht()) { + for (i = 0; i < cpuid; i++) + __cpu_to_rcid[cpuid + i] = __cpu_to_rcid[i] | (1 << THREAD_ID_SHIFT); + + cpuid = cpuid + i; + } + + while (cpuid < NR_CPUS) { + __cpu_to_rcid[cpuid] = -1; + cpuid++; + } +} + +#ifdef CONFIG_PM +static void i2c_srst(void) +{ + sw64_io_write(0, I2C0_SRST_L, 0x0); + sw64_io_write(0, I2C0_SRST_L, 0x1); + + sw64_io_write(0, I2C1_SRST_L, 0x0); + sw64_io_write(0, I2C1_SRST_L, 0x1); + + sw64_io_write(0, I2C2_SRST_L, 0x0); + sw64_io_write(0, I2C2_SRST_L, 0x1); +} + +static void pcie_save(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + piu_save = kzalloc(sizeof(*piu_save), GFP_KERNEL); + + node = hose->node; + index = hose->index; + hose->sysdata = piu_save; + + piu_save->piuconfig0 = read_piu_ior0(node, index, PIUCONFIG0); + piu_save->piuconfig1 = read_piu_ior1(node, index, PIUCONFIG1); + piu_save->epdmabar = read_piu_ior0(node, index, EPDMABAR); + piu_save->msiaddr = read_piu_ior0(node, index, MSIADDR); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + piu_save->msiconfig[i] = read_piu_ior0(node, index, + MSICONFIG0 + (i << 7)); + } + } + + piu_save->iommuexcpt_ctrl = read_piu_ior0(node, index, IOMMUEXCPT_CTRL); + piu_save->dtbaseaddr = read_piu_ior0(node, index, DTBASEADDR); + + piu_save->intaconfig = read_piu_ior0(node, index, INTACONFIG); + piu_save->intbconfig = read_piu_ior0(node, index, INTBCONFIG); + piu_save->intcconfig = read_piu_ior0(node, index, INTCCONFIG); + piu_save->intdconfig = read_piu_ior0(node, index, INTDCONFIG); + piu_save->pmeintconfig = read_piu_ior0(node, index, PMEINTCONFIG); + piu_save->aererrintconfig = read_piu_ior0(node, index, AERERRINTCONFIG); + piu_save->hpintconfig = read_piu_ior0(node, index, HPINTCONFIG); + + } +} + +static void pcie_restore(void) +{ + struct pci_controller *hose; + struct piu_saved *piu_save; + unsigned long node, index; + u32 rc_misc_ctrl; + unsigned int value; + unsigned long i; + + for (hose = hose_head; hose; hose = hose->next) { + node = hose->node; + index = hose->index; + piu_save = hose->sysdata; + + write_piu_ior0(node, index, PIUCONFIG0, piu_save->piuconfig0); + write_piu_ior1(node, index, PIUCONFIG1, piu_save->piuconfig1); + write_piu_ior0(node, index, EPDMABAR, piu_save->epdmabar); + write_piu_ior0(node, index, MSIADDR, piu_save->msiaddr); + + if (IS_ENABLED(CONFIG_UNCORE_XUELANG)) { + for (i = 0; i < 256; i++) { + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), + piu_save->msiconfig[i]); + } + } + + write_piu_ior0(node, index, IOMMUEXCPT_CTRL, piu_save->iommuexcpt_ctrl); + write_piu_ior0(node, index, DTBASEADDR, piu_save->dtbaseaddr); + + write_piu_ior0(node, index, INTACONFIG, piu_save->intaconfig); + write_piu_ior0(node, index, INTBCONFIG, piu_save->intbconfig); + write_piu_ior0(node, index, INTCCONFIG, piu_save->intcconfig); + write_piu_ior0(node, index, INTDCONFIG, piu_save->intdconfig); + write_piu_ior0(node, index, PMEINTCONFIG, piu_save->pmeintconfig); + write_piu_ior0(node, index, AERERRINTCONFIG, piu_save->aererrintconfig); + write_piu_ior0(node, index, HPINTCONFIG, piu_save->hpintconfig); + + /* Enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* Fix up 
DEVICE_ID_VENDOR_ID register */
+		value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN;
+		write_rc_conf(node, index, RC_VENDOR_ID, value);
+
+		/* Set PCI-E root class code */
+		value = read_rc_conf(node, index, RC_REVISION_ID);
+		write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value);
+
+		/* Disable DBI_RO_WR_EN */
+		write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl);
+	}
+
+}
+
+static unsigned long saved_dvc_int, saved_long_time;
+
+static inline void intpu_save(void)
+{
+	switch (cpu_desc.model) {
+	case CPU_SW831:
+		saved_long_time = __io_read_longtime(0);
+		break;
+	default:
+		break;
+	}
+}
+
+static inline void intpu_restore(void)
+{
+	switch (cpu_desc.model) {
+	case CPU_SW831:
+		__io_write_longtime(0, saved_long_time);
+		__io_write_longtime_start_en(0, 0x1);
+		break;
+	default:
+		pr_info("long time start is disabled!\n");
+		break;
+	}
+}
+
+static inline void spbu_save(void)
+{
+	saved_dvc_int = sw64_io_read(0, MCU_DVC_INT_EN);
+}
+
+static inline void spbu_restore(void)
+{
+	i2c_srst();
+	sw64_io_write(0, MCU_DVC_INT_EN, saved_dvc_int);
+}
+
+static int io_suspend(void)
+{
+	spbu_save();
+	intpu_save();
+	pcie_save();
+
+	return 0;
+}
+
+static void io_resume(void)
+{
+	pcie_restore();
+	intpu_restore();
+	spbu_restore();
+}
+#endif /* CONFIG_PM */
+
+static struct sw64_chip_init_ops chip_init_ops = {
+	.early_init = {
+		.setup_core_map = setup_core_map,
+		.get_node_mem = get_node_mem,
+	},
+};
+
+static struct sw64_chip_ops chip_ops = {
+	.get_cpu_num = get_cpu_nums,
+};
+
+void __init setup_chip_ops(void)
+{
+	sw64_chip_init = &chip_init_ops;
+	sw64_chip = &chip_ops;
+	setup_chip_pci_ops();
+#ifdef CONFIG_PM
+	io_syscore_ops.suspend = io_suspend;
+	io_syscore_ops.resume = io_resume;
+#endif
+}
diff --git a/arch/sw_64/kernel/cpuautoplug.c b/arch/sw_64/kernel/cpuautoplug.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4ea0ef080d8fae84d00af5dccab12d605aeb201
--- /dev/null
+++ b/arch/sw_64/kernel/cpuautoplug.c
@@ -0,0 +1,490 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+int autoplug_enabled;
+int autoplug_verbose;
+int autoplug_adjusting;
+
+DEFINE_PER_CPU(int, cpu_adjusting);
+
+struct cpu_autoplug_info {
+	cputime64_t prev_idle;
+	cputime64_t prev_wall;
+	struct delayed_work work;
+	unsigned int sampling_rate;
+	int maxcpus;  /* max cpus for autoplug */
+	int mincpus;  /* min cpus for autoplug */
+	int dec_reqs; /* continuous core-decreasing requests */
+	int inc_reqs; /* continuous core-increasing requests */
+};
+
+struct cpu_autoplug_info ap_info;
+
+static ssize_t enabled_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_enabled);
+}
+
+
+static ssize_t enabled_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	/* sysfs input is NUL-terminated; parse it directly */
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_enabled = n;
+
+	return count;
+}
+
+static ssize_t verbose_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", autoplug_verbose);
+}
+
+static ssize_t verbose_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > 1 || n < 0)
+		return -EINVAL;
+
+	autoplug_verbose = n;
+
+	return count;
+}
+
+static ssize_t 
maxcpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.maxcpus);
+}
+
+static ssize_t maxcpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > num_possible_cpus() || n < ap_info.mincpus)
+		return -EINVAL;
+
+	ap_info.maxcpus = n;
+
+	return count;
+}
+
+static ssize_t mincpus_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.mincpus);
+}
+
+static ssize_t mincpus_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > ap_info.maxcpus || n < 1)
+		return -EINVAL;
+
+	ap_info.mincpus = n;
+
+	return count;
+}
+
+static ssize_t sampling_rate_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", ap_info.sampling_rate);
+}
+
+#define SAMPLING_RATE_MAX 1000
+#define SAMPLING_RATE_MIN 600
+
+static ssize_t sampling_rate_store(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t count)
+{
+	int n, err;
+
+	err = kstrtoint(buf, 0, &n);
+	if (err)
+		return err;
+
+	if (n > SAMPLING_RATE_MAX || n < SAMPLING_RATE_MIN)
+		return -EINVAL;
+
+	ap_info.sampling_rate = n;
+
+	return count;
+}
+
+static ssize_t available_value_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return sprintf(buf, "enabled: 0-1\nverbose: 0-1\nmaxcpus: 1-%d\n"
+		       "mincpus: 1-%d\nsampling_rate: %d-%d\n",
+		       num_possible_cpus(), num_possible_cpus(),
+		       SAMPLING_RATE_MIN, SAMPLING_RATE_MAX);
+}
+
+static DEVICE_ATTR_RW(enabled);
+static DEVICE_ATTR_RW(verbose);
+static DEVICE_ATTR_RW(maxcpus);
+static DEVICE_ATTR_RW(mincpus);
+static DEVICE_ATTR_RW(sampling_rate);
+static DEVICE_ATTR_RO(available_value);
+
+static struct attribute *cpuclass_default_attrs[] = {
+	&dev_attr_enabled.attr,
+	&dev_attr_verbose.attr,
+	&dev_attr_maxcpus.attr,
+	&dev_attr_mincpus.attr,
+	&dev_attr_sampling_rate.attr,
+	&dev_attr_available_value.attr,
+	NULL
+};
+
+static struct attribute_group cpuclass_attr_group = {
+	.attrs = cpuclass_default_attrs,
+	.name = "cpuautoplug",
+};
+
+static int __init setup_autoplug(char *str)
+{
+	if (!strcmp(str, "off"))
+		autoplug_enabled = 0;
+	else if (!strcmp(str, "on"))
+		autoplug_enabled = 1;
+	else
+		return 0;
+	return 1;
+}
+
+__setup("autoplug=", setup_autoplug);
+
+static cputime64_t calc_busy_time(unsigned int cpu)
+{
+	cputime64_t busy_time;
+
+	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
+	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
+	busy_time += 1;
+
+	return busy_time;
+}
+
+static inline cputime64_t get_idle_time_jiffy(cputime64_t *wall)
+{
+	unsigned int cpu;
+	cputime64_t idle_time = 0;
+	cputime64_t cur_wall_time;
+	cputime64_t busy_time;
+
+	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+
+	for_each_online_cpu(cpu) {
+		busy_time = calc_busy_time(cpu);
+
+		idle_time += cur_wall_time - busy_time;
+	}
+
+	if (wall)
+		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
+
+	return (cputime64_t)jiffies_to_usecs(idle_time);
+}
+
+static inline cputime64_t 
sw64_get_idle_time(cputime64_t *wall) +{ + unsigned int cpu; + u64 idle_time = 0; + + for_each_online_cpu(cpu) { + idle_time += get_cpu_idle_time_us(cpu, wall); + if (idle_time == -1ULL) + return get_idle_time_jiffy(wall); + } + + return idle_time; +} + +static cputime64_t get_min_busy_time(cputime64_t arr[], int size) +{ + int i, min_cpu_idx; + cputime64_t min_time = arr[0]; + + for (i = 0; i < size; i++) { + if (arr[i] > 0 && arr[i] < min_time) { + min_time = arr[i]; + min_cpu_idx = i; + } + } + + return min_cpu_idx; +} + +static int find_min_busy_cpu(void) +{ + int nr_all_cpus = num_possible_cpus(); + unsigned int cpus, target_cpu; + cputime64_t busy_time; + cputime64_t b_time[NR_CPUS]; + + memset(b_time, 0, sizeof(b_time)); + for_each_online_cpu(cpus) { + busy_time = calc_busy_time(cpus); + b_time[cpus] = busy_time; + } + target_cpu = get_min_busy_time(b_time, nr_all_cpus); + return target_cpu; +} + +static void increase_cores(int cur_cpus) +{ + struct device *dev; + + if (cur_cpus == ap_info.maxcpus) + return; + + cur_cpus = cpumask_next_zero(0, cpu_online_mask); + + dev = get_cpu_device(cur_cpus); + + per_cpu(cpu_adjusting, dev->id) = 1; + lock_device_hotplug(); + cpu_device_up(dev); + pr_info("The target_cpu is %d, After cpu_up, the cpu_num is %d\n", + dev->id, num_online_cpus()); + get_cpu_device(dev->id)->offline = false; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; +} + +static void decrease_cores(int cur_cpus) +{ + struct device *dev; + + if (cur_cpus == ap_info.mincpus) + return; + + cur_cpus = find_min_busy_cpu(); + + dev = get_cpu_device(cur_cpus); + + if (dev->id > 0) { + per_cpu(cpu_adjusting, dev->id) = -1; + lock_device_hotplug(); + cpu_device_down(dev); + pr_info("The target_cpu is %d. After cpu_down, the cpu_num is %d\n", + cur_cpus, num_online_cpus()); + get_cpu_device(dev->id)->offline = true; + unlock_device_hotplug(); + per_cpu(cpu_adjusting, dev->id) = 0; + } +} + +#define INC_THRESHOLD 80 +#define DEC_THRESHOLD 40 + +static void do_autoplug_timer(struct work_struct *work) +{ + cputime64_t cur_wall_time = 0, cur_idle_time; + unsigned long idle_time, wall_time; + int delay, load; + int nr_cur_cpus = num_online_cpus(); + int nr_all_cpus = num_possible_cpus(); + int inc_req = 1, dec_req = 2; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(smp_processor_id()); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", 0); + return; + } + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? 
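+		/*
+		 * Cap at nr_cpu_ids when maxcpus= asks for more.  The
+		 * hysteresis below works in percent of one CPU: with 4
+		 * cores online, a core is removed once load stays under
+		 * 3*100 - 40 = 260 for enough consecutive samples, and
+		 * added once it exceeds 3*100 + 80 = 380.
+		 */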
nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + + if (strcmp(policy->governor->name, "performance") == 0) { + ap_info.mincpus = ap_info.maxcpus; + } else if (strcmp(policy->governor->name, "powersave") == 0) { + ap_info.maxcpus = ap_info.mincpus; + } else if (strcmp(policy->governor->name, "ondemand") == 0) { + ap_info.sampling_rate = 500; + inc_req = 0; + dec_req = 2; + } else if (strcmp(policy->governor->name, "conservative") == 0) { + inc_req = 1; + dec_req = 3; + ap_info.sampling_rate = 1000; /* 1s */ + } + + BUG_ON(smp_processor_id() != 0); + delay = msecs_to_jiffies(ap_info.sampling_rate); + if (!autoplug_enabled || system_state != SYSTEM_RUNNING) + goto out; + + autoplug_adjusting = 1; + + if (nr_cur_cpus > ap_info.maxcpus) { + decrease_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + if (nr_cur_cpus < ap_info.mincpus) { + increase_cores(nr_cur_cpus); + autoplug_adjusting = 0; + goto out; + } + + cur_idle_time = sw64_get_idle_time(&cur_wall_time); + if (cur_wall_time == 0) + cur_wall_time = jiffies64_to_cputime64(get_jiffies_64()); + + wall_time = (unsigned int)(cur_wall_time - ap_info.prev_wall); + ap_info.prev_wall = cur_wall_time; + + idle_time = (unsigned int)(cur_idle_time - ap_info.prev_idle); + idle_time += wall_time * (nr_all_cpus - nr_cur_cpus); + ap_info.prev_wall = cur_idle_time; + + if (unlikely(!wall_time || wall_time * nr_all_cpus < idle_time)) { + autoplug_adjusting = 0; + goto out; + } + + load = 100 * (wall_time * nr_all_cpus - idle_time) / wall_time; + + if (load < (nr_cur_cpus - 1) * 100 - DEC_THRESHOLD) { + ap_info.inc_reqs = 0; + if (ap_info.dec_reqs < dec_req) + ap_info.dec_reqs++; + else { + ap_info.dec_reqs = 0; + decrease_cores(nr_cur_cpus); + } + } else { + ap_info.dec_reqs = 0; + if (load > (nr_cur_cpus - 1) * 100 + INC_THRESHOLD) { + if (ap_info.inc_reqs < inc_req) + ap_info.inc_reqs++; + else { + ap_info.inc_reqs = 0; + increase_cores(nr_cur_cpus); + } + } + } + + autoplug_adjusting = 0; +out: + schedule_delayed_work_on(0, &ap_info.work, delay); +} + +static struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpuautoplug", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpuautoplug", + .owner = THIS_MODULE, + }, + .id_table = platform_device_ids, +}; + +static int __init cpuautoplug_init(void) +{ + int i, ret, delay; + struct device *dev_root; + + dev_root = bus_get_dev_root(&cpu_subsys); + if (dev_root) { + ret = sysfs_create_group(&dev_root->kobj, + &cpuclass_attr_group); + put_device(dev_root); + if (ret) + return ret; + } + + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("cpuautoplug: SW64 CPU autoplug driver.\n"); + + ap_info.maxcpus = + setup_max_cpus > nr_cpu_ids ? 
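+		/* mirror the runtime clamp applied in do_autoplug_timer() */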
nr_cpu_ids : setup_max_cpus; + ap_info.mincpus = ap_info.maxcpus / 4; + ap_info.dec_reqs = 0; + ap_info.inc_reqs = 0; + ap_info.sampling_rate = 720; /* 720ms */ + if (setup_max_cpus == 0) { /* boot with npsmp */ + ap_info.maxcpus = 1; + autoplug_enabled = 0; + } + if (setup_max_cpus > num_possible_cpus()) + ap_info.maxcpus = num_possible_cpus(); + + pr_info("mincpu = %d, maxcpu = %d, autoplug_enabled = %d, rate = %d\n", + ap_info.mincpus, ap_info.maxcpus, autoplug_enabled, + ap_info.sampling_rate); + + for_each_possible_cpu(i) + per_cpu(cpu_adjusting, i) = 0; + delay = msecs_to_jiffies(ap_info.sampling_rate * 24); + INIT_DEFERRABLE_WORK(&ap_info.work, do_autoplug_timer); + schedule_delayed_work_on(0, &ap_info.work, delay); + + if (!autoplug_enabled) + cancel_delayed_work_sync(&ap_info.work); + + return ret; +} + +late_initcall(cpuautoplug_init); diff --git a/arch/sw_64/kernel/crash_dump.c b/arch/sw_64/kernel/crash_dump.c new file mode 100644 index 0000000000000000000000000000000000000000..4484673823b8e6065d9efb5f2299a21df67d421a --- /dev/null +++ b/arch/sw_64/kernel/crash_dump.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/crash_dump.c + * + * Copyright (C) 2019 JN + * Author: He Sheng + * + * This code is taken from arch/x86/kernel/crash_dump_64.c + * Created by: Hariprasad Nellitheertha (hari@in.ibm.com) + * Copyright (C) IBM Corporation, 2004. All rights reserved + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include + +/** + * copy_oldmem_page() - copy one page from old kernel memory + * @pfn: page frame number to be copied + * @buf: buffer where the copied page is placed + * @csize: number of bytes to copy + * @offset: offset in bytes into the page + * @userbuf: if set, @buf is int he user address space + * + * This function copies one page from old kernel memory into buffer pointed by + * @buf. If @buf is in userspace, set @userbuf to %1. Returns number of bytes + * copied or negative error in case of failure. 
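+ *
+ * The old kernel's pages are not in the crashkernel's direct mapping,
+ * so the page is mapped with ioremap() for the duration of the copy
+ * and unmapped again before returning.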
+ */ +ssize_t copy_oldmem_page(unsigned long pfn, char *buf, + size_t csize, unsigned long offset, + int userbuf) +{ + void *vaddr; + + if (!csize) + return 0; + + vaddr = ioremap(__pfn_to_phys(pfn), PAGE_SIZE); + if (!vaddr) + return -ENOMEM; + + if (userbuf) { + if (copy_to_user(buf, vaddr + offset, csize)) { + iounmap(vaddr); + return -EFAULT; + } + } else { + memcpy(buf, vaddr + offset, csize); + } + + iounmap(vaddr); + return csize; +} diff --git a/arch/sw_64/kernel/dup_print.c b/arch/sw_64/kernel/dup_print.c new file mode 100644 index 0000000000000000000000000000000000000000..439ac75feb01f23ed201eb4e4dc3ba23da8ff232 --- /dev/null +++ b/arch/sw_64/kernel/dup_print.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#ifdef CONFIG_SW64_RRK + +#define KERNEL_PRINTK_BUFF_BASE (0x700000UL + __START_KERNEL_map) + +static DEFINE_SPINLOCK(printk_lock); + +unsigned long sw64_printk_offset; +#define PRINTK_SIZE 0x100000UL + +int sw64_printk(const char *fmt, va_list args) +{ + char *sw64_printk_buf; + int printed_len = 0; + unsigned long flags; + + spin_lock_irqsave(&printk_lock, flags); + + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + + if (sw64_printk_offset >= (PRINTK_SIZE-1024)) { //printk wrapped + sw64_printk_offset = 0; + sw64_printk_buf = (char *)(KERNEL_PRINTK_BUFF_BASE + sw64_printk_offset); + memset(sw64_printk_buf, 0, PRINTK_SIZE); + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + } else { + printed_len += vscnprintf(sw64_printk_buf, 1024, fmt, args); + if (is_in_emul()) { + void __iomem *addr = __va(QEMU_PRINTF_BUFF_BASE); + u64 data = ((u64)sw64_printk_buf & 0xffffffffUL) + | ((u64)printed_len << 32); + *(u64 *)addr = data; + } + } + sw64_printk_offset += printed_len; + spin_unlock_irqrestore(&printk_lock, flags); + return printed_len; +} +#endif + +#ifdef CONFIG_SW64_RRU +#include + +static DEFINE_SPINLOCK(printf_lock); +#define USER_PRINT_BUFF_BASE (0x600000UL + __START_KERNEL_map) +#define USER_PRINT_BUFF_LEN 0x100000UL +#define USER_MESSAGE_MAX_LEN 0x100000UL +unsigned long sw64_printf_offset; +int sw64_user_printf(const char __user *buf, int len) +{ + static char *user_printf_buf; + unsigned long flags; + + if (current->pid <= 0) + return 0; + + /* + * do not write large (fake) message which may not be from + * STDOUT/STDERR any more as file descriptor could be duplicated + * in a pipe. 
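+ *
+ * Accepted messages are appended to the fixed 1 MiB buffer at
+ * USER_PRINT_BUFF_BASE; the write offset simply wraps back to zero
+ * when a message would run past the end.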
+ */ + if (len > USER_MESSAGE_MAX_LEN) + return 0; + + spin_lock_irqsave(&printf_lock, flags); + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + + if (sw64_printf_offset == 0) + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + + if ((sw64_printf_offset + len) > USER_PRINT_BUFF_LEN) { + sw64_printf_offset = 0; + user_printf_buf = (char *)(USER_PRINT_BUFF_BASE + sw64_printf_offset); + memset(user_printf_buf, 0, USER_PRINT_BUFF_LEN); + } + copy_from_user(user_printf_buf, buf, len); + sw64_printf_offset += len; + spin_unlock_irqrestore(&printf_lock, flags); + return 0; +} +#endif diff --git a/arch/sw_64/kernel/early_init.c b/arch/sw_64/kernel/early_init.c new file mode 100644 index 0000000000000000000000000000000000000000..2ec7a3e994436034ec5d507858d9588d0f3ed6c0 --- /dev/null +++ b/arch/sw_64/kernel/early_init.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include + +asmlinkage __visible void __init sw64_start_kernel(void) +{ + fixup_hmcall(); + save_ktp(); + start_kernel(); +} diff --git a/arch/sw_64/kernel/early_printk.c b/arch/sw_64/kernel/early_printk.c new file mode 100644 index 0000000000000000000000000000000000000000..66af1165e89b266d1ea4ec5855da4baa1bfc67ff --- /dev/null +++ b/arch/sw_64/kernel/early_printk.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +static unsigned long early_serial_base; /* ttyS0 */ + +#define XMTRDY 0x20 + +#define DLAB 0x80 + +#define TXR 0 /* Transmit register (WRITE) */ +#define RXR 0 /* Receive register (READ) */ +#define IER 1 /* Interrupt Enable */ +#define IIR 2 /* Interrupt ID */ +#define FCR 2 /* FIFO control */ +#define LCR 3 /* Line control */ +#define MCR 4 /* Modem control */ +#define LSR 5 /* Line Status */ +#define MSR 6 /* Modem Status */ +#define DLL 0 /* Divisor Latch Low */ +#define DLH 1 /* Divisor latch High */ + +static void mem32_serial_out(unsigned long addr, int offset, int value) +{ + void __iomem *vaddr = (void __iomem *)addr; + + offset = offset << 9; + + writel(value, vaddr + offset); +} + +static unsigned int mem32_serial_in(unsigned long addr, int offset) +{ + void __iomem *vaddr = (void __iomem *)addr; + + offset = offset << 9; + + return readl(vaddr + offset); +} + +static unsigned int (*serial_in)(unsigned long addr, int offset) = mem32_serial_in; +static void (*serial_out)(unsigned long addr, int offset, int value) = mem32_serial_out; + +static int early_serial_putc(unsigned char ch) +{ + unsigned int timeout = 0xffff; + + while ((serial_in(early_serial_base, LSR) & XMTRDY) == 0 && --timeout) + cpu_relax(); + serial_out(early_serial_base, TXR, ch); + + return timeout ? 
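+	/* 0 if the byte went out, -1 if the poll counter expired first */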
0 : -1; +} + +static void early_serial_write(struct console *con, const char *s, unsigned int n) +{ + while (*s && n-- > 0) { + if (*s == '\n') + early_serial_putc('\r'); + early_serial_putc(*s); + s++; + } +} + +static unsigned int uart_get_refclk(void) +{ + return 24000000UL; +} + +static unsigned int uart_calculate_baudrate_divisor(unsigned long baudrate) +{ + unsigned int refclk = uart_get_refclk(); + + return (1 + (2 * refclk) / (baudrate * 16)) / 2; +} + +static __init void early_serial_hw_init(unsigned long baud) +{ + unsigned char c; + unsigned long divisor = uart_calculate_baudrate_divisor(baud); + + serial_out(early_serial_base, LCR, 0x3); /* 8n1 */ + serial_out(early_serial_base, IER, 0); /* no interrupt */ + serial_out(early_serial_base, FCR, 0); /* no fifo */ + serial_out(early_serial_base, MCR, 0x3); /* DTR + RTS */ + + c = serial_in(early_serial_base, LCR); + serial_out(early_serial_base, LCR, c | DLAB); + serial_out(early_serial_base, DLL, divisor & 0xff); + serial_out(early_serial_base, DLH, (divisor >> 8) & 0xff); + serial_out(early_serial_base, LCR, c & ~DLAB); +} + +#define DEFAULT_BAUD 115200 + +static __init void early_serial_init(char *s) +{ + unsigned long baud = DEFAULT_BAUD; + int err; + + if (*s == ',') + ++s; + + if (*s) { + unsigned int port; + static const long bases[] __initconst = { 0xfff0803300000000ULL, + 0xfff0903300000000ULL }; + + if (!strncmp(s, "ttyS", 4)) + s += 4; + err = kstrtouint(s, 10, &port); + if (err || port > 1) + port = 0; + early_serial_base = bases[port]; + s += strcspn(s, ","); + if (*s == ',') + s++; + } + + if (*s) { + err = kstrtoul(s, 0, &baud); + if (err || baud == 0) + baud = DEFAULT_BAUD; + } + + /* These will always be IO based ports */ + serial_in = mem32_serial_in; + serial_out = mem32_serial_out; + + /* Set up the HW */ + early_serial_hw_init(baud); +} + +static struct console early_serial_console = { + .name = "early", + .write = early_serial_write, + .flags = CON_PRINTBUFFER, + .index = -1, +}; + +static void early_console_register(struct console *con, int keep_early) +{ + if (con->index != -1) { + pr_crit("ERROR: earlyprintk= %s already used\n", + con->name); + return; + } + early_console = con; + + if (keep_early) + early_console->flags &= ~CON_BOOT; + else + early_console->flags |= CON_BOOT; + + register_console(early_console); +} + +static int __init setup_early_printk(char *buf) +{ + int keep; + + if (!buf) + return 0; + + if (early_console) + return 0; + + keep = (strstr(buf, "keep") != NULL); + + if (!strncmp(buf, "serial", 6)) { + buf += 6; + early_serial_init(buf); + early_console_register(&early_serial_console, keep); + if (!strncmp(buf, ",ttyS", 5)) + buf += 5; + } + + return 0; +} + +early_param("earlyprintk", setup_early_printk); diff --git a/arch/sw_64/kernel/entry-ftrace.S b/arch/sw_64/kernel/entry-ftrace.S new file mode 100644 index 0000000000000000000000000000000000000000..73e8e043fc9d14fbbaa50bb164fcc4326329001b --- /dev/null +++ b/arch/sw_64/kernel/entry-ftrace.S @@ -0,0 +1,326 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/entry-ftrace.S + * + * Author: linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
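+ *
+ * mcount/ftrace trampolines for sw64: the static _mcount entry and the
+ * dynamic ftrace_caller/ftrace_regs_caller entries all live here.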
+ * + */ +#include +#include +#include + + .text + .set noat + .align 4 + +#define FTRACE_SP_OFF 0x50 + .macro mcount_enter + subl $sp, FTRACE_SP_OFF, $sp + stl $16, 0($sp) + stl $17, 0x8($sp) + stl $18, 0x10($sp) + stl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + stl $9, 0x20($sp) +#endif + stl $28, 0x28($sp) + stl $29, 0x30($sp) + stl $19, 0x38($sp) + stl $20, 0x40($sp) + stl $21, 0x48($sp) + .endm + + .macro mcount_end + ldl $16, 0($sp) + ldl $17, 0x8($sp) + ldl $18, 0x10($sp) + ldl $26, 0x18($sp) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldl $9, 0x20($sp) +#endif + ldl $28, 0x28($sp) + ldl $29, 0x30($sp) + ldl $19, 0x38($sp) + ldl $20, 0x40($sp) + ldl $21, 0x48($sp) + addl $sp, FTRACE_SP_OFF, $sp + .endm + + .macro RESTORE_GRAPH_ARGS + ldi $16, 0x18($sp) /* &ra */ + bis $31, $9, $17 /* pc */ + #ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 /* fp */ + #endif + .endm + + .macro SAVE_PT_REGS + ldi $sp, -PT_REGS_SIZE($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + stl $28, PT_REGS_R28($sp) + stl $29, PT_REGS_GP($sp) + ldi $0, PT_REGS_SIZE($sp) + stl $0, PT_REGS_SP($sp) + .endm + + .macro RESTORE_PT_REGS + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $16, PT_REGS_R16($sp) + ldl $17, PT_REGS_R17($sp) + ldl $18, PT_REGS_R18($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldl $29, PT_REGS_GP($sp) + ldi $sp, PT_REGS_SIZE($sp) + .endm + + .macro RESTORE_GRAPH_REG_ARGS + ldi $16, PT_REGS_R26($sp) + bis $31, $9, $17 +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + bis $31, $15, $18 +#endif + .endm + + /* save return value regs*/ + .macro save_return_regs + subl $sp, 0x8, $sp + stl $0, 0x0($sp) + .endm + + /* restore return value regs*/ + .macro restore_return_regs + ldl $0, 0x0($sp) + addl $sp, 0x8, $sp + .endm + + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * void ftrace_graph_caller(void) + * + * Called from ftrace_caller() or ftrace_regs_caller() when function_graph + * tracer is selected. + * This function prepare_ftrace_return() fakes ra's value on the call + * stack in order to intercept instrumented function's return path and + * run return_to_handler() later on its exit. 
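+ *
+ * The return path effectively becomes
+ *   func -> return_to_handler -> original caller,
+ * with ftrace_return_to_handler() supplying the original address.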
+ */ + +ENTRY(ftrace_graph_caller) + ldgp $29, 0($27) + ldi $sp, -16($sp) + stl $26, 0($sp) + stl $15, 8($sp) + bis $31, $sp, $15 + + ldi $27, prepare_ftrace_return +ftrace_graph_call: + .global ftrace_graph_call + /* + * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite + * the nop below. + */ + nop /* nop, or call prepare_ftrace_return() */ + + ldl $26, 0($sp) + ldl $15, 8($sp) + ldi $sp, 16($sp) + ret $31, ($26), 1 +ENDPROC(ftrace_graph_caller) + +/* + * void return_to_handler(void) + * + * Run ftrace_return_to_handler() before going back to parent. + * @fp is checked against the value passed by ftrace_graph_caller() + * only when HAVE_FUNCTION_GRAPH_FP_TEST is enabled. + * + * It is run by "ret" instruction which does not modify $27, so it + * has to recaculate $27 before ldgp. + */ +ENTRY(return_to_handler) + br $27, 1f +1: ldgp $29, 0($27) + save_return_regs + bis $31, $15, $16 /* parent's fp */ + ldi $27, ftrace_return_to_handler + call $26, ($27) + bis $31, $0, $26 + restore_return_regs + ret $31, ($26), 1 +END(return_to_handler) + +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + .global _mcount + .ent _mcount +_mcount: + ret $31, ($28), 1 + .end _mcount + + + .global ftrace_caller + .ent ftrace_caller +ftrace_caller: + mcount_enter + br $27, 1f +1: ldgp $29, 0($27) + + subl $28, MCOUNT_INSN_SIZE, $16 + bis $26, $31, $17 + ldl $18, function_trace_op + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* + * the graph tracer (specifically, prepare_ftrace_return) needs these + * arguments but for now the function tracer occupies the regs, so we + * save them in callee-saved regs to recover later. + */ + bis $31, $16, $9 +#endif + ldi $4, current_tracer + ldl $27, 0($4) + + .global ftrace_call +ftrace_call: /* tracer(pc, ra); */ + nop + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + RESTORE_GRAPH_ARGS + call ftrace_graph_caller +#endif + mcount_end + ret $31, ($28), 1 + .end ftrace_caller +#else /* !CONFIG_DYNAMIC_FTRACE */ + + .global _mcount + .ent _mcount +_mcount: + mcount_enter + br $27, 1f +1: ldgp $29, 0($27) + + ldl $27, ftrace_trace_function // if (ftrace_trace_function + ldi $5, ftrace_stub // != ftrace_stub) + cmpeq $27, $5, $6 // + bne $6, skip_ftrace + + subl $28, MCOUNT_INSN_SIZE, $16 // function's pc +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bis $31, $16, $9 +#endif + bis $26, $31, $17 // function's ra (parent's pc) + call $26, ($27) // (*ftrace_trace_function)(pc, ra); + +skip_ftrace: +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + ldl $4, ftrace_graph_return // if ((ftrace_graph_return + cmpeq $4, $5, $6 // != ftrace_stub) + beq $6, 2f + ldl $4, ftrace_graph_entry // || (ftrace_graph_entry + ldi $5, ftrace_graph_entry_stub // != ftrace_graph_entry_stub)) + cmpeq $4, $5, $6 + bne $6, 3f +2: RESTORE_GRAPH_ARGS + call ftrace_graph_caller // ftrace_graph_caller(); +#endif +3: mcount_end + ret $31, ($28), 1 + .end _mcount + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + .global ftrace_regs_caller + .ent ftrace_regs_caller +ftrace_regs_caller: + SAVE_PT_REGS + br $27, 1f +1: ldgp $29, 0($27) + + subl $28, MCOUNT_INSN_SIZE, $16 + bis $26, $31, $17 + ldi $4, function_trace_op + ldl $18, 0($4) + mov $sp, $19 + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + bis $31, $16, $9 +#endif + ldi $4, current_tracer + ldl $27, 0($4) + + .global ftrace_regs_call +ftrace_regs_call: + nop + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER + RESTORE_GRAPH_REG_ARGS + call ftrace_graph_caller +#endif + RESTORE_PT_REGS + ret $31, ($28), 1 + .end ftrace_regs_caller +#endif /* 
CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + + .global ftrace_stub + .ent ftrace_stub +ftrace_stub: + ret $31, ($26), 1 + .end ftrace_stub diff --git a/arch/sw_64/kernel/entry.S b/arch/sw_64/kernel/entry.S new file mode 100644 index 0000000000000000000000000000000000000000..59c2ff4eb91504efc802dd8994854947aea6315f --- /dev/null +++ b/arch/sw_64/kernel/entry.S @@ -0,0 +1,306 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel entry-points. + */ + +#include +#include +#include +#include +#include +#include + + .text + .set noat +/* + * This defines the normal kernel pt-regs layout. + * + * regs 9-15 preserved by C code, saving to pt_regs will make + * them easier to be accessed in an unified way. + * regs 16-18 saved by HMcode + * regs 29-30 saved and set up by HMcode + */ + + .macro SAVE_ALL + ldi $sp, -PT_REGS_HM_PS($sp) + stl $0, PT_REGS_R0($sp) + stl $1, PT_REGS_R1($sp) + stl $2, PT_REGS_R2($sp) + stl $3, PT_REGS_R3($sp) + stl $4, PT_REGS_R4($sp) + stl $28, PT_REGS_R28($sp) + stl $5, PT_REGS_R5($sp) + stl $6, PT_REGS_R6($sp) + stl $7, PT_REGS_R7($sp) + stl $8, PT_REGS_R8($sp) + stl $9, PT_REGS_R9($sp) + stl $10, PT_REGS_R10($sp) + stl $11, PT_REGS_R11($sp) + stl $12, PT_REGS_R12($sp) + stl $13, PT_REGS_R13($sp) + stl $14, PT_REGS_R14($sp) + stl $15, PT_REGS_R15($sp) + stl $19, PT_REGS_R19($sp) + stl $20, PT_REGS_R20($sp) + stl $21, PT_REGS_R21($sp) + stl $22, PT_REGS_R22($sp) + stl $23, PT_REGS_R23($sp) + stl $24, PT_REGS_R24($sp) + stl $25, PT_REGS_R25($sp) + stl $26, PT_REGS_R26($sp) + stl $27, PT_REGS_R27($sp) + ldl $1, PT_REGS_HM_R16($sp) + ldl $2, PT_REGS_HM_R17($sp) + ldl $3, PT_REGS_HM_R18($sp) + ldl $4, PT_REGS_HM_GP($sp) + ldl $5, PT_REGS_HM_PC($sp) + ldl $6, PT_REGS_HM_PS($sp) + stl $1, PT_REGS_R16($sp) + stl $2, PT_REGS_R17($sp) + stl $3, PT_REGS_R18($sp) + stl $4, PT_REGS_GP($sp) + stl $5, PT_REGS_PC($sp) + stl $6, PT_REGS_PS($sp) + and $6, 0x8, $7 + beq $7, 1f + sys_call HMC_rdusp + br 2f +1: ldi $0, PT_REGS_SIZE($sp) +2: stl $0, PT_REGS_SP($sp) + ldi $1, NO_SYSCALL + stl $1, PT_REGS_ORIG_R0($sp) + sys_call HMC_rdktp + .endm + + .macro RESTORE_ALL + ldl $16, PT_REGS_SP($sp) + /* skip wrusp if returning to kernel */ + blt $16, 1f + sys_call HMC_wrusp +1: ldl $1, PT_REGS_R16($sp) + ldl $2, PT_REGS_R17($sp) + ldl $3, PT_REGS_R18($sp) + ldl $4, PT_REGS_GP($sp) + ldl $5, PT_REGS_PC($sp) + ldl $6, PT_REGS_PS($sp) + stl $1, PT_REGS_HM_R16($sp) + stl $2, PT_REGS_HM_R17($sp) + stl $3, PT_REGS_HM_R18($sp) + stl $4, PT_REGS_HM_GP($sp) + stl $5, PT_REGS_HM_PC($sp) + stl $6, PT_REGS_HM_PS($sp) + ldl $0, PT_REGS_R0($sp) + ldl $1, PT_REGS_R1($sp) + ldl $2, PT_REGS_R2($sp) + ldl $3, PT_REGS_R3($sp) + ldl $4, PT_REGS_R4($sp) + ldl $5, PT_REGS_R5($sp) + ldl $6, PT_REGS_R6($sp) + ldl $7, PT_REGS_R7($sp) + ldl $8, PT_REGS_R8($sp) + ldl $9, PT_REGS_R9($sp) + ldl $10, PT_REGS_R10($sp) + ldl $11, PT_REGS_R11($sp) + ldl $12, PT_REGS_R12($sp) + ldl $13, PT_REGS_R13($sp) + ldl $14, PT_REGS_R14($sp) + ldl $15, PT_REGS_R15($sp) + ldl $19, PT_REGS_R19($sp) + ldl $20, PT_REGS_R20($sp) + ldl $21, PT_REGS_R21($sp) + ldl $22, PT_REGS_R22($sp) + ldl $23, PT_REGS_R23($sp) + ldl $24, PT_REGS_R24($sp) + ldl $25, PT_REGS_R25($sp) + ldl $26, PT_REGS_R26($sp) + ldl $27, PT_REGS_R27($sp) + ldl $28, PT_REGS_R28($sp) + ldi $sp, PT_REGS_HM_PS($sp) + .endm + +/* + * Non-syscall kernel entry points. 
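+ *
+ * Each handler below saves a full frame with SAVE_ALL and hands $sp
+ * (the pt_regs pointer) to its C handler; most of them then funnel
+ * back through ret_from_sys_call.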
+ */ + + .align 4 + .globl entInt + .ent entInt +entInt: + SAVE_ALL + mov $sp, $19 + call $26, do_entInt + br ret_from_sys_call + .end entInt + + .align 4 + .globl entArith + .ent entArith +entArith: + SAVE_ALL + mov $sp, $18 + call $26, do_entArith + br ret_from_sys_call + .end entArith + + .align 4 + .globl entMM + .ent entMM +entMM: + SAVE_ALL + mov $sp, $19 + call $26, do_page_fault + br ret_from_sys_call + .end entMM + + .align 4 + .globl entIF + .ent entIF +entIF: + SAVE_ALL + mov $sp, $18 + call $26, do_entIF + br ret_from_sys_call + .end entIF + +/* + * Handle unalignment exception. + * We don't handle the "gp" register correctly, but if we fault on a + * gp-register unaligned load/store, something is _very_ wrong in the + * kernel anyway. + */ + .align 4 + .globl entUna + .ent entUna +entUna: + SAVE_ALL + mov $sp, $19 + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 /* user mode ? */ + beq $0, 1f + call $26, do_entUnaUser /* return to ret_from_syscall */ + br ret_from_sys_call +1: ldl $9, PT_REGS_GP($sp) + call $26, do_entUna + stl $9, PT_REGS_GP($sp) + RESTORE_ALL + sys_call HMC_rti + .end entUna + +/* + * The system call entry point is special. Most importantly, it looks + * like a function call to userspace as far as clobbered registers. We + * do preserve the argument registers (for syscall restarts) and $26 + * (for leaf syscall functions). + * + * So much for theory. We don't take advantage of this yet. + * + * Note that a0-a2 are not saved by HMcode as with the other entry points. + */ + + .align 4 + .globl entSys + .ent entSys +entSys: + SAVE_ALL + stl $16, PT_REGS_R16($sp) + stl $17, PT_REGS_R17($sp) + stl $18, PT_REGS_R18($sp) + mov $sp, $16 + call $26, do_entSys + br ret_from_sys_call + .end entSys + + .align 4 + .globl ret_from_sys_call + .ent ret_from_sys_call +ret_from_sys_call: +#ifdef CONFIG_SUBARCH_C3B + fillcs 0($sp) /* prefetch */ + fillcs 128($sp) /* prefetch */ +#endif + br $27, 1f +1: ldgp $29, 0($27) + /* Make sure need_resched and sigpending don't change between + sampling and the rti. */ + ldi $16, 7 + sys_call HMC_swpipl + ldl $0, PT_REGS_PS($sp) + and $0, 8, $0 + beq $0, restore_all +ret_to_user: + ldw $17, TI_FLAGS($8) + and $17, _TIF_WORK_MASK, $2 + beq $2, restore_all + mov $sp, $16 + call $26, do_notify_resume +restore_all: + RESTORE_ALL + sys_call HMC_rti + .end ret_from_sys_call + +/* + * Integer register context switch + * The callee-saved registers must be saved and restored. + * + * a0: previous task_struct (must be preserved across the switch) + * a1: next task_struct + * + * The value of a0 must be preserved by this function, as that's how + * arguments are passed to schedule_tail. + */ + .align 4 + .globl __switch_to + .ent __switch_to +__switch_to: + .prologue 0 + /* Save context into prev->thread */ + stl $26, TASK_THREAD_RA($16) + stl $30, TASK_THREAD_SP($16) + stl $9, TASK_THREAD_S0($16) + stl $10, TASK_THREAD_S1($16) + stl $11, TASK_THREAD_S2($16) + stl $12, TASK_THREAD_S3($16) + stl $13, TASK_THREAD_S4($16) + stl $14, TASK_THREAD_S5($16) + stl $15, TASK_THREAD_S6($16) + /* Restore context from next->thread */ + ldl $26, TASK_THREAD_RA($17) + ldl $30, TASK_THREAD_SP($17) + ldl $9, TASK_THREAD_S0($17) + ldl $10, TASK_THREAD_S1($17) + ldl $11, TASK_THREAD_S2($17) + ldl $12, TASK_THREAD_S3($17) + ldl $13, TASK_THREAD_S4($17) + ldl $14, TASK_THREAD_S5($17) + ldl $15, TASK_THREAD_S6($17) + mov $17, $8 + sys_call HMC_wrktp + mov $16, $0 + ret + .end __switch_to + +/* + * New processes begin life here. 
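+ *
+ * Both paths run schedule_tail() first; forks then return through
+ * ret_from_sys_call, while kernel threads call their payload function
+ * and drop to ret_to_user only if it returns.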
+ */ + + .globl ret_from_fork + .align 4 + .ent ret_from_fork +ret_from_fork: + call $26, schedule_tail + br ret_from_sys_call + .end ret_from_fork + +/* + * ... and new kernel threads - here + */ + .align 4 + .globl ret_from_kernel_thread + .ent ret_from_kernel_thread +ret_from_kernel_thread: + call $26, schedule_tail + mov $9, $27 + mov $10, $16 + call $26, ($9) + br ret_to_user + .end ret_from_kernel_thread diff --git a/arch/sw_64/kernel/fpu.S b/arch/sw_64/kernel/fpu.S new file mode 100644 index 0000000000000000000000000000000000000000..ddc988681fdd01986c6d2ed068f6b37c75e811d3 --- /dev/null +++ b/arch/sw_64/kernel/fpu.S @@ -0,0 +1,111 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include +#include + + .text + .set noat +ENTRY(__fpstate_save) + /* a0: prev task */ +#ifdef CONFIG_SUBARCH_C4 + csrr $1, CSR_WR_FREGS + beq $1, out +#endif + vstd $f0, TASK_THREAD_F0(a0) + vstd $f1, TASK_THREAD_F1(a0) + vstd $f2, TASK_THREAD_F2(a0) + vstd $f3, TASK_THREAD_F3(a0) + vstd $f4, TASK_THREAD_F4(a0) + vstd $f5, TASK_THREAD_F5(a0) + vstd $f6, TASK_THREAD_F6(a0) + vstd $f7, TASK_THREAD_F7(a0) + vstd $f8, TASK_THREAD_F8(a0) + vstd $f9, TASK_THREAD_F9(a0) + vstd $f10, TASK_THREAD_F10(a0) + vstd $f11, TASK_THREAD_F11(a0) + vstd $f12, TASK_THREAD_F12(a0) + vstd $f13, TASK_THREAD_F13(a0) + vstd $f14, TASK_THREAD_F14(a0) + vstd $f15, TASK_THREAD_F15(a0) + vstd $f16, TASK_THREAD_F16(a0) + vstd $f17, TASK_THREAD_F17(a0) + vstd $f18, TASK_THREAD_F18(a0) + vstd $f19, TASK_THREAD_F19(a0) + vstd $f20, TASK_THREAD_F20(a0) + vstd $f21, TASK_THREAD_F21(a0) + vstd $f22, TASK_THREAD_F22(a0) + vstd $f23, TASK_THREAD_F23(a0) + vstd $f24, TASK_THREAD_F24(a0) + vstd $f25, TASK_THREAD_F25(a0) + vstd $f26, TASK_THREAD_F26(a0) + vstd $f27, TASK_THREAD_F27(a0) + rfpcr $f0 + vstd $f28, TASK_THREAD_F28(a0) + vstd $f29, TASK_THREAD_F29(a0) + vstd $f30, TASK_THREAD_F30(a0) + fstd $f0, TASK_THREAD_FPCR(a0) + vldd $f0, TASK_THREAD_F0(a0) +out: + ret +END(__fpstate_save) + +ENTRY(__fpstate_restore) + /* a0: next task */ + fldd $f0, TASK_THREAD_FPCR(a0) + wfpcr $f0 + fimovd $f0, t1 + and t1, 0x3, t1 + beq t1, $setfpec_0 + subl t1, 0x1, t1 + beq t1, $setfpec_1 + subl t1, 0x1, t1 + beq t1, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f0, TASK_THREAD_F0(a0) + vldd $f1, TASK_THREAD_F1(a0) + vldd $f2, TASK_THREAD_F2(a0) + vldd $f3, TASK_THREAD_F3(a0) + vldd $f4, TASK_THREAD_F4(a0) + vldd $f5, TASK_THREAD_F5(a0) + vldd $f6, TASK_THREAD_F6(a0) + vldd $f7, TASK_THREAD_F7(a0) + vldd $f8, TASK_THREAD_F8(a0) + vldd $f9, TASK_THREAD_F9(a0) + vldd $f10, TASK_THREAD_F10(a0) + vldd $f11, TASK_THREAD_F11(a0) + vldd $f12, TASK_THREAD_F12(a0) + vldd $f13, TASK_THREAD_F13(a0) + vldd $f14, TASK_THREAD_F14(a0) + vldd $f15, TASK_THREAD_F15(a0) + vldd $f16, TASK_THREAD_F16(a0) + vldd $f17, TASK_THREAD_F17(a0) + vldd $f18, TASK_THREAD_F18(a0) + vldd $f19, TASK_THREAD_F19(a0) + vldd $f20, TASK_THREAD_F20(a0) + vldd $f21, TASK_THREAD_F21(a0) + vldd $f22, TASK_THREAD_F22(a0) + vldd $f23, TASK_THREAD_F23(a0) + vldd $f24, TASK_THREAD_F24(a0) + vldd $f25, TASK_THREAD_F25(a0) + vldd $f26, TASK_THREAD_F26(a0) + vldd $f27, TASK_THREAD_F27(a0) + vldd $f28, TASK_THREAD_F28(a0) + vldd $f29, TASK_THREAD_F29(a0) + vldd $f30, TASK_THREAD_F30(a0) +#ifdef CONFIG_SUBARCH_C4 + csrw $31, CSR_WR_FREGS +#endif + ret +END(__fpstate_restore) diff --git a/arch/sw_64/kernel/ftrace.c b/arch/sw_64/kernel/ftrace.c new file mode 
100644 index 0000000000000000000000000000000000000000..fb25ffe3dbdaf4f26bf4389e63d37fd1aaaa754b --- /dev/null +++ b/arch/sw_64/kernel/ftrace.c @@ -0,0 +1,176 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/ftrace.c + * + * Copyright (C) 2019 os kernel team + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include + +#include + +#ifdef CONFIG_FUNCTION_TRACER +EXPORT_SYMBOL(_mcount); +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE + +#define TI_FTRACE_ADDR (offsetof(struct thread_info, dyn_ftrace_addr)) +#define TI_FTRACE_REGS_ADDR \ + (offsetof(struct thread_info, dyn_ftrace_regs_addr)) + +unsigned long current_tracer = (unsigned long)ftrace_stub; + +/* + * Replace a single instruction, which may be a branch or NOP. + */ +static int ftrace_modify_code(unsigned long pc, u32 new) +{ + if (sw64_insn_write((void *)pc, new)) + return -EPERM; + return 0; +} + +/* + * Replace tracer function in ftrace_caller() + */ +int ftrace_update_ftrace_func(ftrace_func_t func) +{ + unsigned long pc; + u32 new; + int ret; + + current_tracer = (unsigned long)func; + pc = (unsigned long)&ftrace_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + + if (!ret) { + pc = (unsigned long)&ftrace_regs_call; + new = SW64_CALL(R26, R27, 0); + ret = ftrace_modify_code(pc, new); + } + + return ret; +} + +/* + * Turn on the call to ftrace_caller() in instrumented function + */ +int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) +{ + unsigned int insn[3]; + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned long offset; + + if (addr == FTRACE_ADDR) + offset = TI_FTRACE_ADDR; + else + offset = TI_FTRACE_REGS_ADDR; + + insn[0] = SW64_NOP; + /* ldl r28,(ftrace_addr_offset)(r8) */ + insn[1] = (0x23U << 26) | (28U << 21) | (8U << 16) | offset; + insn[2] = SW64_CALL(R28, R28, 0); + + /* replace the 3 mcount instructions at once */ + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +/* + * Turn off the call to ftrace_caller() in instrumented function + */ +int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, + unsigned long addr) +{ + unsigned long pc = rec->ip + MCOUNT_LDGP_SIZE; + unsigned int insn[3] = {SW64_NOP, SW64_NOP, SW64_NOP}; + + return copy_to_kernel_nofault((void *)pc, insn, 3 * SW64_INSN_SIZE); +} + +void arch_ftrace_update_code(int command) +{ + ftrace_modify_all_code(command); +} + +int __init ftrace_dyn_arch_init(void) +{ + struct thread_info *ti = task_thread_info(&init_task); + + ti->dyn_ftrace_addr = FTRACE_ADDR; + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS + ti->dyn_ftrace_regs_addr = FTRACE_REGS_ADDR; +#endif + return 0; +} +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return 0; +} +#endif + +#ifdef CONFIG_FUNCTION_GRAPH_TRACER +/* + * function_graph tracer expects ftrace_return_to_handler() to be called + * on the way back to parent. For this purpose, this function is called + * in _mcount() or ftrace_caller() to replace return address (*parent) on + * the call stack to return_to_handler. + * + * Note that @frame_pointer is used only for sanity check later. 
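+ *
+ * @parent points at the saved ra slot, so storing return_hooker there
+ * is what actually redirects the instrumented function's return.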
+ */ +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer) +{ + unsigned long return_hooker = (unsigned long)&return_to_handler; + unsigned long old; + + if (unlikely(atomic_read(¤t->tracing_graph_pause))) + return; + + /* + * Note: + * No protection against faulting at *parent, which may be seen + * on other archs. It's unlikely on AArch64. + */ + old = *parent; + + if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) + *parent = return_hooker; +} + +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * Turn on/off the call to ftrace_graph_caller() in ftrace_caller() + * depending on @enable. + */ +static int ftrace_modify_graph_caller(bool enable) +{ + unsigned long pc = (unsigned long)&ftrace_graph_call; + u32 new = SW64_NOP; + + if (enable) + new = SW64_CALL(R26, R27, 0); + return ftrace_modify_code(pc, new); +} + +int ftrace_enable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(true); +} + +int ftrace_disable_ftrace_graph_caller(void) +{ + return ftrace_modify_graph_caller(false); +} +#endif /* CONFIG_DYNAMIC_FTRACE */ +#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/sw_64/kernel/head.S b/arch/sw_64/kernel/head.S new file mode 100644 index 0000000000000000000000000000000000000000..fd0fbfbcf5b6418b561d3d56265d567d36ed437b --- /dev/null +++ b/arch/sw_64/kernel/head.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * initial boot stuff.. At this point, the bootloader has already + * switched into HMcode, and loaded us at the correct address + * (START_ADDR). So there isn't much left for us to do: just set up + * the kernel global pointer and jump to the kernel entry-point. + */ + +#include +#include +#include +#include + +__HEAD + .globl _stext + .set noreorder + .globl __start + .ent __start +_stext: +__start: + .prologue 0 + br $27, 1f +1: ldgp $29, 0($27) + /* We need to get current_task_info loaded up... */ + ldi $8, init_task + ldl $30, TASK_STACK($8) + /* ... and find our stack ... */ + ldi $30, ASM_THREAD_SIZE($30) + + /* ... and then we can clear bss data. */ + ldi $16, __bss_start + ldi $18, __bss_stop + subl $18, $16, $18 + mov $31, $17 + call $26, __constant_c_memset +#ifdef CONFIG_RELOCATABLE + ldi $30, -8($30) + stl $29, 0($30) + /* Copy kernel and apply the relocations */ + call $26, relocate_kernel + ldl $29, 0($30) + addl $29, $0, $29 + addl $8, $0, $8 + ldi $30, 8($30) + /* Repoint the sp into the new kernel image */ + addl $30, $0, $30 +#endif + /* ... and then we can start the kernel. */ + call $26, sw64_start_kernel + sys_call HMC_halt + .end __start + +#ifdef CONFIG_SMP + .align 3 + .globl __smp_callin + .ent __smp_callin + /* On entry here the PCB of the idle task for this processor + * has been loaded. We've arranged for the tilde_pcb[x] for + * this process to contain the PCBB of the target idle task. + */ +__smp_callin: + .prologue 1 + br $27, 2f # we copy this from above "br $27 1f" +2: ldgp $29, 0($27) # First order of business, load the GP. 
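+	# Secondary CPU path: invalidate stale TLB entries, work out this
+	# cpu's logical id from the hardware cid, then pick up its
+	# idle-task stack before entering smp_callin().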
+ + bis $31, $31, $16 # invalidate all TLB with current VPN + sys_call HMC_tbi + +#if defined(CONFIG_SUBARCH_C3B) + sys_call HMC_whami # Get hard cid + ldi $1, __cpu_to_rcid + ldi $2, 0($31) + ldi $4, CONFIG_NR_CPUS +3: ldw $3, 0($1) + cmpeq $3, $0, $3 + bne $3, 4f + addl $1, 4, $1 + addl $2, 1, $2 + cmpeq $2, $4, $5 + bne $5, 5f + br $31, 3b +4: ldi $0, 0($2) +#else + rcid $0 +#endif + + ldi $2, idle_task_pointer + s8addl $0, $2, $2 + ldl $8, 0($2) # Get ksp of idle thread + sys_call HMC_wrktp + + ldl $30, TASK_STACK($8) + ldi $30, ASM_THREAD_SIZE($30) + + call $26, smp_callin +5: + sys_call HMC_halt + .end __smp_callin +#endif /* CONFIG_SMP */ + # + # It is handy, on occasion, to make halt actually just loop. + # Putting it here means we dont have to recompile the whole + # kernel. + # + + .align 3 + .globl halt + .ent halt +halt: + .prologue 0 + sys_call HMC_halt + .end halt diff --git a/arch/sw_64/kernel/hibernate.c b/arch/sw_64/kernel/hibernate.c new file mode 100644 index 0000000000000000000000000000000000000000..644ea85043136066c1129b059735d3feb7dc9f71 --- /dev/null +++ b/arch/sw_64/kernel/hibernate.c @@ -0,0 +1,79 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +struct processor_state hibernate_state; +/* Defined in hibernate_asm.S */ +extern int restore_image(void); + +void save_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + vcb->ksp = rdksp(); + vcb->usp = rdusp(); + vcb->soft_tid = rtid(); + vcb->ptbr = rdptbr(); +} + +void restore_processor_state(void) +{ + struct vcpucb *vcb = &(hibernate_state.vcb); + + wrksp(vcb->ksp); + wrusp(vcb->usp); + wrtp(vcb->soft_tid); + wrptbr(vcb->ptbr); + sflush(); + tbiv(); +} + +int swsusp_arch_resume(void) +{ + restore_image(); + return 0; +} +/* References to section boundaries */ +extern const void __nosave_begin, __nosave_end; +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn = PFN_DOWN(__pa(&__nosave_begin)); + unsigned long nosave_end_pfn = PFN_UP(__pa(&__nosave_end)); + + return (pfn >= nosave_begin_pfn) && (pfn < nosave_end_pfn); +} + +struct restore_data_record { + unsigned long magic; +}; + +#define RESTORE_MAGIC 0x0123456789ABCDEFUL + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->magic = RESTORE_MAGIC; + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + return (rdr->magic == RESTORE_MAGIC) ? 
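+	/* magic mismatch: image written by an incompatible kernel */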
0 : -EINVAL; +} diff --git a/arch/sw_64/kernel/hibernate_asm.S b/arch/sw_64/kernel/hibernate_asm.S new file mode 100644 index 0000000000000000000000000000000000000000..ff997cd76c5aef4bb9fa2eaaced2f57c21a0c631 --- /dev/null +++ b/arch/sw_64/kernel/hibernate_asm.S @@ -0,0 +1,122 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(swsusp_arch_suspend) + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + + stl $8, PSTATE_KTP($16) + stl sp, PSTATE_SP($16) + call swsusp_save + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + ldl $26, CALLEE_RA($1) + + /* save current_thread_info()->pcbb */ + ret +END(swsusp_arch_suspend) + +ENTRY(restore_image) + /* prepare to copy image data to their original locations */ + ldi t0, restore_pblist + ldl t0, 0(t0) +$loop: + beq t0, $done + + /* get addresses from the pbe and copy the page */ + ldl t1, PBE_ADDR(t0) /* source */ + ldl t2, PBE_ORIG_ADDR(t0) /* destination */ + ldi t3, PAGE_SIZE + addl t1, t3, t3 +$cpyloop: + ldl t8, 0(t1) + stl t8, 0(t2) + addl t1, 8, t1 + addl t2, 8, t2 + cmpeq t1, t3, t4 + beq t4, $cpyloop + + /* progress to the next pbe */ + ldl t0, PBE_NEXT(t0) + bne t0, $loop +$done: + + /* tell the hibernation core that we've just restored the memory */ + ldi $0, in_suspend + stl $31, 0($0) + + ldi $16, hibernate_state + ldi $1, PSTATE_REGS($16) + + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($16) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $hibernate_setfpec_0 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_1 + subl $2, 0x1, $2 + beq $2, $hibernate_setfpec_2 + setfpec3 + br $hibernate_setfpec_over +$hibernate_setfpec_0: + setfpec0 + br $hibernate_setfpec_over +$hibernate_setfpec_1: + setfpec1 + br $hibernate_setfpec_over +$hibernate_setfpec_2: + setfpec2 +$hibernate_setfpec_over: + ldi $1, PSTATE_FPREGS($16) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + + ldl sp, PSTATE_SP($16) + ldl $8, PSTATE_KTP($16) + sys_call HMC_wrktp + + ldi $0, 0($31) + + ret +END(restore_image) diff --git a/arch/sw_64/kernel/hmcall.c b/arch/sw_64/kernel/hmcall.c new file mode 100644 index 0000000000000000000000000000000000000000..d2054a930bd72f648c363bee4282ddddd0f36572 --- /dev/null +++ b/arch/sw_64/kernel/hmcall.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/hmcall.c + * + * Copyright (C) 2022 WXIAT + * Author: He Sheng + */ + +#include +#include + +#define A0(func) (((HMC_##func & 0xFF) >> 6) & 0x1) +#define A1(func) ((((HMC_##func & 0xFF)>>6) & 0x2) >> 1) +#define A2(func) ((HMC_##func & 0x3F) << 7) + +#define T(func) ((A0(func) ^ A1(func)) & 0x1) +#define B0(func) ((T(func) | A0(func)) << 13) +#define 
B1(func) (((~T(func) & 1) | A1(func)) << 14) + +#define PRI_BASE 0x10000UL + +#define HMCALL_ENTRY(func) (PRI_BASE | B1(func) | B0(func) | A2(func)) + + +static inline void fixup_rdtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdtp)); + + entry[0] = 0x181ffec7; /* pri_rcsr $0, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrtp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrtp)); + + entry[0] = 0x1a1fffc7; /* pri_wcsr $16, CSR__TID */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_tbiasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(tbisasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x1a3fff46; /* pri_wcsr r17, CSR__DTB_IS */ + entry[8] = 0x18ffff47; /* pri_wcsr p7, CSR__DTB_PCR */ + entry[9] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[10] = 0x189ffe22; /* pri_rcsr p4, CSR__UPCR_UPN */ + entry[11] = 0x18dfff22; /* pri_wcsr p6, CSR__UPCR_UPN */ + entry[12] = 0x1a3fff06; /* pri_wcsr r17, CSR__ITB_IS */ + entry[13] = 0x1bffff15; /* pri_wcsr r31, CSR__IC_FLUSH */ + entry[14] = 0x189fff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[15] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_wrasid(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrasid)); + + entry[0] = 0x18fffe47; /* pri_rcsr p7, CSR__DTB_PCR*/ + entry[1] = 0x4a05c905; /* sll r16, CSR__DTB_PCR__UPN__S, p5 */ + entry[2] = 0xf89f03ff; /* ldi p4, CSR__DTB_PCR__UPN__M */ + entry[3] = 0x4885c904; /* sll p4, CSR__DTB_PCR__UPN__S, p4 */ + entry[4] = 0x40e40724; /* bic p7, p4, p4 */ + entry[5] = 0x40850745; /* bis p4, p5, p5 */ + entry[6] = 0x18bfff47; /* pri_wcsr p5, CSR__DTB_PCR */ + entry[7] = 0x4a04e906; /* sll r16, CSR__UPCR_UPN__UPN__S, p6 */ + entry[8] = 0x18dfff22; /* pri_wcsr p4, CSR__UPCR_UPN */ + entry[9] = 0x1ef00000; /* pri_ret/b p23 */ +} + +static inline void fixup_rdktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdktp)); + + entry[0] = 0x95161000; /* pri_ldl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrktp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrktp)); + + entry[0] = 0xb5161000; /* pri_stl/p $8, VC__KTP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_rdusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(rdusp)); + + entry[0] = 0x94161018; /* pri_ldl/p $0, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +static inline void fixup_wrusp(void) +{ + unsigned int *entry = __va(HMCALL_ENTRY(wrusp)); + + entry[0] = 0xb6161018; /* pri_stl/p $16, VC__USP(vcpucb) */ + entry[1] = 0x1ee00000; /* pri_ret $23 */ +} + +void __init fixup_hmcall(void) +{ +#if defined(CONFIG_SUBARCH_C3B) + fixup_rdtp(); + fixup_wrtp(); + fixup_tbiasid(); + fixup_wrasid(); + fixup_rdktp(); + fixup_wrktp(); + fixup_rdusp(); + fixup_wrusp(); + imemb(); +#endif +} + +#undef A0 +#undef A1 +#undef A2 +#undef T +#undef B0 +#undef B1 diff --git a/arch/sw_64/kernel/idle.c b/arch/sw_64/kernel/idle.c new file mode 100644 index 0000000000000000000000000000000000000000..d26bdc405b53a946b8974ca821239490b5b9b848 --- /dev/null +++ b/arch/sw_64/kernel/idle.c @@ 
-0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 idle loop support. + * + */ +#include +#include +#include +#include +#include + +void arch_cpu_idle(void) +{ + local_irq_enable(); + cpu_relax(); + + if (is_in_guest()) { + if (!need_resched()) + hcall(HCALL_HALT, 0, 0, 0); + } else { + asm( + ".globl __idle_start\n" + "__idle_start = .\n" + "ldw $1, %0($8)\n" + "srl $1, %1, $1\n" + "blbs $1, $need_resched\n" + "halt\n" + ".globl __idle_end\n" + "__idle_end = .\n" + "$need_resched:" + :: "i"(TI_FLAGS), "i"(TIF_NEED_RESCHED) + : "$1"); + } + local_irq_disable(); +} diff --git a/arch/sw_64/kernel/insn.c b/arch/sw_64/kernel/insn.c new file mode 100644 index 0000000000000000000000000000000000000000..281578e1bfc03b708be124e0e3d28644d811b512 --- /dev/null +++ b/arch/sw_64/kernel/insn.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ +#include +#include + +//static DEFINE_RAW_SPINLOCK(patch_lock); + +int __kprobes sw64_insn_read(void *addr, u32 *insnp) +{ + int ret; + __le32 val; + + ret = copy_from_kernel_nofault(&val, addr, SW64_INSN_SIZE); + if (!ret) + *insnp = le32_to_cpu(val); + + return ret; +} + +static int __kprobes __sw64_insn_write(void *addr, __le32 insn) +{ + void *waddr = addr; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +static int __kprobes __sw64_insn_double_write(void *addr, __le64 insn) +{ + void *waddr = addr; + //unsigned long flags = 0; + int ret; + + //raw_spin_lock_irqsave(&patch_lock, flags); + + ret = copy_to_kernel_nofault(waddr, &insn, 2 * SW64_INSN_SIZE); + + //raw_spin_unlock_irqrestore(&patch_lock, flags); + + return ret; +} + +int __kprobes sw64_insn_write(void *addr, u32 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_write(addr, cpu_to_le32(insn)); +} + +int __kprobes sw64_insn_double_write(void *addr, u64 insn) +{ + u32 *tp = addr; + /* SW64 instructions must be word aligned */ + if ((uintptr_t)tp & 0x3) + return -EINVAL; + return __sw64_insn_double_write(addr, cpu_to_le64(insn)); +} +unsigned int __kprobes sw64_insn_nop(void) +{ + return SW64_BIS(R31, R31, R31); +} + +unsigned int __kprobes sw64_insn_call(unsigned int ra, unsigned int rb) +{ + return SW64_CALL(ra, rb, 0); +} + +unsigned int __kprobes sw64_insn_sys_call(unsigned int num) +{ + return SW64_SYS_CALL(num); +} + +/* 'pc' is the address of br instruction, not the +4 PC. 'new_pc' is the target address. 
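+ * The 21-bit displacement is counted in instruction words from the
+ * updated PC: disp = (new_pc - (pc + 4)) / 4, which is what both the
+ * forward and backward cases below compute.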
*/ +unsigned int __kprobes sw64_insn_br(unsigned int ra, unsigned long pc, unsigned long new_pc) +{ + int offset = new_pc - pc; + unsigned int disp, minus = 0x1fffff; + + if (!(offset <= BR_MAX_DISP && offset >= -BR_MAX_DISP)) + return -1; + if (offset > 0) + disp = (offset - 4) / 4; + else + disp = ~(-offset / 4) & minus; + + return SW64_BR(ra, disp); + +} diff --git a/arch/sw_64/kernel/irq.c b/arch/sw_64/kernel/irq.c new file mode 100644 index 0000000000000000000000000000000000000000..126fe2f70495e10c9cc313dc2cdecb0e6b65516d --- /dev/null +++ b/arch/sw_64/kernel/irq.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/irq.c + * + * Copyright (C) 1995 Linus Torvalds + * + * This file contains the code used by various IRQ handling routines: + * asking for different IRQ's should be done through these routines + * instead of just grabbing them. Thus setups with different IRQ numbers + * shouldn't result in any weird surprises, and installing new handlers + * should be easier. + */ + +#include +#include +#include +#include + +volatile unsigned long irq_err_count; +DEFINE_PER_CPU(unsigned long, irq_pmi_count); +DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); +EXPORT_PER_CPU_SYMBOL(irq_stat); + +void ack_bad_irq(unsigned int irq) +{ + irq_err_count++; + pr_crit("Unexpected IRQ trap at vector %u\n", irq); +} + +u64 arch_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event; + + return sum; +} + +u64 arch_irq_stat(void) +{ + return 0; +} + +int arch_show_interrupts(struct seq_file *p, int prec) +{ + int j; + + seq_printf(p, "%*s: ", prec, "TIMER"); + for_each_online_cpu(j) + seq_printf(p, "%10u", per_cpu(irq_stat, j).timer_irqs_event); + seq_puts(p, "\n"); + +#ifdef CONFIG_SMP + seq_printf(p, "%*s: ", prec, "IPI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", cpu_data[j].ipi_count); + seq_puts(p, "\n"); +#endif + seq_printf(p, "%*s: ", prec, "PMI"); + for_each_online_cpu(j) + seq_printf(p, "%10lu ", per_cpu(irq_pmi_count, j)); + seq_puts(p, "\n"); + + seq_printf(p, "ERR: %10lu\n", irq_err_count); + return 0; +} + +/* + * handle_irq handles all normal device IRQ's (the special + * SMP cross-CPU interrupts have their own specific + * handlers). + */ + +#define MAX_ILLEGAL_IRQS 16 + +void +handle_irq(int irq) +{ + /* + * We ack quickly, we don't want the irq controller + * thinking we're snobs just because some other CPU has + * disabled global interrupts (we have already done the + * INT_ACK cycles, it's too late to try to pretend to the + * controller that we aren't taking the interrupt). + * + * 0 return value means that this irq is already being + * handled by some other CPU. (or is disabled) + */ + static unsigned int illegal_count; + struct irq_desc *desc = irq_to_desc(irq); + + if (!desc || ((unsigned int) irq > ACTUAL_NR_IRQS && + illegal_count < MAX_ILLEGAL_IRQS)) { + irq_err_count++; + illegal_count++; + pr_crit("device_interrupt: invalid interrupt %d\n", irq); + return; + } + + irq_enter(); + generic_handle_irq_desc(desc); + irq_exit(); +} + +#ifdef CONFIG_HOTPLUG_CPU +void fixup_irqs(void) +{ + irq_migrate_all_off_this_cpu(); +} +#endif diff --git a/arch/sw_64/kernel/irq_sw64.c b/arch/sw_64/kernel/irq_sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..989d55ee1b1b83afd6aad586d0d29b28e48f7284 --- /dev/null +++ b/arch/sw_64/kernel/irq_sw64.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 specific irq code. 
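+ *
+ * init_IRQ() unmasks the four PCIe MSI interrupt banks when running on
+ * bare metal, installs entInt as the interrupt entry point, then lets
+ * the platform and irqchip code take over.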
+ */ + +#include +#include + +#include +#include + +void __init +init_IRQ(void) +{ + /* + * Just in case the platform init_irq() causes interrupts/mchecks + * (as is the case with RAWHIDE, at least). + */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + + wrent(entInt, 0); + + sw64_init_irq(); + irqchip_init(); +} + +DEFINE_SPINLOCK(irq_lock); + +static void +__enable_irq(struct irq_data *d) +{ +} + +static void +__disable_irq(struct irq_data *d) +{ +} + +static unsigned int +__startup_irq(struct irq_data *d) +{ + __enable_irq(d); + return 0; +} + +static void +__mask_and_ack_irq(struct irq_data *d) +{ + spin_lock(&irq_lock); + __disable_irq(d); + spin_unlock(&irq_lock); +} + +struct irq_chip sw64_irq_chip = { + .name = "SW64_NODE", + .irq_startup = __startup_irq, + .irq_unmask = __enable_irq, + .irq_mask = __disable_irq, + .irq_mask_ack = __mask_and_ack_irq, +}; + +void __weak arch_init_msi_domain(struct irq_domain *parent) {} + +int __init arch_early_irq_init(void) +{ + int i; + + for (i = 0; i < NR_IRQS; ++i) { + irq_set_chip_and_handler(i, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(i, IRQ_LEVEL); + } + arch_init_msi_domain(NULL); + return 0; +} + +int __init arch_probe_nr_irqs(void) +{ + return NR_IRQS_LEGACY; +} diff --git a/arch/sw_64/kernel/jump_label.c b/arch/sw_64/kernel/jump_label.c new file mode 100644 index 0000000000000000000000000000000000000000..f3bc40370e4de9b77889343338b509d6bdcad8c6 --- /dev/null +++ b/arch/sw_64/kernel/jump_label.c @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include + +void arch_jump_label_transform(struct jump_entry *entry, + enum jump_label_type type) +{ + u32 *insnp = (u32 *)entry->code; + u32 insn; + + if (type == JUMP_LABEL_JMP) { + insn = sw64_insn_br(R31, (entry->code), entry->target); + BUG_ON(insn == -1); + } else { + insn = sw64_insn_nop(); + } + + *insnp = insn; + + flush_icache_range(entry->code, entry->code + SW64_INSN_SIZE); +} + +void arch_jump_label_transform_static(struct jump_entry *entry, + enum jump_label_type type) +{ + /* + * no need to rewrite NOP + */ +} diff --git a/arch/sw_64/kernel/kgdb.c b/arch/sw_64/kernel/kgdb.c new file mode 100644 index 0000000000000000000000000000000000000000..833f72a1577ca8f2d2f01113c0443739a9a9c025 --- /dev/null +++ b/arch/sw_64/kernel/kgdb.c @@ -0,0 +1,233 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sw64 KGDB support + * + * Based on arch/arm64/kernel/kgdb.c + * + * Copyright (C) Xia Bin + * Author: Xia Bin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . 
+ */ + +#include +#include + +struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { + { "r0", 8, offsetof(struct pt_regs, regs[0])}, + { "r1", 8, offsetof(struct pt_regs, regs[1])}, + { "r2", 8, offsetof(struct pt_regs, regs[2])}, + { "r3", 8, offsetof(struct pt_regs, regs[3])}, + { "r4", 8, offsetof(struct pt_regs, regs[4])}, + { "r5", 8, offsetof(struct pt_regs, regs[5])}, + { "r6", 8, offsetof(struct pt_regs, regs[6])}, + { "r7", 8, offsetof(struct pt_regs, regs[7])}, + { "r8", 8, offsetof(struct pt_regs, regs[8])}, + + { "r9", 8, offsetof(struct pt_regs, regs[9])}, + { "r10", 8, offsetof(struct pt_regs, regs[10])}, + { "r11", 8, offsetof(struct pt_regs, regs[11])}, + { "r12", 8, offsetof(struct pt_regs, regs[12])}, + { "r13", 8, offsetof(struct pt_regs, regs[13])}, + { "r14", 8, offsetof(struct pt_regs, regs[14])}, + { "r15", 8, offsetof(struct pt_regs, regs[15])}, + + { "r16", 8, offsetof(struct pt_regs, regs[16])}, + { "r17", 8, offsetof(struct pt_regs, regs[17])}, + { "r18", 8, offsetof(struct pt_regs, regs[18])}, + + { "r19", 8, offsetof(struct pt_regs, regs[19])}, + { "r20", 8, offsetof(struct pt_regs, regs[20])}, + { "r21", 8, offsetof(struct pt_regs, regs[21])}, + { "r22", 8, offsetof(struct pt_regs, regs[22])}, + { "r23", 8, offsetof(struct pt_regs, regs[23])}, + { "r24", 8, offsetof(struct pt_regs, regs[24])}, + { "r25", 8, offsetof(struct pt_regs, regs[25])}, + { "r26", 8, offsetof(struct pt_regs, regs[26])}, + { "r27", 8, offsetof(struct pt_regs, regs[27])}, + { "at", 8, offsetof(struct pt_regs, regs[28])}, + { "gp", 8, offsetof(struct pt_regs, regs[29])}, + { "sp", 8, offsetof(struct pt_regs, regs[30])}, + { "zero", 8, -1 }, + + { "f0", 8, -1 }, + { "f1", 8, -1 }, + { "f2", 8, -1 }, + { "f3", 8, -1 }, + { "f4", 8, -1 }, + { "f5", 8, -1 }, + { "f6", 8, -1 }, + { "f7", 8, -1 }, + { "f8", 8, -1 }, + { "f9", 8, -1 }, + { "f10", 8, -1 }, + { "f11", 8, -1 }, + { "f12", 8, -1 }, + { "f13", 8, -1 }, + { "f14", 8, -1 }, + { "f15", 8, -1 }, + { "f16", 8, -1 }, + { "f17", 8, -1 }, + { "f18", 8, -1 }, + { "f19", 8, -1 }, + { "f20", 8, -1 }, + { "f21", 8, -1 }, + { "f22", 8, -1 }, + { "f23", 8, -1 }, + { "f24", 8, -1 }, + { "f25", 8, -1 }, + { "f26", 8, -1 }, + { "f27", 8, -1 }, + { "f28", 8, -1 }, + { "f29", 8, -1 }, + { "f30", 8, -1 }, + { "fpcr", 8, -1 }, + + { "pc", 8, offsetof(struct pt_regs, pc)}, + { "", 8, -1 }, + { "tp", 8, -1}, +}; + +char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return NULL; + + if (dbg_reg_def[regno].offset != -1) + memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, + dbg_reg_def[regno].size); + else + memset(mem, 0, dbg_reg_def[regno].size); + return dbg_reg_def[regno].name; +} + +int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) +{ + if (regno >= DBG_MAX_REG_NUM || regno < 0) + return -EINVAL; + + if (dbg_reg_def[regno].offset != -1) + memcpy((void *)regs + dbg_reg_def[regno].offset, mem, + dbg_reg_def[regno].size); + return 0; +} + +void +sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) +{ + int i; + /* Initialize to zero */ + memset((char *)gdb_regs, 0, NUMREGBYTES); + for (i = 0; i < DBG_MAX_REG_NUM; i++) + gdb_regs[i] = get_reg(task, i); +} + +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + pr_info("BEFORE SET PC WITH %lx\n", pc); + instruction_pointer(regs) = pc; + pr_info("AFTER SET PC IS %lx\n", instruction_pointer(regs)); +} + +void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), NULL); +} + 
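With the register table and stubs above in place, the generic kgdb core can be driven in the usual way. A typical bring-up over a serial console, with device names that are purely illustrative::

    # echo ttyS0,115200 > /sys/module/kgdboc/parameters/kgdboc
    # echo g > /proc/sysrq-trigger          # drop into the debugger

    (gdb) target remote /dev/ttyUSB0        # on the host side
    (gdb) info registers                    # served by dbg_get_reg() above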
+void kgdb_roundup_cpus(void) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + +int kgdb_arch_handle_exception(int exception_vector, int signo, + int err_code, char *remcom_in_buffer, + char *remcom_out_buffer, + struct pt_regs *linux_regs) +{ + char *ptr; + unsigned long address = -1; + + switch (remcom_in_buffer[0]) { + case 'c': + ptr = &remcom_in_buffer[1]; + if (kgdb_hex2long(&ptr, &address)) + kgdb_arch_set_pc(linux_regs, address); + return 0; + } + return -1; +} + +static int __kgdb_notify(struct die_args *args, unsigned long cmd) +{ + struct pt_regs *regs = args->regs; + + /* Userspace events, ignore. */ + if (user_mode(regs)) + return NOTIFY_DONE; + + if (kgdb_handle_exception(1, args->signr, cmd, regs)) + return NOTIFY_DONE; + + return NOTIFY_STOP; +} + +static int +kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) +{ + unsigned long flags; + int ret; + + local_irq_save(flags); + ret = __kgdb_notify(ptr, cmd); + local_irq_restore(flags); + + return ret; +} + +static struct notifier_block kgdb_notifier = { + .notifier_call = kgdb_notify, +}; + +/* + * kgdb_arch_init - Perform any architecture specific initalization. + * This function will handle the initalization of any architecture + * specific callbacks. + */ +int kgdb_arch_init(void) +{ + int ret = register_die_notifier(&kgdb_notifier); + + if (ret != 0) + return ret; + return 0; +} + +/* + * kgdb_arch_exit - Perform any architecture specific uninitalization. + * This function will handle the uninitalization of any architecture + * specific callbacks, for dynamic registration and unregistration. + */ +void kgdb_arch_exit(void) +{ + unregister_die_notifier(&kgdb_notifier); +} + +/* + * sw64 instructions are always in LE. + * Break instruction is encoded in LE format + */ +const struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = {0x80, 00, 00, 00} +}; diff --git a/arch/sw_64/kernel/kprobes/Makefile b/arch/sw_64/kernel/kprobes/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..110ba2bf7752361442022553269447ceb802d465 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_KPROBES) += kprobes.o decode-insn.o +obj-$(CONFIG_KPROBES_ON_FTRACE) += kprobes-ftrace.o diff --git a/arch/sw_64/kernel/kprobes/common.h b/arch/sw_64/kernel/kprobes/common.h new file mode 100644 index 0000000000000000000000000000000000000000..de10058f0376ea342c973e0e03a8ef1bd9faa72c --- /dev/null +++ b/arch/sw_64/kernel/kprobes/common.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_KPROBES_COMMON_H +#define _SW64_KERNEL_KPROBES_COMMON_H + + +extern bool sw64_insn_can_kprobe(kprobe_opcode_t *addr); + + +#endif /* _SW64_KERNEL_KPROBES_COMMON_H */ diff --git a/arch/sw_64/kernel/kprobes/decode-insn.c b/arch/sw_64/kernel/kprobes/decode-insn.c new file mode 100644 index 0000000000000000000000000000000000000000..91c31111f2b73273d186d6b0c1cb9961e12dd68a --- /dev/null +++ b/arch/sw_64/kernel/kprobes/decode-insn.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm64/kernel/probes/decode-insn.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
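kgdb_arch_handle_exception() above implements only the 'c' (continue) packet: a bare "c" resumes at the trapped PC, while "c<hex-addr>" rewrites the PC first. A user-space sketch of that parse, mirroring what kgdb_hex2long() does for the kernel (the address is made up)::

    #include <stdio.h>
    #include <stdlib.h>

    /* Returns 0 for plain "c", 1 if an explicit resume address was given. */
    static int parse_continue(const char *pkt, unsigned long *addr)
    {
            if (pkt[0] != 'c')
                    return -1;
            if (pkt[1] == '\0')
                    return 0;               /* no address: keep current PC */
            *addr = strtoul(pkt + 1, NULL, 16);
            return 1;
    }

    int main(void)
    {
            unsigned long addr;

            if (parse_continue("cffffffff80110000", &addr) == 1)
                    printf("resume at %#lx\n", addr);
            return 0;
    }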
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#include + +#include "common.h" + +static bool __kprobes sw64_insn_is_steppable(u32 insn) +{ + /* + * Branch instructions will write a new value into the PC which is + * likely to be relative to the XOL address and therefore invalid. + * Deliberate generation of an exception during stepping is also not + * currently safe. Lastly, MSR instructions can do any number of nasty + * things we can't handle during single-stepping. + */ + if (sw64_insn_is_sys_call_b(insn) || + sw64_insn_is_sys_call(insn) || + sw64_insn_is_call(insn) || + sw64_insn_is_ret(insn) || + sw64_insn_is_jmp(insn) || + sw64_insn_is_br(insn) || + sw64_insn_is_bsr(insn) || + sw64_insn_is_memb(insn) || + sw64_insn_is_imemb(insn) || + sw64_insn_is_rtc(insn) || + sw64_insn_is_lldl(insn) || + sw64_insn_is_lldw(insn) || + sw64_insn_is_beq(insn) || + sw64_insn_is_bne(insn) || + sw64_insn_is_blt(insn) || + sw64_insn_is_ble(insn) || + sw64_insn_is_bgt(insn) || + sw64_insn_is_bge(insn) || + sw64_insn_is_blbc(insn) || + sw64_insn_is_blbs(insn) || + sw64_insn_is_fbeq(insn) || + sw64_insn_is_fbne(insn) || + sw64_insn_is_fblt(insn) || + sw64_insn_is_fble(insn) || + sw64_insn_is_fbgt(insn) || + sw64_insn_is_fbge(insn)) + return false; + + return true; +} + + +#ifdef CONFIG_KPROBES +// lldl rd_f +static bool __kprobes is_probed_between_atomic(kprobe_opcode_t *addr) +{ + int count = 0; + unsigned long size = 0, offset = 0; + kprobe_opcode_t *scan_start = NULL; + + if (kallsyms_lookup_size_offset((unsigned long)addr, &size, &offset)) + scan_start = addr - (offset / sizeof(kprobe_opcode_t)); + + while (scan_start < addr) { + if (sw64_insn_is_lldl(le32_to_cpu(*scan_start)) || + sw64_insn_is_lldw(le32_to_cpu(*scan_start))) + count++; + if (sw64_insn_is_rd_f(le32_to_cpu(*scan_start))) + count--; + scan_start++; + } + if (count) + return false; + + return true; +} + +bool __kprobes sw64_insn_can_kprobe(kprobe_opcode_t *addr) +{ + u32 insn = le32_to_cpu(*addr); + + if (!sw64_insn_is_steppable(insn)) { + pr_warn("addr is not steppable\n"); + return false; + } +#ifdef CONFIG_SUBARCH_C3B + if (!is_probed_between_atomic(addr)) { + pr_warn("addr between atomic can't probe\n"); + return false; + } +#endif + return true; +} +#endif diff --git a/arch/sw_64/kernel/kprobes/kprobes-ftrace.c b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c new file mode 100644 index 0000000000000000000000000000000000000000..a0b33a52a9e4101aba6ca5e173265e90018f6ddf --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes-ftrace.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Dynamic Ftrace based Kprobes Optimization + */ + +#include +#include +#include +#include +#include + +/* Ftrace callback handler for kprobes */ +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct ftrace_regs *fregs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + struct pt_regs *regs; + int bit; + + bit = ftrace_test_recursion_trylock(ip, parent_ip); + if (bit < 0) + return; + + regs = ftrace_get_regs(fregs); + preempt_disable_notrace(); + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto out; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + regs->regs[28] -= MCOUNT_INSN_SIZE; + + 
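+		/*
+		 * Winding r28 back by MCOUNT_INSN_SIZE above makes the saved
+		 * state look as though the trap had hit the probed
+		 * instruction itself, which is what the pre/post handlers
+		 * expect; the adjustment is undone below once the
+		 * pre-handler has run.
+		 */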
__this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) { + regs->regs[28] += MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + } + __this_cpu_write(current_kprobe, NULL); + } +out: + preempt_enable_notrace(); + ftrace_test_recursion_unlock(bit); +} +NOKPROBE_SYMBOL(kprobe_ftrace_handler); + +int arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} diff --git a/arch/sw_64/kernel/kprobes/kprobes.c b/arch/sw_64/kernel/kprobes/kprobes.c new file mode 100644 index 0000000000000000000000000000000000000000..024ce7d99e61688b7b95c5120e9432a030c65735 --- /dev/null +++ b/arch/sw_64/kernel/kprobes/kprobes.c @@ -0,0 +1,309 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel Probes (KProbes) + * arch/sw_64/kernel/kprobes.c + */ + +#include +#include +#include + +#include "common.h" + +static u32 breakpoint_insn = BREAK_KPROBE; +static u32 breakpoint2_insn = BREAK_KPROBE_SS; + +int post_kprobe_handler(struct pt_regs *regs); + +DEFINE_PER_CPU(struct kprobe *, current_kprobe); +DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); + +int __kprobes arch_prepare_kprobe(struct kprobe *p) +{ + int ret = 0; + extern char __start_rodata[]; + extern char __end_rodata[]; + unsigned long probe_addr = (unsigned long)p->addr; + + if (probe_addr & 0x3) + return -EINVAL; + + if (!sw64_insn_can_kprobe(p->addr)) + return -EINVAL; + /* copy instruction */ + p->opcode = le32_to_cpu(*p->addr); + + + if (probe_addr >= (unsigned long) __start_rodata && + probe_addr <= (unsigned long) __end_rodata) + return -EINVAL; + + + /* insn: must be on special executable page on mips. */ + p->ainsn.insn = get_insn_slot(); + if (!p->ainsn.insn) { + ret = -ENOMEM; + goto out; + } + /* + * In the kprobe->ainsn.insn[] array we store the original + * instruction at index zero and a break trap instruction at + * index one. 
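+	 * setup_singlestep() then points the PC at insn[0]: the CPU executes
+	 * the original instruction out of line and immediately traps on the
+	 * BREAK_KPROBE_SS at insn[1], after which post_kprobe_handler()
+	 * restores the PC to the saved target_pc just past the probe point.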
+ */ + p->ainsn.insn[0] = p->opcode; + p->ainsn.insn[1] = breakpoint2_insn; +out: + return ret; +} + +void __kprobes arch_arm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, breakpoint_insn); + flush_insn_slot(p); +} + +void __kprobes arch_disarm_kprobe(struct kprobe *p) +{ + sw64_insn_write(p->addr, p->opcode); + flush_insn_slot(p); +} + +void __kprobes arch_remove_kprobe(struct kprobe *p) +{ + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } +} + +static void save_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + kcb->prev_kprobe.kp = kprobe_running(); + kcb->prev_kprobe.status = kcb->kprobe_status; +} + +static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) +{ + __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); + kcb->kprobe_status = kcb->prev_kprobe.status; +} + +static void __kprobes set_current_kprobe(struct kprobe *p) +{ + __this_cpu_write(current_kprobe, p); +} + + +static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) +{ + if (reenter) { + save_previous_kprobe(kcb); + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_REENTER; + } else { + kcb->kprobe_status = KPROBE_HIT_SS; + } + + /* insn simulation */ + kcb->target_pc = regs->pc; + regs->pc = (unsigned long)&p->ainsn.insn[0]; +} + +static int __kprobes reenter_kprobe(struct kprobe *p, + struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + switch (kcb->kprobe_status) { + case KPROBE_HIT_SSDONE: + case KPROBE_HIT_ACTIVE: + kprobes_inc_nmissed_count(p); + setup_singlestep(p, regs, kcb, 1); + break; + case KPROBE_HIT_SS: + case KPROBE_REENTER: + pr_warn("Unrecoverable kprobe detected.\n"); + dump_kprobe(p); + BUG(); + break; + default: + WARN_ON(1); + return 0; + } + return 1; +} + +int __kprobes kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long addr = instruction_pointer(regs); + + if (user_mode(regs)) + return 0; + /* + * We don't want to be preempted for the entire + * duration of kprobe processing + */ + preempt_disable(); + kcb = get_kprobe_ctlblk(); + p = get_kprobe((kprobe_opcode_t *)(addr - 4)); + + if (p) { + if (kprobe_running()) { + if (reenter_kprobe(p, regs, kcb)) + return 1; + } else { + set_current_kprobe(p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + + /* + * If we have no pre-handler or it returned 0, we + * continue with normal processing. If we have a + * pre-handler and it returned non-zero, that means + * user handler setup registers to exit to another + * instruction, we must skip the single stepping. + */ + if (!p->pre_handler || !p->pre_handler(p, regs)) + setup_singlestep(p, regs, kcb, 0); + else + reset_current_kprobe(); + return 1; + } + } + return 0; + +} +int __kprobes post_kprobe_handler(struct pt_regs *regs) +{ + struct kprobe *cur = kprobe_running(); + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (!cur) + return 0; + + if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + cur->post_handler(cur, regs, 0); + } + + // resume_execution(cur, regs, kcb); + regs->pc = kcb->target_pc; + + + /* Restore back the original saved kprobes variables and continue. 
*/ + if (kcb->kprobe_status == KPROBE_REENTER) { + restore_previous_kprobe(kcb); + goto out; + } + reset_current_kprobe(); +out: + preempt_enable_no_resched(); + + return 1; +} + +int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long mmcsr) +{ + struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); + + if (kcb->kprobe_status & KPROBE_HIT_SS) { + regs->pc = kcb->target_pc; + + reset_current_kprobe(); + preempt_enable_no_resched(); + } + return 0; +} + +/* + * Wrapper routine for handling exceptions. + */ +int __kprobes kprobe_exceptions_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + + struct die_args *args = (struct die_args *)data; + int ret = NOTIFY_DONE; + + switch (val) { + case DIE_BREAK: + if (kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + case DIE_SSTEPBP: + if (post_kprobe_handler(args->regs)) + ret = NOTIFY_STOP; + break; + default: + break; + } + return ret; +} +/* + * Function return probe trampoline: + * - init_kprobes() establishes a probepoint here + * - When the probed function returns, this probe causes the + * handlers to fire + */ +static void __used kretprobe_trampoline_holder(void) +{ + asm volatile( + /* Keep the assembler from reordering and placing JR here. */ + ".set noreorder\n\t" + "nop\n\t" + ".global __kretprobe_trampoline\n" + "__kretprobe_trampoline:\n\t" + "nop\n\t" + : : : "memory"); +} + +void __kretprobe_trampoline(void); + +void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, + struct pt_regs *regs) +{ + ri->ret_addr = (kprobe_opcode_t *) regs->regs[26]; + ri->fp = NULL; + + /* Replace the return addr with trampoline addr */ + regs->regs[26] = (unsigned long)__kretprobe_trampoline; +} + +/* + * Called when the probe at kretprobe trampoline is hit + */ +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) +{ + unsigned long orig_ret_address; + + orig_ret_address = __kretprobe_trampoline_handler(regs, NULL); + instruction_pointer(regs) = orig_ret_address; + regs->regs[26] = orig_ret_address; + + /* + * By returning a non-zero value, we are telling + * kprobe_handler() that we don't want the post_handler + * to run (and have re-enabled preemption) + */ + return 1; +} + +int __kprobes arch_trampoline_kprobe(struct kprobe *p) +{ + if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline) + return 1; + + return 0; +} + +static struct kprobe trampoline_p = { + .addr = (kprobe_opcode_t *)__kretprobe_trampoline, + .pre_handler = trampoline_probe_handler +}; + +int __init arch_init_kprobes(void) +{ + return register_kprobe(&trampoline_p); +} diff --git a/arch/sw_64/kernel/machine_kexec.c b/arch/sw_64/kernel/machine_kexec.c new file mode 100644 index 0000000000000000000000000000000000000000..950998476cdaced4b7368cb4712a1d7081e11047 --- /dev/null +++ b/arch/sw_64/kernel/machine_kexec.c @@ -0,0 +1,209 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * machine_kexec.c for kexec + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. 
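Everything above is driven through the generic kprobes API. For reference, a minimal consumer module looks like the sketch below; the probed symbol is illustrative::

    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
            pr_info("pre: %ps hit, pc=%#lx\n", p->addr,
                    instruction_pointer(regs));
            return 0;       /* 0 lets setup_singlestep() run the real insn */
    }

    static struct kprobe demo_kp = {
            .symbol_name    = "kernel_clone",       /* illustrative target */
            .pre_handler    = demo_pre,
    };

    static int __init demo_init(void)
    {
            return register_kprobe(&demo_kp); /* arch_prepare_kprobe() runs here */
    }

    static void __exit demo_exit(void)
    {
            unregister_kprobe(&demo_kp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");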
+ */ +#include +#include +#include +#include +#include + +#include + +extern void *kexec_control_page; +extern const unsigned char relocate_new_kernel[]; +extern const size_t relocate_new_kernel_size; + +extern unsigned long kexec_start_address; +extern unsigned long kexec_indirection_page; + +static atomic_t waiting_for_crash_ipi; + +#ifdef CONFIG_SMP +extern struct smp_rcb_struct *smp_rcb; + +/* + * Wait for relocation code is prepared and send + * secondary CPUs to spin until kernel is relocated. + */ +static void kexec_smp_down(void *ignored) +{ + int cpu = smp_processor_id(); + + local_irq_disable(); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + set_cpu_online(cpu, false); + reset_cpu(cpu); +} +#endif + +int machine_kexec_prepare(struct kimage *kimage) +{ + return 0; +} + +void machine_kexec_cleanup(struct kimage *kimage) +{ +} + +void machine_shutdown(void) +{ +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + smp_call_function(kexec_smp_down, NULL, 0); + smp_wmb(); + while (num_online_cpus() > 1) { + cpu_relax(); + mdelay(1); + } +#endif +} + +#ifdef CONFIG_SMP +static void machine_crash_nonpanic_core(void *unused) +{ + int cpu; + struct pt_regs regs; + + cpu = smp_processor_id(); + + local_irq_disable(); + crash_setup_regs(®s, NULL); + pr_debug("CPU %u will stop doing anything useful since another CPU has crashed\n", cpu); + crash_save_cpu(®s, cpu); + flush_cache_all(); + + set_cpu_online(cpu, false); + atomic_dec(&waiting_for_crash_ipi); + while (READ_ONCE(smp_rcb->ready) != 0) + mdelay(1); + if (cpu != 0) + reset_cpu(cpu); + else + machine_kexec(kexec_crash_image); +} +#else +static inline void machine_crash_nonpanic_core(void *unused) { } +#endif + +static void machine_kexec_mask_interrupts(void) +{ + unsigned int i; + struct irq_desc *desc; + + for_each_irq_desc(i, desc) { + struct irq_chip *chip; + + chip = irq_desc_get_chip(desc); + if (!chip) + continue; + + if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) + chip->irq_eoi(&desc->irq_data); + + if (chip->irq_mask) + chip->irq_mask(&desc->irq_data); + + if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) + chip->irq_disable(&desc->irq_data); + } +} + +void machine_crash_shutdown(struct pt_regs *regs) +{ + int cpu; + unsigned long msecs; + + cpu = smp_processor_id(); + local_irq_disable(); + kernel_restart_prepare(NULL); + atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); + smp_call_function(machine_crash_nonpanic_core, NULL, false); + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); + msecs--; + } + if (atomic_read(&waiting_for_crash_ipi) > 0) + pr_warn("Non-crashing CPUs did not react to IPI\n"); + + crash_save_cpu(regs, cpu); + machine_kexec_mask_interrupts(); + pr_info("Loading crashdump kernel...\n"); +#ifdef CONFIG_SMP + WRITE_ONCE(smp_rcb->ready, 0); + if (cpu != 0) + reset_cpu(cpu); +#endif +} + +#define phys_to_ktext(pa) (__START_KERNEL_map + (pa)) + +typedef void (*noretfun_t)(void) __noreturn; + +void machine_kexec(struct kimage *image) +{ + void *reboot_code_buffer; + unsigned long entry; + unsigned long *ptr; + struct boot_params *params = sunway_boot_params; + + + reboot_code_buffer = kexec_control_page; + pr_info("reboot_code_buffer = %px\n", reboot_code_buffer); + kexec_start_address = phys_to_ktext(image->start); + pr_info("kexec_start_address = %#lx\n", kexec_start_address); + if (image->type == KEXEC_TYPE_DEFAULT) + kexec_indirection_page = + (unsigned long) 
phys_to_virt(image->head & PAGE_MASK); + else + kexec_indirection_page = (unsigned long)&image->head; + + pr_info("kexec_indirection_page = %#lx, image->head=%#lx\n", + kexec_indirection_page, image->head); + + params->cmdline = kexec_start_address - COMMAND_LINE_OFF; + params->initrd_start = *(__u64 *)(kexec_start_address - INITRD_START_OFF); + params->initrd_size = *(__u64 *)(kexec_start_address - INITRD_SIZE_OFF); + + pr_info("initrd_start = %#llx, initrd_size = %#llx\n" + "dtb_start = %#llx, efi_systab = %#llx\n" + "efi_memmap = %#llx, efi_memmap_size = %#llx\n" + "efi_memdesc_size = %#llx, efi_memdesc_version = %#llx\n" + "cmdline = %#llx\n", + params->initrd_start, params->initrd_size, + params->dtb_start, params->efi_systab, + params->efi_memmap, params->efi_memmap_size, + params->efi_memdesc_size, params->efi_memdesc_version, + params->cmdline); + + memcpy(reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); + + /* + * The generic kexec code builds a page list with physical + * addresses. they are directly accessible through KSEG0 (or + * CKSEG0 or XPHYS if on 64bit system), hence the + * phys_to_virt() call. + */ + for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); + ptr = (entry & IND_INDIRECTION) ? + phys_to_virt(entry & PAGE_MASK) : ptr + 1) { + if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || + *ptr & IND_DESTINATION) + *ptr = (unsigned long) phys_to_virt(*ptr); + } + + /* + * we do not want to be bothered. + */ + local_irq_disable(); + + pr_info("Will call new kernel at %08lx\n", image->start); + pr_info("Bye ...\n"); + smp_wmb(); + ((noretfun_t) reboot_code_buffer)(); +} diff --git a/arch/sw_64/kernel/match.c b/arch/sw_64/kernel/match.c new file mode 100644 index 0000000000000000000000000000000000000000..3926391270daa6d3e03ce7e34dd9b04eba111a95 --- /dev/null +++ b/arch/sw_64/kernel/match.c @@ -0,0 +1,551 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + + +char da_match_buf[1024], dv_match_buf[1024], dav_match_buf[1024]; +char ia_match_buf[1024], iv_match_buf[1024], ida_match_buf[1024]; + +unsigned long da_match_cf1, da_match_cf2, da_match_cf3; +unsigned long dv_match_cf1, dv_match_cf2, dv_match_cf3; +unsigned long dav_match_cf1, dav_match_cf2, dav_match_cf3, + dav_match_cf4, dav_match_cf5; +unsigned long ia_match_cf1, ia_match_cf2, ia_match_cf3, ia_match_cf4; +unsigned long iv_match_cf1, iv_match_cf2; +unsigned long ida_match_cf1, ida_match_cf2; + +static int da_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", da_match_buf); + return 0; +} + +static int dv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dv_match_buf); + return 0; +} + +static int dav_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", dav_match_buf); + return 0; +} + +static int ia_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ia_match_buf); + return 0; +} + +static int iv_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", iv_match_buf); + return 0; +} + +static int ida_match_show(struct seq_file *m, void *v) +{ + + seq_printf(m, "%s", ida_match_buf); + return 0; +} + +static int da_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, da_match_show, NULL); +} + +static int dv_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dv_match_show, NULL); +} + +static int dav_match_open(struct inode *inode, struct file *file) +{ + return single_open(file, dav_match_show, NULL); +} + 
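The pointer-list rewrite at the end of machine_kexec() above walks the IND_*-encoded entry list that the generic kexec code builds. A sketch of how such a list is decoded; the flag values match include/linux/kexec.h and the page addresses are made up::

    #include <stdio.h>

    #define IND_DESTINATION  0x1    /* following sources copy to this page */
    #define IND_INDIRECTION  0x2    /* entry points at another entry page */
    #define IND_DONE         0x4    /* end of the list */
    #define IND_SOURCE       0x8    /* copy this page to the destination */

    static void describe(unsigned long entry)
    {
            unsigned long page = entry & ~0xfUL;    /* strip the flag bits */

            if (entry & IND_DONE)
                    printf("done\n");
            else if (entry & IND_INDIRECTION)
                    printf("indirection -> %#lx\n", page);
            else if (entry & IND_DESTINATION)
                    printf("destination  %#lx\n", page);
            else if (entry & IND_SOURCE)
                    printf("source       %#lx\n", page);
    }

    int main(void)
    {
            unsigned long demo[] = { 0x200000 | IND_DESTINATION,
                                     0x300000 | IND_SOURCE, IND_DONE };

            for (int i = 0; i < 3; i++)
                    describe(demo[i]);
            return 0;
    }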
+static int ia_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ia_match_show, NULL);
+}
+
+static int iv_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, iv_match_show, NULL);
+}
+
+static int ida_match_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ida_match_show, NULL);
+}
+
+static void
+write_da_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(da_match_cf1, CSR_DA_MATCH);
+	write_csr(da_match_cf2, CSR_DA_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S)
+			| (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH));
+	dc_ctl |= da_match_cf3;
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_dv_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(dv_match_cf1, CSR_DV_MATCH);
+	write_csr(dv_match_cf2, CSR_DV_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH));
+	dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | dv_match_cf3);
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_dav_match(void *i)
+{
+	unsigned long dc_ctl;
+
+	write_csr(dav_match_cf1, CSR_DA_MATCH);
+	write_csr(dav_match_cf2, CSR_DA_MASK);
+	write_csr(dav_match_cf3, CSR_DV_MATCH);
+	write_csr(dav_match_cf4, CSR_DV_MASK);
+	dc_ctl = read_csr(CSR_DC_CTLP);
+	dc_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S)
+			| (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH));
+	dc_ctl |= ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)
+			| dav_match_cf5);
+	write_csr(dc_ctl, CSR_DC_CTLP);
+}
+
+static void
+write_ia_match(void *i)
+{
+	ia_match_cf1 |= (0x1UL << IA_MATCH_EN_S);
+	write_csr_imb(ia_match_cf1, CSR_IA_MATCH);
+	write_csr_imb(ia_match_cf2, CSR_IA_MASK);
+	write_csr(((0x3ffUL << 18) | ia_match_cf3), CSR_IA_VPNMATCH);
+	write_csr(((0x3ffUL << 18) | ia_match_cf4), CSR_IA_UPNMATCH);
+}
+
+static void
+write_iv_match(void *i)
+{
+	unsigned long ia_match_tmp;
+
+	ia_match_tmp = read_csr(CSR_IA_MATCH);
+	ia_match_tmp &= ~(0x1UL << IV_PM_EN_S);
+	ia_match_tmp |= ((((iv_match_cf2 >> IV_PM_EN_S) & 0x1) << IV_PM_EN_S)
+			| (iv_match_cf2 & 0x3) | (0x1UL << IV_MATCH_EN_S));
+	write_csr_imb(iv_match_cf1, CSR_IV_MATCH);
+	write_csr_imb(ia_match_tmp, CSR_IA_MATCH);
+}
+
+static void
+write_ida_match(void *i)
+{
+	ida_match_cf1 |= (0x1UL << IDA_MATCH_EN_S);
+	write_csr(ida_match_cf1, CSR_IDA_MATCH);
+	write_csr(ida_match_cf2, CSR_IDA_MASK);
+}
+
+/*
+ * Parse up to @n whitespace-separated integers from @user_buf into @vals,
+ * keeping a NUL-terminated copy of the raw input in @report_buf so that
+ * the corresponding debugfs read can echo it back.  All copies are
+ * bounded by the scratch buffer size.
+ */
+static ssize_t match_parse(const char __user *user_buf, size_t len,
+			   char *report_buf, unsigned long *vals, int n)
+{
+	char tmp[512];
+	char *p = tmp, *tok;
+	size_t size;
+	int i, err;
+
+	size = min(sizeof(tmp) - 1, len);
+	if (copy_from_user(tmp, user_buf, size))
+		return -EFAULT;
+	tmp[size] = '\0';
+	strcpy(report_buf, tmp);
+
+	for (i = 0; i < n; i++) {
+		tok = strsep(&p, " ");
+		if (!tok)
+			return -EINVAL;
+		err = kstrtoul(tok, 0, &vals[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static ssize_t da_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	unsigned long vals[3];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, da_match_buf, vals, 3);
+	if (err)
+		return err;
+
+	da_match_cf1 = vals[0];
+	da_match_cf2 = vals[1];
+	da_match_cf3 = vals[2];
+
+	if (on_each_cpu(write_da_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dv_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	unsigned long vals[3];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, dv_match_buf, vals, 3);
+	if (err)
+		return err;
+
+	dv_match_cf1 = vals[0];
+	dv_match_cf2 = vals[1];
+	dv_match_cf3 = vals[2];
+
+	if (on_each_cpu(write_dv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t dav_match_set(struct file *file, const char __user *user_buf,
+			     size_t len, loff_t *ppos)
+{
+	unsigned long vals[5];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, dav_match_buf, vals, 5);
+	if (err)
+		return err;
+
+	dav_match_cf1 = vals[0];
+	dav_match_cf2 = vals[1];
+	dav_match_cf3 = vals[2];
+	dav_match_cf4 = vals[3];
+	dav_match_cf5 = vals[4];
+
+	if (on_each_cpu(write_dav_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t ia_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	unsigned long vals[4];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, ia_match_buf, vals, 4);
+	if (err)
+		return err;
+
+	ia_match_cf1 = vals[0];
+	ia_match_cf2 = vals[1];
+	ia_match_cf3 = vals[2];
+	ia_match_cf4 = vals[3];
+
+	if (on_each_cpu(write_ia_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t iv_match_set(struct file *file, const char __user *user_buf,
+			    size_t len, loff_t *ppos)
+{
+	unsigned long vals[2];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, iv_match_buf, vals, 2);
+	if (err)
+		return err;
+
+	iv_match_cf1 = vals[0];
+	iv_match_cf2 = vals[1];
+
+	if (on_each_cpu(write_iv_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static ssize_t ida_match_set(struct file *file, const char __user *user_buf,
+			     size_t len, loff_t *ppos)
+{
+	unsigned long vals[2];
+	ssize_t err;
+
+	err = match_parse(user_buf, len, ida_match_buf, vals, 2);
+	if (err)
+		return err;
+
+	ida_match_cf1 = vals[0];
+	ida_match_cf2 = vals[1];
+
+	if (on_each_cpu(write_ida_match, NULL, 1))
+		pr_crit("%s: timed out\n", __func__);
+
+	return len;
+}
+
+static const struct file_operations set_da_match_fops = {
+	.open = da_match_open,
+	.read = seq_read,
+	.write = da_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_dv_match_fops = {
+	.open = dv_match_open,
+	.read = seq_read,
+	.write = dv_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_dav_match_fops = {
+	.open = dav_match_open,
+	.read = seq_read,
+	.write = dav_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_ia_match_fops = {
+	.open = ia_match_open,
+	.read = seq_read,
+	.write = ia_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_iv_match_fops = {
+	.open = iv_match_open,
+	.read = seq_read,
+	.write = iv_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct file_operations set_ida_match_fops = {
+	.open = ida_match_open,
+	.read = seq_read,
+	.write = ida_match_set,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static int __init match_debugfs_init(void)
+{
+	struct dentry *match_entry;
+
+	if (!sw64_debugfs_dir)
+		return -ENODEV;
+
+	match_entry = debugfs_create_file("da_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_da_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("dv_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_dv_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("dav_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_dav_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("ia_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_ia_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("iv_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_iv_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	match_entry = debugfs_create_file("ida_match", 0600,
+					  sw64_debugfs_dir, NULL,
+					  &set_ida_match_fops);
+	if (!match_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+late_initcall(match_debugfs_init);
diff --git a/arch/sw_64/kernel/module.c b/arch/sw_64/kernel/module.c
new file mode 100644
index 0000000000000000000000000000000000000000..67264e3644a75341b898b9419f2b89b7119c5981
--- /dev/null
+++ b/arch/sw_64/kernel/module.c
@@ -0,0 +1,279 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+
+#define DEBUGP(fmt...)
+
+/* Allocate the GOT at the end of the core sections.
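+ * Each module gets a private GOT because a LITERAL load reaches the
+ * table through a signed 16-bit offset from gp; apply_relocate_add()
+ * below places gp at got + 0x8000, so one gp value spans the whole
+ * 64KB window.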
*/ + +struct got_entry { + struct got_entry *next; + Elf64_Sxword r_addend; + int got_offset; +}; + +static inline void +process_reloc_for_got(Elf64_Rela *rela, + struct got_entry *chains, Elf64_Xword *poffset) +{ + unsigned long r_sym = ELF64_R_SYM(rela->r_info); + unsigned long r_type = ELF64_R_TYPE(rela->r_info); + Elf64_Sxword r_addend = rela->r_addend; + struct got_entry *g; + + if (r_type != R_SW64_LITERAL) + return; + + for (g = chains + r_sym; g ; g = g->next) + if (g->r_addend == r_addend) { + if (g->got_offset == 0) { + g->got_offset = *poffset; + *poffset += 8; + } + goto found_entry; + } + + g = kmalloc(sizeof(*g), GFP_KERNEL); + g->next = chains[r_sym].next; + g->r_addend = r_addend; + g->got_offset = *poffset; + *poffset += 8; + chains[r_sym].next = g; + + found_entry: + /* + * Trick: most of the ELF64_R_TYPE field is unused. There are + * 42 valid relocation types, and a 32-bit field. Co-opt the + * bits above 256 to store the got offset for this reloc. + */ + rela->r_info |= g->got_offset << 8; +} + +int +module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs, + char *secstrings, struct module *me) +{ + struct got_entry *chains; + Elf64_Rela *rela; + Elf64_Shdr *esechdrs, *symtab, *s, *got; + unsigned long nsyms, nrela, i; + + esechdrs = sechdrs + hdr->e_shnum; + symtab = got = NULL; + + /* Find out how large the symbol table is. Allocate one got_entry + * head per symbol. Normally this will be enough, but not always. + * We'll chain different offsets for the symbol down each head. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_SYMTAB) + symtab = s; + else if (!strcmp(".got", secstrings + s->sh_name)) { + got = s; + me->arch.gotsecindex = s - sechdrs; + } + + if (!symtab) { + pr_err("module %s: no symbol table\n", me->name); + return -ENOEXEC; + } + if (!got) { + pr_err("module %s: no got section\n", me->name); + return -ENOEXEC; + } + + nsyms = symtab->sh_size / sizeof(Elf64_Sym); + chains = kcalloc(nsyms, sizeof(struct got_entry), GFP_KERNEL); + if (!chains) { + pr_err("module %s: no memory for symbol chain buffer\n", + me->name); + return -ENOMEM; + } + + got->sh_size = 0; + got->sh_addralign = 8; + got->sh_type = SHT_NOBITS; + + /* Examine all LITERAL relocations to find out what GOT entries + * are required. This sizes the GOT section as well. + */ + for (s = sechdrs; s < esechdrs; ++s) + if (s->sh_type == SHT_RELA) { + nrela = s->sh_size / sizeof(Elf64_Rela); + rela = (void *)hdr + s->sh_offset; + for (i = 0; i < nrela; ++i) + process_reloc_for_got(rela+i, chains, + &got->sh_size); + } + + /* Free the memory we allocated. */ + for (i = 0; i < nsyms; ++i) { + struct got_entry *g, *n; + + for (g = chains[i].next; g ; g = n) { + n = g->next; + kfree(g); + } + } + kfree(chains); + + return 0; +} + +int +apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab, + unsigned int symindex, unsigned int relsec, + struct module *me) +{ + Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr; + unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela); + Elf64_Sym *symtab, *sym; + void *base, *location; + unsigned long got, gp; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + + base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr; + symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr; + + /* The small sections were sorted to the end of the segment. + * The following should definitely cover them. 
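+	 * A worked example of the hi/lo split used by the GPDISP case
+	 * below: for value = 0x12348000, lo = (short)value = -0x8000 after
+	 * sign extension, so hi = value - lo = 0x12350000 absorbs the
+	 * carry, and the consuming instruction pair reassembles
+	 * (0x1235 << 16) + (-0x8000) = 0x12348000.  The hi + lo != value
+	 * check rejects anything that cannot be encoded this way.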
+ */ + got = sechdrs[me->arch.gotsecindex].sh_addr; + gp = got + 0x8000; + + for (i = 0; i < n; i++) { + unsigned long r_sym = ELF64_R_SYM(rela[i].r_info); + unsigned long r_type = ELF64_R_TYPE(rela[i].r_info); + unsigned long r_got_offset = r_type >> 8; + unsigned long value, hi, lo; + + r_type &= 0xff; + + /* This is where to make the change. */ + location = base + rela[i].r_offset; + + /* This is the symbol it is referring to. Note that all + * unresolved symbols have been resolved. + */ + sym = symtab + r_sym; + value = sym->st_value + rela[i].r_addend; + + switch (r_type) { + case R_SW64_NONE: + break; + case R_SW64_REFLONG: + *(u32 *)location = value; + break; + case R_SW64_REFQUAD: + /* BUG() can produce misaligned relocations. */ + ((u32 *)location)[0] = value; + ((u32 *)location)[1] = value >> 32; + break; + case R_SW64_GPREL32: + value -= gp; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_LITERAL: + hi = got + r_got_offset; + lo = hi - gp; + if ((short)lo != lo) { + unsigned long over_offset = (lo + 0x8000) >> 16; + + if ((over_offset & 0x8000) == 0) { + *(u16 *)(location - 0x4) = over_offset; + *(u16 *)location = lo - ((over_offset << 16) + gp); + *(u64 *)hi = value; + } else { + goto reloc_overflow; + } + } else { + *(u16 *)location = lo; + *(u64 *)hi = value; + } + break; + case R_SW64_LITERAL_GOT: + /* empty for now need to fill */ + break; + case R_SW64_LITUSE: + break; + case R_SW64_GPDISP: + value = gp - (u64)location; + lo = (short)value; + hi = (int)(value - lo); + if (hi + lo != value) + goto reloc_overflow; + *(u16 *)location = hi >> 16; + *(u16 *)(location + rela[i].r_addend) = lo; + break; + case R_SW64_BRSGP: + /* + * BRSGP is only allowed to bind to local symbols. + * If the section is undef, this means that the + * value was resolved from somewhere else. + */ + if (sym->st_shndx == SHN_UNDEF) + goto reloc_overflow; + if ((sym->st_other & STO_SW64_STD_GPLOAD) == + STO_SW64_STD_GPLOAD) + /* Omit the prologue. 
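+			 * The standard gp-load prologue is two instructions
+			 * (8 bytes), so a caller that already holds the
+			 * right gp may branch to just past it.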
*/ + value += 8; + fallthrough; + case R_SW64_BRADDR: + value -= (u64)location + 4; + if (value & 3) + goto reloc_overflow; + value = (long)value >> 2; + if (value + (1<<21) >= 1<<22) + goto reloc_overflow; + value &= 0x1fffff; + value |= *(u32 *)location & ~0x1fffff; + *(u32 *)location = value; + break; + case R_SW64_HINT: + break; + case R_SW64_SREL32: + value -= (u64)location; + if ((int)value != value) + goto reloc_overflow; + *(u32 *)location = value; + break; + case R_SW64_SREL64: + value -= (u64)location; + *(u64 *)location = value; + break; + case R_SW64_GPRELHIGH: + value = (long)(value - gp + 0x8000) >> 16; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + case R_SW64_GPRELLOW: + value -= gp; + *(u16 *)location = value; + break; + case R_SW64_GPREL16: + value -= gp; + if ((short) value != value) + goto reloc_overflow; + *(u16 *)location = value; + break; + default: + pr_err("module %s: Unknown relocation: %lu\n", me->name, r_type); + return -ENOEXEC; +reloc_overflow: + if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION) + pr_err("module %s: Relocation (type %lu) overflow vs section %d\n", + me->name, r_type, sym->st_shndx); + else + pr_err("module %s: Relocation (type %lu) overflow vs %s\n", + me->name, r_type, strtab + sym->st_name); + return -ENOEXEC; + } + } + + return 0; +} diff --git a/arch/sw_64/kernel/pci-noop.c b/arch/sw_64/kernel/pci-noop.c new file mode 100644 index 0000000000000000000000000000000000000000..abfba92fa6a9c388542859b69884b208a424c9c3 --- /dev/null +++ b/arch/sw_64/kernel/pci-noop.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw/kernel/pci-noop.c + * + * Stub PCI interfaces for NO PCI kernels. + */ + +#include +#include +#include +#include + +/* + * The PCI controller list. 
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +asmlinkage long +sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_read(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +asmlinkage long +sys_pciconfig_write(unsigned long bus, unsigned long dfn, + unsigned long off, unsigned long len, void *buf) +{ + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + else + return -ENODEV; +} + +static void *sw64_noop_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + void *ret; + + if (!dev || *dev->dma_mask >= 0xffffffffUL) + gfp &= ~GFP_DMA; + ret = (void *)__get_free_pages(gfp, get_order(size)); + if (ret) { + memset(ret, 0, size); + *dma_handle = virt_to_phys(ret); + } + return ret; +} + +static void sw64_noop_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_addr, + unsigned long attrs) +{ + free_pages((unsigned long)cpu_addr, get_order(size)); +} + +static dma_addr_t sw64_noop_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + return page_to_pa(page) + offset; +} + +static int sw64_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents, + enum dma_data_direction dir, unsigned long attrs) +{ + int i; + struct scatterlist *sg; + + for_each_sg(sgl, sg, nents, i) { + void *va; + + BUG_ON(!sg_page(sg)); + va = sg_virt(sg); + sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va); + sg_dma_len(sg) = sg->length; + } + + return nents; +} + +static int sw64_noop_supported(struct device *dev, u64 mask) +{ + return mask < 0x00ffffffUL ? 0 : 1; +} + +const struct dma_map_ops sw64_noop_ops = { + .alloc = sw64_noop_alloc_coherent, + .free = sw64_noop_free_coherent, + .map_page = sw64_noop_map_page, + .map_sg = sw64_noop_map_sg, + .dma_supported = sw64_noop_supported, +}; + +const struct dma_map_ops *dma_ops = &sw64_noop_ops; +EXPORT_SYMBOL(dma_ops); + +void __init common_init_pci(void) +{ +} + +void __init sw64_init_arch(void) { } +void __init sw64_init_irq(void) { } diff --git a/arch/sw_64/kernel/perf_event.c b/arch/sw_64/kernel/perf_event.c new file mode 100644 index 0000000000000000000000000000000000000000..83bb051be9de4767779d8783b31c8eda4277caa2 --- /dev/null +++ b/arch/sw_64/kernel/perf_event.c @@ -0,0 +1,787 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Performance events support for SW64 platforms. + * + * This code is based upon riscv and sparc perf event code. + */ + +#include +#include + +/* For tracking PMCs and the hw events they monitor on each CPU. */ +struct cpu_hw_events { + /* + * Set the bit (indexed by the counter number) when the counter + * is used for an event. + */ + unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + /* Array of events current scheduled on this cpu. 
*/ + struct perf_event *event[MAX_HWEVENTS]; +}; + +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +struct sw64_perf_event { + /* pmu index */ + int counter; + /* events selector */ + int event; +}; + +/* + * A structure to hold the description of the PMCs available on a particular + * type of SW64 CPU. + */ +struct sw64_pmu_t { + /* generic hw/cache events table */ + const struct sw64_perf_event *hw_events; + const struct sw64_perf_event (*cache_events)[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + + /* method used to map hw/cache events */ + const struct sw64_perf_event *(*map_hw_event)(u64 config); + const struct sw64_perf_event *(*map_cache_event)(u64 config); + + /* The number of entries in the hw_event_map */ + int max_events; + + /* The number of counters on this pmu */ + int num_pmcs; + + /* + * All PMC counters reside in the IBOX register PCTR. This is the + * LSB of the counter. + */ + int pmc_count_shift[MAX_HWEVENTS]; + + /* + * The mask that isolates the PMC bits when the LSB of the counter + * is shifted to bit 0. + */ + unsigned long pmc_count_mask; + + /* The maximum period the PMC can count. */ + unsigned long pmc_max_period; + + /* + * The maximum value that may be written to the counter due to + * hardware restrictions is pmc_max_period - pmc_left. + */ + long pmc_left; + + /* Subroutine for checking validity of a raw event for this PMU. */ + bool (*raw_event_valid)(u64 config); +}; + +/* + * The SW64 PMU description currently in operation. This is set during + * the boot process to the specific CPU of the machine. + */ +static const struct sw64_pmu_t *sw64_pmu; + +/* + * SW64 PMC event types + * + * There is no one-to-one mapping of the possible hw event types to the + * actual codes that are used to program the PMCs hence we introduce our + * own hw event type identifiers. 
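+ * Each entry in the tables below therefore pairs the fixed counter that
+ * can count the event (PMC_PC0 or PMC_PC1) with the selector value that
+ * is programmed into it.  For PERF_TYPE_HW_CACHE the user-visible config
+ * packs (type | op << 8 | result << 16); an L1D read miss, for example,
+ * is 0x10000, which core3_map_cache_event() decodes field by field.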
+ */ +#define SW64_OP_UNSUP {-1, -1} + +/* Mapping of the hw event types to the perf tool interface */ +static const struct sw64_perf_event core3_hw_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = {PMC_PC0, PC0_CPU_CYCLES}, + [PERF_COUNT_HW_INSTRUCTIONS] = {PMC_PC0, PC0_INSTRUCTIONS}, + [PERF_COUNT_HW_CACHE_REFERENCES] = {PMC_PC0, PC0_SCACHE_REFERENCES}, + [PERF_COUNT_HW_CACHE_MISSES] = {PMC_PC1, PC1_SCACHE_MISSES}, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = {PMC_PC0, PC0_BRANCH_INSTRUCTIONS}, + [PERF_COUNT_HW_BRANCH_MISSES] = {PMC_PC1, PC1_BRANCH_MISSES}, +}; + +/* Mapping of the hw cache event types to the perf tool interface */ +#define C(x) PERF_COUNT_HW_CACHE_##x +static const struct sw64_perf_event core3_cache_event_map + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DCACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DCACHE_MISSES} + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ICACHE_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ICACHE_READ_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_DTB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_DTB_SINGLE_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = {PMC_PC0, PC0_ITB_READ}, + [C(RESULT_MISS)] = {PMC_PC1, PC1_ITB_MISSES}, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = SW64_OP_UNSUP, + [C(RESULT_MISS)] = SW64_OP_UNSUP, + }, + }, + +}; + +static const struct sw64_perf_event *core3_map_hw_event(u64 config) +{ + return &sw64_pmu->hw_events[config]; +} + +static const struct sw64_perf_event *core3_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result; + const struct sw64_perf_event *perf_event; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 
+ return ERR_PTR(-EINVAL); + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return ERR_PTR(-EINVAL); + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return ERR_PTR(-EINVAL); + + perf_event = &((*sw64_pmu->cache_events)[cache_type][cache_op][cache_result]); + if (perf_event->counter == -1) /* SW64_OP_UNSUP */ + return ERR_PTR(-ENOENT); + + return perf_event; +} + +/* + * r0xx for counter0, r1yy for counter1. + * According to the datasheet, 00 <= xx <= 0F, 00 <= yy <= 3D + */ +static bool core3_raw_event_valid(u64 config) +{ + if ((config >= PC0_RAW_BASE && config <= (PC0_RAW_BASE + PC0_MAX)) || + (config >= PC1_RAW_BASE && config <= (PC1_RAW_BASE + PC1_MAX))) + return true; + + pr_info("sw64 pmu: invalid raw event config %#llx\n", config); + return false; +} + +static const struct sw64_pmu_t core3_pmu = { + .max_events = ARRAY_SIZE(core3_hw_event_map), + .hw_events = core3_hw_event_map, + .map_hw_event = core3_map_hw_event, + .cache_events = &core3_cache_event_map, + .map_cache_event = core3_map_cache_event, + .num_pmcs = MAX_HWEVENTS, + .pmc_count_mask = PMC_COUNT_MASK, + .pmc_max_period = PMC_COUNT_MASK, + .pmc_left = 4, + .raw_event_valid = core3_raw_event_valid, +}; + +/* + * Low-level functions: reading/writing counters + */ +static void sw64_write_pmc(int idx, unsigned long val) +{ + wrperfmon(PMC_CMD_WRITE_BASE + idx, val); +} + +static unsigned long sw64_read_pmc(int idx) +{ + return wrperfmon(PMC_CMD_READ, idx); +} + +/* Set a new period to sample over */ +static int sw64_perf_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + long left = local64_read(&hwc->period_left); + long period = hwc->sample_period; + int overflow = 0; + unsigned long value; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + overflow = 1; + } + + if (left > (long)sw64_pmu->pmc_max_period) + left = sw64_pmu->pmc_max_period; + + value = sw64_pmu->pmc_max_period - left; + local64_set(&hwc->prev_count, value); + sw64_write_pmc(idx, value); + + perf_event_update_userpage(event); + + return overflow; +} + +/* + * Calculates the count (the 'delta') since the last time the PMC was read. + * + * As the PMCs' full period can easily be exceeded within the perf system + * sampling period we cannot use any high order bits as a guard bit in the + * PMCs to detect overflow as is done by other architectures. The code here + * calculates the delta on the basis that there is no overflow when ovf is + * zero. The value passed via ovf by the interrupt handler corrects for + * overflow. + * + * This can be racey on rare occasions -- a call to this routine can occur + * with an overflowed counter just before the PMI service routine is called. + * The check for delta negative hopefully always rectifies this situation. 
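+ * A scaled-down example: with an 8-bit counter, prev = 0xfe and a wrap
+ * to new = 0x01 before the PMI is serviced gives delta = -0xfd; the
+ * delta < 0 path adds pmc_max_period + 1 (0x100 here), recovering the
+ * true count of 3 events.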
+ */ +static unsigned long sw64_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx, long ovf) +{ + long prev_raw_count, new_raw_count; + long delta; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = sw64_read_pmc(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - (prev_raw_count & sw64_pmu->pmc_count_mask)) + ovf; + + /* It is possible on very rare occasions that the PMC has overflowed + * but the interrupt is yet to come. Detect and fix this situation. + */ + if (unlikely(delta < 0)) + delta += sw64_pmu->pmc_max_period + 1; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +/* + * State transition functions: + * + * add()/del() & start()/stop() + * + */ + +/* + * pmu->start: start the event. + */ +static void sw64_pmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) + return; + + if (flags & PERF_EF_RELOAD) { + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + sw64_perf_event_set_period(event, hwc, hwc->idx); + } + + hwc->state = 0; + + /* counting in selected modes, for both counters */ + wrperfmon(PMC_CMD_PM, hwc->config_base); + wrperfmon(PMC_CMD_EVENT_BASE + hwc->idx, hwc->event_base); + wrperfmon(PMC_CMD_ENABLE, PMC_ENABLE_BASE + hwc->idx); +} + +/* + * pmu->stop: stop the counter + */ +static void sw64_pmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!(hwc->state & PERF_HES_STOPPED)) { + wrperfmon(PMC_CMD_DISABLE, PMC_DISABLE_BASE + hwc->idx); + hwc->state |= PERF_HES_STOPPED; + barrier(); + } + + if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { + sw64_perf_event_update(event, hwc, hwc->idx, 0); + hwc->state |= PERF_HES_UPTODATE; + } +} + +/* + * pmu->add: add the event to PMU. + */ +static int sw64_pmu_add(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int err = 0; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + if (__test_and_set_bit(hwc->idx, cpuc->used_mask)) { + err = -ENOSPC; + goto out; + } + + cpuc->event[hwc->idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + sw64_pmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + local_irq_restore(irq_flags); + + return err; +} + +/* + * pmu->del: delete the event from PMU. + */ +static void sw64_pmu_del(struct perf_event *event, int flags) +{ + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + unsigned long irq_flags; + + local_irq_save(irq_flags); + + sw64_pmu_stop(event, PERF_EF_UPDATE); + cpuc->event[hwc->idx] = NULL; + __clear_bit(event->hw.idx, cpuc->used_mask); + + /* Absorb the final count and turn off the event. */ + perf_event_update_userpage(event); + + local_irq_restore(irq_flags); +} + +/* + * pmu->read: read and update the counter + */ +static void sw64_pmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + sw64_perf_event_update(event, hwc, hwc->idx, 0); +} + +static bool supported_cpu(void) +{ + return true; +} + +static void hw_perf_event_destroy(struct perf_event *event) +{ + /* Nothing to be done! 
+	 */
+}
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+	struct perf_event_attr *attr = &event->attr;
+	struct hw_perf_event *hwc = &event->hw;
+	const struct sw64_perf_event *event_type;
+
+	/*
+	 * SW64 does not have per-counter usr/os/guest/host bits,
+	 * we can distinguish exclude_user and exclude_kernel by
+	 * sample mode.
+	 */
+	if (event->attr.exclude_hv || event->attr.exclude_idle ||
+	    event->attr.exclude_host || event->attr.exclude_guest)
+		return -EINVAL;
+
+	/*
+	 * SW64 does not support the precise ip feature, and the system
+	 * hangs when userspace probes precise_ip via
+	 * perf_event_attr__set_max_precise_ip()
+	 */
+	if (attr->precise_ip != 0)
+		return -EOPNOTSUPP;
+
+	/* SW64 has a fixed counter for a given event type */
+	if (attr->type == PERF_TYPE_HARDWARE) {
+		if (attr->config >= sw64_pmu->max_events)
+			return -EINVAL;
+		event_type = sw64_pmu->map_hw_event(attr->config);
+		hwc->idx = event_type->counter;
+		hwc->event_base = event_type->event;
+	} else if (attr->type == PERF_TYPE_HW_CACHE) {
+		event_type = sw64_pmu->map_cache_event(attr->config);
+		if (IS_ERR(event_type))
+			return PTR_ERR(event_type);
+		hwc->idx = event_type->counter;
+		hwc->event_base = event_type->event;
+	} else { /* PERF_TYPE_RAW */
+		if (!sw64_pmu->raw_event_valid(attr->config))
+			return -EINVAL;
+		hwc->idx = attr->config >> 8;		/* counter selector */
+		hwc->event_base = attr->config & 0xff;	/* event selector */
+	}
+
+	hwc->config_base = SW64_PERFCTRL_AM;
+
+	if (attr->exclude_user)
+		hwc->config_base = SW64_PERFCTRL_KM;
+	if (attr->exclude_kernel)
+		hwc->config_base = SW64_PERFCTRL_UM;
+
+	hwc->config = attr->config;
+
+	if (!is_sampling_event(event))
+		pr_debug("not sampling event\n");
+
+	event->destroy = hw_perf_event_destroy;
+
+	if (!hwc->sample_period) {
+		hwc->sample_period = sw64_pmu->pmc_max_period;
+		hwc->last_period = hwc->sample_period;
+		local64_set(&hwc->period_left, hwc->sample_period);
+	}
+
+	return 0;
+}
+
+/*
+ * Main entry point to initialise a HW performance event.
+ */
+static int sw64_pmu_event_init(struct perf_event *event)
+{
+	int err;
+
+	/* does not support taken branch sampling */
+	if (has_branch_stack(event))
+		return -EOPNOTSUPP;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HARDWARE:
+	case PERF_TYPE_HW_CACHE:
+		break;
+	default:
+		return -ENOENT;
+	}
+
+	if (!sw64_pmu)
+		return -ENODEV;
+
+	/* Do the real initialisation work.
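+	 * For a raw event, __hw_perf_event_init() splits attr.config into
+	 * a counter and an event selector; e.g. (illustrative) config
+	 * 0x103 selects counter 1 (0x103 >> 8) and event 0x03
+	 * (0x103 & 0xff).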
*/ + err = __hw_perf_event_init(event); + + return err; +} + +static struct pmu pmu = { + .name = "core3-base", + .capabilities = PERF_PMU_CAP_NO_NMI, + .event_init = sw64_pmu_event_init, + .add = sw64_pmu_add, + .del = sw64_pmu_del, + .start = sw64_pmu_start, + .stop = sw64_pmu_stop, + .read = sw64_pmu_read, +}; + +void perf_event_print_debug(void) +{ + unsigned long flags; + unsigned long pcr0, pcr1; + int cpu; + + if (!supported_cpu()) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + pcr0 = wrperfmon(PMC_CMD_READ, PMC_PC0); + pcr1 = wrperfmon(PMC_CMD_READ, PMC_PC1); + + pr_info("CPU#%d: PCTR0[%lx] PCTR1[%lx]\n", cpu, pcr0, pcr1); + + local_irq_restore(flags); +} + +static void sw64_perf_event_irq_handler(unsigned long idx, + struct pt_regs *regs) +{ + struct cpu_hw_events *cpuc; + struct perf_sample_data data; + struct perf_event *event; + struct hw_perf_event *hwc; + + __this_cpu_inc(irq_pmi_count); + cpuc = this_cpu_ptr(&cpu_hw_events); + + event = cpuc->event[idx]; + + if (unlikely(!event)) { + irq_err_count++; + return; + } + + hwc = &event->hw; + sw64_perf_event_update(event, hwc, idx, sw64_pmu->pmc_max_period + 1); + perf_sample_data_init(&data, 0, hwc->last_period); + + if (sw64_perf_event_set_period(event, hwc, idx)) { + if (perf_event_overflow(event, &data, regs)) { + /* Interrupts coming too quickly; "throttle" the + * counter, i.e., disable it for a little while. + */ + sw64_pmu_stop(event, 0); + } + } +} + +bool valid_utext_addr(unsigned long addr) +{ + return addr >= current->mm->start_code && addr <= current->mm->end_code; +} + +bool valid_dy_addr(unsigned long addr) +{ + bool ret = false; + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + + if (addr > TASK_SIZE || addr < TASK_UNMAPPED_BASE) + return ret; + vma = find_vma(mm, addr); + if (vma && vma->vm_start <= addr && (vma->vm_flags & VM_EXEC)) + ret = true; + return ret; +} + +#ifdef CONFIG_FRAME_POINTER +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + + struct stack_frame frame; + unsigned long __user *fp; + int err; + + perf_callchain_store(entry, regs->pc); + + fp = (unsigned long __user *)regs->regs[15]; + + while (entry->nr < entry->max_stack && (unsigned long)fp < current->mm->start_stack) { + if (!access_ok(fp, sizeof(frame))) + break; + + pagefault_disable(); + err = __copy_from_user_inatomic(&frame, fp, sizeof(frame)); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(frame.return_address) || valid_dy_addr(frame.return_address)) + perf_callchain_store(entry, frame.return_address); + fp = (void __user *)frame.next_frame; + } +} +#else /* !CONFIG_FRAME_POINTER */ +void perf_callchain_user(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + unsigned long usp = rdusp(); + unsigned long user_addr; + int err; + + perf_callchain_store(entry, regs->pc); + + while (entry->nr < entry->max_stack && usp < current->mm->start_stack) { + if (!access_ok((const void __user *)usp, 8)) + break; + + pagefault_disable(); + err = __get_user(user_addr, (unsigned long *)usp); + pagefault_enable(); + + if (err) + break; + + if (valid_utext_addr(user_addr) || valid_dy_addr(user_addr)) + perf_callchain_store(entry, user_addr); + usp = usp + 8; + } +} +#endif/* CONFIG_FRAME_POINTER */ + +/* + * Gets called by walk_stackframe() for every stackframe. This will be called + * whist unwinding the stackframe and is like a subroutine return so we use + * the PC. 
+ */ +static int callchain_trace(unsigned long pc, void *data) +{ + struct perf_callchain_entry_ctx *entry = data; + + perf_callchain_store(entry, pc); + return 0; +} + +void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, + struct pt_regs *regs) +{ + walk_stackframe(NULL, regs, callchain_trace, entry); +} + +/* + * Gets the perf_instruction_pointer and perf_misc_flags for guest os. + */ + +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ + if (perf_guest_state()) + return perf_guest_get_ip(); + + return instruction_pointer(regs); +} + +unsigned long perf_misc_flags(struct pt_regs *regs) +{ + unsigned int guest_state = perf_guest_state(); + int misc = 0; + + if (guest_state) { + if (guest_state & PERF_GUEST_USER) + misc |= PERF_RECORD_MISC_GUEST_USER; + else + misc |= PERF_RECORD_MISC_GUEST_KERNEL; + } else { + if (user_mode(regs)) + misc |= PERF_RECORD_MISC_USER; + else + misc |= PERF_RECORD_MISC_KERNEL; + } + + return misc; +} + +/* + * Init call to initialise performance events at kernel startup. + */ +int __init init_hw_perf_events(void) +{ + if (!supported_cpu()) { + pr_info("Performance events: Unsupported CPU type!\n"); + return 0; + } + + pr_info("Performance events: Supported CPU type!\n"); + + /* Override performance counter IRQ vector */ + + perf_irq = sw64_perf_event_irq_handler; + + /* And set up PMU specification */ + sw64_pmu = &core3_pmu; + + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + + return 0; +} +early_initcall(init_hw_perf_events); diff --git a/arch/sw_64/kernel/perf_regs.c b/arch/sw_64/kernel/perf_regs.c new file mode 100644 index 0000000000000000000000000000000000000000..b036f213936bc6d79214c9b7bdf1ab9a82a40b69 --- /dev/null +++ b/arch/sw_64/kernel/perf_regs.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE((u32)idx >= PERF_REG_SW64_MAX)) + return 0; + + return ((unsigned long *)regs)[idx]; +} + +#define REG_RESERVED (~((1ULL << PERF_REG_SW64_MAX) - 1)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_64; +} + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} diff --git a/arch/sw_64/kernel/pm.c b/arch/sw_64/kernel/pm.c new file mode 100644 index 0000000000000000000000000000000000000000..f0a35e5d0486167340b44f3bac1c80104f25649e --- /dev/null +++ b/arch/sw_64/kernel/pm.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include + +struct syscore_ops io_syscore_ops; + +static int __init sw64_pm_init(void) +{ +#ifdef CONFIG_SUSPEND + suspend_set_ops(&native_suspend_ops); +#endif + register_syscore_ops(&io_syscore_ops); + + return 0; +} +device_initcall(sw64_pm_init); diff --git a/arch/sw_64/kernel/proc_misc.c b/arch/sw_64/kernel/proc_misc.c new file mode 100644 index 0000000000000000000000000000000000000000..ca107ec1e05e96f4a8790a3216d7f0eec6458ecd --- /dev/null +++ b/arch/sw_64/kernel/proc_misc.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +extern const struct seq_operations cpu_active_mask_op; +static int cpu_active_mask_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &cpu_active_mask_op); +} + +static const struct file_operations proc_cpu_active_mask_operations = { + .open = 
cpu_active_mask_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +static int __init proc_cpu_active_mask_init(void) +{ + proc_create("cpu_active_mask", 0, NULL, &proc_cpu_active_mask_operations); + return 0; +} +fs_initcall(proc_cpu_active_mask_init); diff --git a/arch/sw_64/kernel/process.c b/arch/sw_64/kernel/process.c new file mode 100644 index 0000000000000000000000000000000000000000..9a887140edef19d2a490edd7ec12ef871fcd5cae --- /dev/null +++ b/arch/sw_64/kernel/process.c @@ -0,0 +1,127 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file handles the architecture-dependent parts of process handling. + */ + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "proto.h" + +/* + * Re-start a thread when doing execve() + */ +void +start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp) +{ + regs->pc = pc; + regs->ps = 8; + regs->regs[30] = sp; +} +EXPORT_SYMBOL(start_thread); + + +void +flush_thread(void) +{ + /* Arrange for each exec'ed process to start off with a clean slate + * with respect to the FPU. This is all exceptions disabled. + */ + current_thread_info()->ieee_state = 0; + wrfpcr(FPCR_INIT | ieee_swcr_to_fpcr(0)); + + /* Clean slate for TLS. */ + current_thread_info()->pcb.tp = 0; +} + +void +release_thread(struct task_struct *dead_task) +{ +} + +int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) +{ + /* + * aux_save() has to read the current TLS pointer from CSR:TID as it + * may be out-of-sync with the saved value. + */ + aux_save(src); + *dst = *src; + return 0; +} + +/* + * Copy architecture-specific thread state + */ + +int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) +{ + unsigned long clone_flags = args->flags; + unsigned long usp = args->stack; + unsigned long tls = args->tls; + struct thread_info *childti = task_thread_info(p); + struct pt_regs *childregs = task_pt_regs(p); + struct pt_regs *regs = current_pt_regs(); + + extern void ret_from_fork(void); + extern void ret_from_kernel_thread(void); + + p->thread.sp = (unsigned long) childregs; + + if (unlikely(args->fn)) { + /* kernel thread */ + memset(childregs, 0, sizeof(struct pt_regs)); + p->thread.ra = (unsigned long) ret_from_kernel_thread; + p->thread.s[0] = (unsigned long) args->fn; /* function */ + p->thread.s[1] = (unsigned long) args->fn_arg; + return 0; + } + + /* + * Note: if CLONE_SETTLS is not set, then we must inherit the + * value from the parent, which will have been set by the block + * copy in dup_task_struct. This is non-intuitive, but is + * required for proper operation in the case of a threaded + * application calling fork. + */ + if (clone_flags & CLONE_SETTLS) + childti->pcb.tp = tls; + else + regs->regs[20] = 0; + *childregs = *regs; + if (usp) + childregs->regs[30] = usp; + syscall_set_return_value(NULL, childregs, 0, 0); + p->thread.ra = (unsigned long) ret_from_fork; + return 0; +} + +/* + * Fill in the user structure for a ELF core dump. 
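+ * Layout (mirrors the copy loop below): dest[0..30] hold the GPRs,
+ * dest[31] the pc and dest[32] the TLS pointer (pcb.tp).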
+ * @regs: should be signal_pt_regs() or task_pt_reg(task) + */ +void sw64_elf_core_copy_regs(elf_greg_t *dest, struct pt_regs *regs) +{ + int i; + struct thread_info *ti; + + ti = (void *)((__u64)regs & ~(THREAD_SIZE - 1)); + + for (i = 0; i < 31; i++) + dest[i] = regs->regs[i]; + dest[31] = regs->pc; + dest[32] = ti->pcb.tp; +} +EXPORT_SYMBOL(sw64_elf_core_copy_regs); + +unsigned long arch_randomize_brk(struct mm_struct *mm) +{ + return randomize_page(mm->brk, 0x02000000); +} diff --git a/arch/sw_64/kernel/proto.h b/arch/sw_64/kernel/proto.h new file mode 100644 index 0000000000000000000000000000000000000000..d7222334d1b99ffab3e0345bebcb1ec15177290c --- /dev/null +++ b/arch/sw_64/kernel/proto.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_KERNEL_PROTO_H +#define _SW64_KERNEL_PROTO_H + +#include +#include +#include +#include + +/* ptrace.c */ +extern int ptrace_set_bpt(struct task_struct *child); +extern int ptrace_cancel_bpt(struct task_struct *child); + +/* traps.c */ +extern void show_regs(struct pt_regs *regs); +extern void die(char *str, struct pt_regs *regs, long err); + +#endif /* _SW64_KERNEL_PROTO_H */ diff --git a/arch/sw_64/kernel/ptrace.c b/arch/sw_64/kernel/ptrace.c new file mode 100644 index 0000000000000000000000000000000000000000..070e27ee256766b7d031dbc2a92bdefcedfe3676 --- /dev/null +++ b/arch/sw_64/kernel/ptrace.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* ptrace.c */ +/* By Ross Biro 1/23/92 */ +/* edited by Linus Torvalds */ +/* mangled further by Bob Manson (manson@santafe.edu) */ +/* more mutilation by David Mosberger (davidm@azstarnet.com) */ + +#include +#include +#include +#include + +#include + +#include "proto.h" +#include + +#define CREATE_TRACE_POINTS +#include + +#define BREAKINST 0x00000080 /* sys_call bpt */ + +/* + * does not yet catch signals sent when the child dies. + * in exit.c or in signal.c. + */ + +/* + * Processes always block with the following stack-layout: + * + * +================================+ <---- task + 2*PAGE_SIZE + * | HMcode saved frame (ps, pc, | ^ + * | gp, a0, a1, a2) | | + * +================================+ | struct pt_regs + * | | | + * | frame generated by SAVE_ALL | | + * | | v + * +================================+ + */ + +/* + * The following table maps a register index into the stack offset at + * which the register is saved. Register indices are 0-31 for integer + * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and + * zero have no stack-slot and need to be treated specially (see + * get_reg/put_reg below). + */ +#define PCB_OFF(var) offsetof(struct pcb_struct, var) + +static int pcboff[] = { + [PT_TP] = PCB_OFF(tp), + [PT_DA_MATCH] = PCB_OFF(da_match), + [PT_DA_MASK] = PCB_OFF(da_mask), + [PT_DV_MATCH] = PCB_OFF(dv_match), + [PT_DV_MASK] = PCB_OFF(dv_mask), + [PT_DC_CTL] = PCB_OFF(dc_ctl), + [PT_MATCH_CTL] = PCB_OFF(match_ctl), + [PT_IA_MATCH] = PCB_OFF(ia_match), + [PT_IA_MASK] = PCB_OFF(ia_mask), + [PT_IV_MATCH] = PCB_OFF(iv_match), + [PT_IDA_MATCH] = PCB_OFF(ida_match), + [PT_IDA_MASK] = PCB_OFF(ida_mask) +}; + +static unsigned long zero; + +/* + * Get address of register REGNO in task TASK. 
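+ * E.g. get_reg_addr(task, PT_PC) points into the saved pt_regs, while
+ * PCB-resident state such as the TLS pointer resolves through the
+ * pcboff[] table above; unhandled register numbers fall back to &zero.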
+ */ + +static unsigned long * +get_reg_addr(struct task_struct *task, unsigned long regno) +{ + void *addr; + int fno, vno; + + switch (regno) { + case PT_UNIQUE: + case PT_DA_MATCH: + case PT_DA_MASK: + case PT_DV_MATCH: + case PT_DV_MASK: + case PT_MATCH_CTL: + case PT_IA_MATCH: + case PT_IA_MASK: + case PT_IV_MATCH: + case PT_IDA_MATCH: + case PT_IDA_MASK: + addr = (void *)task_thread_info(task) + pcboff[regno]; + break; + case PT_REG_BASE ... PT_REG_END: + addr = &task_pt_regs(task)->regs[regno]; + break; + case PT_FPREG_BASE ... PT_FPREG_END: + fno = regno - PT_FPREG_BASE; + addr = &task->thread.fpstate.fp[fno].v[0]; + break; + case PT_VECREG_BASE ... PT_VECREG_END: + /* + * return addr for zero value if we catch vectors of f31 + * v0 and v3 of f31 are not in this range so ignore them + */ + if (regno == PT_F31_V1 || regno == PT_F31_V2) { + addr = &zero; + break; + } + fno = (regno - PT_VECREG_BASE) & 0x1f; + vno = 1 + ((regno - PT_VECREG_BASE) >> 5); + addr = &task->thread.fpstate.fp[fno].v[vno]; + break; + case PT_FPCR: + addr = &task->thread.fpstate.fpcr; + break; + case PT_PC: + addr = (void *)task_pt_regs(task) + PT_REGS_PC; + break; + default: + addr = &zero; + } + + return addr; +} + +/* + * Get contents of register REGNO in task TASK. + */ +unsigned long +get_reg(struct task_struct *task, unsigned long regno) +{ + return *get_reg_addr(task, regno); +} + +/* + * Write contents of register REGNO in task TASK. + */ +static int +put_reg(struct task_struct *task, unsigned long regno, unsigned long data) +{ + *get_reg_addr(task, regno) = data; + return 0; +} + +static inline int +read_int(struct task_struct *task, unsigned long addr, int *data) +{ + int copied = access_process_vm(task, addr, data, sizeof(int), FOLL_FORCE); + + return (copied == sizeof(int)) ? 0 : -EIO; +} + +static inline int +write_int(struct task_struct *task, unsigned long addr, int data) +{ + int copied = access_process_vm(task, addr, &data, sizeof(int), + FOLL_FORCE | FOLL_WRITE); + return (copied == sizeof(int)) ? 0 : -EIO; +} + +/* + * Set breakpoint. + */ +int +ptrace_set_bpt(struct task_struct *child) +{ + int displ, i, res, reg_b, nsaved = 0; + unsigned int insn, op_code; + unsigned long pc; + + pc = get_reg(child, PT_PC); + res = read_int(child, pc, (int *)&insn); + if (res < 0) + return res; + + op_code = insn >> 26; + /* br bsr beq bne blt ble bgt bge blbc blbs fbeq fbne fblt fble fbgt fbge */ + if ((1UL << op_code) & 0x3fff000000000030UL) { + /* + * It's a branch: instead of trying to figure out + * whether the branch will be taken or not, we'll put + * a breakpoint at either location. This is simpler, + * more reliable, and probably not a whole lot slower + * than the alternative approach of emulating the + * branch (emulation can be tricky for fp branches). 
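+		 *
+		 * As a worked example of the displacement extraction
+		 * below: the 21-bit branch displacement lives in
+		 * insn[20:0], so (s32)(insn << 11) brings its sign bit
+		 * to bit 31, and the arithmetic shift right by 9
+		 * sign-extends it while scaling by 4 (bytes per
+		 * instruction) in one step.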
+ */ + displ = ((s32)(insn << 11)) >> 9; + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + if (displ) /* guard against unoptimized code */ + task_thread_info(child)->bpt_addr[nsaved++] + = pc + 4 + displ; + /*call ret jmp*/ + } else if (op_code >= 0x1 && op_code <= 0x3) { + reg_b = (insn >> 16) & 0x1f; + task_thread_info(child)->bpt_addr[nsaved++] = get_reg(child, reg_b); + } else { + task_thread_info(child)->bpt_addr[nsaved++] = pc + 4; + } + + /* install breakpoints: */ + for (i = 0; i < nsaved; ++i) { + res = read_int(child, task_thread_info(child)->bpt_addr[i], + (int *)&insn); + if (res < 0) + return res; + task_thread_info(child)->bpt_insn[i] = insn; + res = write_int(child, task_thread_info(child)->bpt_addr[i], + BREAKINST); + if (res < 0) + return res; + } + task_thread_info(child)->bpt_nsaved = nsaved; + return 0; +} + +/* + * Ensure no single-step breakpoint is pending. Returns non-zero + * value if child was being single-stepped. + */ +int +ptrace_cancel_bpt(struct task_struct *child) +{ + int i, nsaved = task_thread_info(child)->bpt_nsaved; + + task_thread_info(child)->bpt_nsaved = 0; + + if (nsaved > 2) { + pr_info("%s: bogus nsaved: %d!\n", __func__, nsaved); + nsaved = 2; + } + + for (i = 0; i < nsaved; ++i) { + write_int(child, task_thread_info(child)->bpt_addr[i], + task_thread_info(child)->bpt_insn[i]); + } + return (nsaved != 0); +} + +void user_enable_single_step(struct task_struct *child) +{ + /* Mark single stepping. */ + task_thread_info(child)->bpt_nsaved = -1; +} + +void user_disable_single_step(struct task_struct *child) +{ + ptrace_cancel_bpt(child); +} + +/* + * Called by kernel/ptrace.c when detaching.. + * + * Make sure the single step bit is not set. + */ +void ptrace_disable(struct task_struct *child) +{ + user_disable_single_step(child); +} + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + return membuf_write(&to, task_pt_regs(target), sizeof(struct user_pt_regs)); +} + +static int gpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + task_pt_regs(target), 0, sizeof(struct user_pt_regs)); +} + +static int fpr_get(struct task_struct *target, + const struct user_regset *regset, + struct membuf to) +{ + + return membuf_write(&to, &target->thread.fpstate, + sizeof(struct user_fpsimd_state)); +} + +static int fpr_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + return user_regset_copyin(&pos, &count, &kbuf, &ubuf, + &target->thread.fpstate, 0, + sizeof(struct user_fpsimd_state)); +} + +enum sw64_regset { + REGSET_GPR, + REGSET_FPR, +}; + +static const struct user_regset sw64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = ELF_NGREG, + .size = sizeof(elf_greg_t), + .align = sizeof(elf_greg_t), + .regset_get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .regset_get = fpr_get, + .set = fpr_set + }, +}; + +static const struct user_regset_view user_sw64_view = { + .name = "sw64", .e_machine = EM_SW64, + .regsets = sw64_regsets, .n = ARRAY_SIZE(sw64_regsets) +}; + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ + return 
&user_sw64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + unsigned long tmp; + size_t copied; + long ret; + + switch (request) { + /* When I and D space are separate, these will need to be fixed. */ + case PTRACE_PEEKTEXT: /* read word at location addr. */ + case PTRACE_PEEKDATA: + copied = access_process_vm(child, addr, &tmp, sizeof(tmp), FOLL_FORCE); + ret = -EIO; + if (copied != sizeof(tmp)) + break; + + force_successful_syscall_return(); + ret = tmp; + break; + + /* Read register number ADDR. */ + case PTRACE_PEEKUSR: + force_successful_syscall_return(); + ret = get_reg(child, addr); + break; + + /* When I and D space are separate, this will have to be fixed. */ + case PTRACE_POKETEXT: /* write the word at location addr. */ + case PTRACE_POKEDATA: + ret = generic_ptrace_pokedata(child, addr, data); + break; + + case PTRACE_POKEUSR: /* write the specified register */ + ret = put_reg(child, addr, data); + break; + default: + ret = ptrace_request(child, request, addr, data); + break; + } + return ret; +} + +asmlinkage unsigned long syscall_trace_enter(void) +{ + unsigned long ret = 0; + struct pt_regs *regs = current_pt_regs(); + + if (test_thread_flag(TIF_SYSCALL_TRACE) && + ptrace_report_syscall_entry(regs)) + return NO_SYSCALL; + +#ifdef CONFIG_SECCOMP + /* Do seccomp after ptrace, to catch any tracer changes. */ + if (secure_computing() == -1) + return NO_SYSCALL; +#endif + + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_enter(regs, regs->regs[0]); + audit_syscall_entry(regs->regs[0], regs->regs[16], regs->regs[17], regs->regs[18], regs->regs[19]); + return ret ?: regs->regs[0]; +} + +asmlinkage void +syscall_trace_leave(void) +{ + struct pt_regs *regs = current_pt_regs(); + + audit_syscall_exit(regs); + if (test_thread_flag(TIF_SYSCALL_TRACE)) + ptrace_report_syscall_exit(regs, 0); + if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) + trace_sys_exit(regs, regs_return_value(regs)); +} + +#ifdef CONFIG_SUBARCH_C3B +static long rwcsr(int rw, unsigned long csr, unsigned long value) +{ + register unsigned long __r0 __asm__("$0"); + register unsigned long __r16 __asm__("$16") = rw; + register unsigned long __r17 __asm__("$17") = csr; + register unsigned long __r18 __asm__("$18") = value; + + __asm__ __volatile__( + "sys_call %4" + : "=r"(__r0), "=r"(__r16), "=r"(__r17), "=r"(__r18) + : "i"(HMC_rwreg), "1"(__r16), "2"(__r17), "3"(__r18) + : "$1", "$22", "$23", "$24", "$25"); + + return __r0; +} + +#define RCSR 0 +#define WCSR 1 + +#define CSR_DA_MATCH 0 +#define CSR_DA_MASK 1 +#define CSR_IA_MATCH 2 +#define CSR_IA_MASK 3 +#define CSR_IDA_MATCH 6 +#define CSR_IDA_MASK 7 +#define CSR_DC_CTL 11 +#define CSR_DV_MATCH 15 +#define CSR_DV_MASK 16 + +#define DV_MATCH_EN_S 19 +#define DAV_MATCH_EN_S 20 + +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + unsigned long dc_ctl; + unsigned long value; + + pr_info("%s: pid %d, name = %s,cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + if (mmcsr == MMCSR__DV_MATCH) { + value = rwcsr(RCSR, CSR_DV_MATCH, 0); + 
pr_notice("value is %#lx\n", value); + value = rwcsr(RCSR, CSR_DV_MASK, 0); + pr_notice("value is %#lx\n", value); + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + return 1; + } + + if (mmcsr == MMCSR__DA_MATCH) { + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~(0x1UL << DV_MATCH_EN_S); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } + if (mmcsr == MMCSR__DAV_MATCH) { + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + rwcsr(WCSR, CSR_DA_MATCH, 0); //clear da_match + } + task_thread_info(current)->pcb.dv_match = 0; + task_thread_info(current)->pcb.dc_ctl = 0; + pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid); + force_sig_fault(SIGTRAP, TRAP_HWBKPT, (void *) address); + return 1; + + case MMCSR__IA_MATCH: + rwcsr(WCSR, CSR_IA_MATCH, 0); //clear ia_match + return 1; + case MMCSR__IDA_MATCH: + rwcsr(WCSR, CSR_IDA_MATCH, 0); //clear ida_match + return 1; + } + + return 0; +} + +void restore_da_match_after_sched(void) +{ + unsigned long dc_ctl_mode; + unsigned long dc_ctl; + struct pcb_struct *pcb = &task_thread_info(current)->pcb; + + rwcsr(WCSR, CSR_DA_MATCH, 0); + rwcsr(WCSR, CSR_DA_MASK, pcb->da_mask); + rwcsr(WCSR, CSR_DA_MATCH, pcb->da_match); + dc_ctl_mode = pcb->dc_ctl; + dc_ctl = rwcsr(RCSR, CSR_DC_CTL, 0); + dc_ctl &= ~((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + dc_ctl |= ((dc_ctl_mode << DV_MATCH_EN_S) & ((0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S))); + if (dc_ctl_mode & 0x1) { + rwcsr(WCSR, CSR_DV_MATCH, pcb->dv_match); + rwcsr(WCSR, CSR_DV_MASK, pcb->dv_mask); + rwcsr(WCSR, CSR_DC_CTL, dc_ctl); + } +} + +#elif defined(CONFIG_SUBARCH_C4) +int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs) +{ + kernel_siginfo_t info; + unsigned long match_ctl, ia_match; + sigval_t sw64_value; + + pr_info("%s: pid %d, name = %s, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + __func__, current->pid, current->comm, cause, mmcsr, address, regs->pc); + + switch (mmcsr) { + case MMCSR__DA_MATCH: + case MMCSR__DV_MATCH: + case MMCSR__DAV_MATCH: + case MMCSR__IA_MATCH: + case MMCSR__IDA_MATCH: + case MMCSR__IV_MATCH: + show_regs(regs); + + if (!(current->ptrace & PT_PTRACED)) { + pr_notice(" pid %d %s not be ptraced, return\n", current->pid, current->comm); + if (mmcsr == MMCSR__DA_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if 
(mmcsr == MMCSR__DAV_MATCH) { + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + task_thread_info(current)->pcb.iv_match = 0; + } + if (mmcsr == MMCSR__IDA_MATCH) { + write_csr(0, CSR_IDA_MATCH); // clear ida_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5); + task_thread_info(current)->pcb.ida_match = 0; + } + return 1; + } + + info.si_signo = SIGTRAP; + info.si_addr = (void *) address; + sw64_value.sival_ptr = (void *)(regs->pc); + info.si_value = sw64_value; + info.si_code = TRAP_HWBKPT; + + if (mmcsr == MMCSR__DA_MATCH) { + info.si_errno = 1; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x3UL << DA_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + task_thread_info(current)->pcb.match_ctl &= ~0x1; + task_thread_info(current)->pcb.da_match = 0; + } + if (mmcsr == MMCSR__DV_MATCH) { + info.si_errno = 2; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~(0x1UL << DV_MATCH_EN_S); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 1); + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__DAV_MATCH) { + info.si_errno = 3; + match_ctl = read_csr(CSR_DC_CTLP); + match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S)); + write_csr(match_ctl, CSR_DC_CTLP); + write_csr(0, CSR_DA_MATCH); // clear da_match + write_csr(0, CSR_DV_MATCH); // clear dv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 | (0x1 << 1) | (0x1 << 2)); + task_thread_info(current)->pcb.da_match = 0; + task_thread_info(current)->pcb.dv_match = 0; + } + if (mmcsr == MMCSR__IA_MATCH) { + info.si_errno = 4; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IA_MATCH_EN_S) | (0x7ffffffffffffUL << 2)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 3); + task_thread_info(current)->pcb.ia_match = 0; + } + if (mmcsr == MMCSR__IV_MATCH) { + info.si_errno = 5; + ia_match = read_csr(CSR_IA_MATCH); + ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + write_csr(ia_match, CSR_IA_MATCH); // clear ia_match + write_csr(0, CSR_IV_MATCH); // clear iv_match + task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 4); + task_thread_info(current)->pcb.ia_match &= ~((0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S)); + 
+			task_thread_info(current)->pcb.iv_match = 0;
+		}
+		if (mmcsr == MMCSR__IDA_MATCH) {
+			info.si_errno = 6;
+			write_csr(0, CSR_IDA_MATCH);	// clear ida_match
+			task_thread_info(current)->pcb.match_ctl &= ~(0x1 << 5);
+			task_thread_info(current)->pcb.ida_match = 0;
+		}
+		pr_notice("do_page_fault: want to send SIGTRAP, pid = %d\n", current->pid);
+		force_sig_info(&info);
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * pcb->match_ctl:
+ * [0] DA_MATCH
+ * [1] DV_MATCH
+ * [2] DAV_MATCH
+ * [3] IA_MATCH
+ * [4] IV_MATCH
+ * [5] IDA_MATCH
+ * [8:9] match_ctl_mode
+ *
+ */
+#define DA_MATCH	0x1
+#define DV_MATCH	0x2
+#define DAV_MATCH	0x4
+#define IA_MATCH	0x8
+#define IV_MATCH	0x10
+#define IDA_MATCH	0x20
+
+void restore_da_match_after_sched(void)
+{
+	unsigned long match_ctl_mode;
+	unsigned long match_ctl;
+	struct pcb_struct *pcb = &task_thread_info(current)->pcb;
+	unsigned long vpn, upn;
+
+	if (!pcb->match_ctl)
+		return;
+	pr_info("Restore MATCH status, pid: %d\n", current->pid);
+
+	if (pcb->match_ctl & DA_MATCH) {
+		write_csr(pcb->da_match, CSR_DA_MATCH);
+		write_csr(pcb->da_mask, CSR_DA_MASK);
+		match_ctl_mode = (pcb->match_ctl >> 8) & 0x3;
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("da_match:%#lx da_mask:%#lx match_ctl:%#lx\n", pcb->da_match, pcb->da_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & DV_MATCH) {
+		write_csr(pcb->dv_match, CSR_DV_MATCH);
+		write_csr(pcb->dv_mask, CSR_DV_MASK);
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x1UL << 3) | (0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (0x1UL << DV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S) | (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n", pcb->dv_match, pcb->dv_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & DAV_MATCH) {
+		write_csr(pcb->da_match, CSR_DA_MATCH);
+		write_csr(pcb->da_mask, CSR_DA_MASK);
+		write_csr(pcb->dv_match, CSR_DV_MATCH);
+		write_csr(pcb->dv_mask, CSR_DV_MASK);
+		write_csr(0xfffffffff, CSR_DA_MATCH_MODE);
+		match_ctl_mode = (pcb->match_ctl >> 8) & 0x3;
+		match_ctl = read_csr(CSR_DC_CTLP);
+		match_ctl &= ~((0x3UL << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S) | (0x1UL << DAV_MATCH_EN_S));
+		match_ctl |= (match_ctl_mode << DA_MATCH_EN_S) | (0x1UL << DV_MATCH_EN_S)
+			| (0x1UL << DAV_MATCH_EN_S) | (0x1UL << DPM_MATCH_EN_S)
+			| (0x3UL << DPM_MATCH);
+		write_csr(match_ctl, CSR_DC_CTLP);
+		pr_info("da_match:%#lx da_mask:%#lx dv_match:%#lx dv_mask:%#lx match_ctl:%#lx\n",
+			pcb->da_match, pcb->da_mask, pcb->dv_match, pcb->dv_mask, match_ctl);
+	}
+
+	if (pcb->match_ctl & IA_MATCH) {
+		pcb->ia_match |= (0x1UL << IA_MATCH_EN_S) | 0x3;
+		pcb->ia_mask |= 0x3;
+		write_csr(pcb->ia_match, CSR_IA_MATCH);
+		write_csr(pcb->ia_mask, CSR_IA_MASK);
+		vpn = read_csr(CSR_VPCR) >> 44;
+		vpn &= 0x3ff;
+		upn = read_csr(CSR_UPCR);
+		upn &= 0x3ff;
+		write_csr(((0x3ff << 18) | vpn), CSR_IA_VPNMATCH);
+		write_csr(((0x3ff << 18) | upn), CSR_IA_UPNMATCH);
+		pr_info("ia_match:%#lx ia_mask:%#lx\n", pcb->ia_match, pcb->ia_mask);
+	}
+	if (pcb->match_ctl & IV_MATCH) {
+		pcb->ia_match |= (0x1UL << IV_MATCH_EN_S) | (0x1UL << IV_PM_EN_S) | 0x3;
+		write_csr(pcb->ia_match, CSR_IA_MATCH);
+		write_csr(pcb->iv_match, CSR_IV_MATCH);
+		pr_info("ia_match:%#lx iv_match:%#lx\n", pcb->ia_match,
+			pcb->iv_match);
+	}
+	if (pcb->match_ctl & IDA_MATCH) {
+		pcb->ida_match |= (0x1UL << IDA_MATCH_EN_S) | 0x3;
+		pcb->ida_mask |= 0x3;
+		write_csr(pcb->ida_match, CSR_IDA_MATCH);
+		write_csr(pcb->ida_mask, CSR_IDA_MASK);
+		pr_info("ida_match:%#lx ida_mask:%#lx\n", pcb->ida_match, pcb->ida_mask);
+	}
+}
+#endif
+
+struct pt_regs_offset {
+	const char *name;
+	int offset;
+};
+
+#define GPR_OFFSET_NAME(r) {					\
+	.name = "r" #r,						\
+	.offset = offsetof(struct pt_regs, regs[r])		\
+}
+
+#define REG_OFFSET_NAME(r) {					\
+	.name = #r,						\
+	.offset = offsetof(struct pt_regs, r)			\
+}
+
+#define REG_OFFSET_END {					\
+	.name = NULL,						\
+	.offset = 0						\
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+	GPR_OFFSET_NAME(0),
+	GPR_OFFSET_NAME(1),
+	GPR_OFFSET_NAME(2),
+	GPR_OFFSET_NAME(3),
+	GPR_OFFSET_NAME(4),
+	GPR_OFFSET_NAME(5),
+	GPR_OFFSET_NAME(6),
+	GPR_OFFSET_NAME(7),
+	GPR_OFFSET_NAME(8),
+	GPR_OFFSET_NAME(9),
+	GPR_OFFSET_NAME(10),
+	GPR_OFFSET_NAME(11),
+	GPR_OFFSET_NAME(12),
+	GPR_OFFSET_NAME(13),
+	GPR_OFFSET_NAME(14),
+	GPR_OFFSET_NAME(15),
+	GPR_OFFSET_NAME(16),
+	GPR_OFFSET_NAME(17),
+	GPR_OFFSET_NAME(18),
+	GPR_OFFSET_NAME(19),
+	GPR_OFFSET_NAME(20),
+	GPR_OFFSET_NAME(21),
+	GPR_OFFSET_NAME(22),
+	GPR_OFFSET_NAME(23),
+	GPR_OFFSET_NAME(24),
+	GPR_OFFSET_NAME(25),
+	GPR_OFFSET_NAME(26),
+	GPR_OFFSET_NAME(27),
+	GPR_OFFSET_NAME(28),
+	GPR_OFFSET_NAME(29),
+	GPR_OFFSET_NAME(30),
+	REG_OFFSET_NAME(pc),
+	REG_OFFSET_NAME(ps),
+	REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_offset *roff;
+
+	for (roff = regoffset_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return roff->offset;
+	return -EINVAL;
+}
+
+static int regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
+{
+	unsigned long ksp = kernel_stack_pointer(regs);
+
+	return (addr & ~(THREAD_SIZE - 1)) == (ksp & ~(THREAD_SIZE - 1));
+}
+
+/**
+ * regs_get_kernel_stack_nth() - get Nth entry of the stack
+ * @regs: pt_regs which contains kernel stack pointer.
+ * @n: stack entry number.
+ *
+ * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
+ * is specified by @regs. If the @n th entry is NOT in the kernel stack,
+ * this returns 0.
+ */
+unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
+{
+	unsigned long addr;
+
+	addr = kernel_stack_pointer(regs) + n * sizeof(long);
+	if (!regs_within_kernel_stack(regs, addr))
+		return 0;
+	return *(unsigned long *)addr;
+}
diff --git a/arch/sw_64/kernel/relocate.c b/arch/sw_64/kernel/relocate.c
new file mode 100644
index 0000000000000000000000000000000000000000..ebdf7d894805e8f2c0a1853d853d5d2ef8bf6c09
--- /dev/null
+++ b/arch/sw_64/kernel/relocate.c
@@ -0,0 +1,284 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for kernel relocation at boot time.
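+ *
+ * The image may be copied to a new (possibly randomized) location at
+ * early boot; every R_SW64_REFQUAD entry in the relocation table and
+ * every GOT slot is then patched by the chosen offset (see
+ * do_relocations() and relocate_got() below).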
+ * + * Based on arch/mips/kernel/relocate.c + * + * Copyright (C) 2019 He Sheng + * Authors: He Sheng (hesheng05@gmail.com) + */ +#include +#include +#include + +#include + +#define KTEXT_MAX 0xffffffffa0000000UL +#define RELOCATED(x) ((void *)((unsigned long)x + offset)) + +extern unsigned long _got_start[]; +extern unsigned long _got_end[]; +extern char pre_start_kernel[]; + +extern unsigned int _relocation_start[]; /* End kernel image / start relocation table */ +extern unsigned int _relocation_end[]; /* End relocation table */ + +extern unsigned long __start___ex_table; /* Start exception table */ +extern unsigned long __stop___ex_table; /* End exception table */ +extern union thread_union init_thread_union; + +/* + * This function may be defined for a platform to perform any post-relocation + * fixup necessary. + * Return non-zero to abort relocation + */ +int __weak plat_post_relocation(long offset) +{ + return 0; +} + +static int __init apply_r_sw64_refquad(unsigned long *loc_orig, unsigned long *loc_new, unsigned int offset) +{ + *(unsigned long *)loc_new += offset; + + return 0; +} + +static int (*reloc_handlers_rel[]) (unsigned long *, unsigned long *, unsigned int) __initdata = { + [R_SW64_REFQUAD] = apply_r_sw64_refquad, +}; + +int __init do_relocations(void *kbase_old, void *kbase_new, unsigned int offset) +{ + unsigned int *r; + unsigned long *loc_orig; + unsigned long *loc_new; + int type; + int res; + + for (r = _relocation_start; r < _relocation_end; r++) { + /* Sentinel for last relocation */ + if (*r == 0) + break; + + type = (*r >> 24) & 0xff; + loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); + loc_new = RELOCATED(loc_orig); + + if (reloc_handlers_rel[type] == NULL) { + /* Unsupported relocation */ + pr_err("Unhandled relocation type %d at 0x%pK\n", + type, loc_orig); + return -ENOEXEC; + } + + res = reloc_handlers_rel[type](loc_orig, loc_new, offset); + if (res) + return res; + } + + return 0; +} + +static int __init relocate_got(unsigned int offset) +{ + unsigned long *got_start, *got_end, *e; + + got_start = RELOCATED(&_got_start); + got_end = RELOCATED(&_got_end); + + for (e = got_start; e < got_end; e++) + *e += offset; + + return 0; +} + +#ifdef CONFIG_RANDOMIZE_BASE + +static inline __init unsigned long rotate_xor(unsigned long hash, + const void *area, size_t size) +{ + size_t i; + unsigned long start, *ptr; + /* Make sure start is 8 byte aligned */ + start = ALIGN((unsigned long)area, 8); + size -= (start - (unsigned long)area); + ptr = (unsigned long *) start; + for (i = 0; i < size / sizeof(hash); i++) { + /* Rotate by odd number of bits and XOR. */ + hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); + hash ^= ptr[i]; + } + return hash; +} + +static inline __init unsigned long get_random_boot(void) +{ + unsigned long entropy = random_get_entropy(); + unsigned long hash = 0; + + /* Attempt to create a simple but unpredictable starting entropy. 
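+	 * The mixing step in rotate_xor() is hash = rotr64(hash, 7) ^ word,
+	 * folded first over linux_banner (build-specific text) and then
+	 * over the value returned by random_get_entropy(), typically a
+	 * cycle counter. It is a cheap scrambler, not a cryptographic hash.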
+	 */
+	hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+	/* Add in any runtime entropy we can get */
+	hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+	return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+	char *str;
+
+	str = strstr(COMMAND_LINE, "nokaslr");
+	if (str == COMMAND_LINE || (str > COMMAND_LINE && *(str - 1) == ' '))
+		return true;
+
+	return false;
+}
+
+static unsigned long __init determine_relocation_offset(void)
+{
+	/* Choose a new address for the kernel */
+	unsigned long kernel_length;
+	unsigned long offset;
+
+	if (kaslr_disabled())
+		return 0;
+
+	kernel_length = (unsigned long)_end - (unsigned long)(&_text);
+
+	/* TODO: the offset is 64KB-aligned; 8KB alignment might be enough. */
+	offset = get_random_boot() << 16;
+	offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+	if (offset < kernel_length)
+		offset += ALIGN(kernel_length, 0x10000);
+
+	/*
+	 * TODO: the new location should not overlap initrd, dtb, acpi
+	 * tables, etc.
+	 */
+
+	if ((KTEXT_MAX - (unsigned long)_end) < offset)
+		offset = 0;
+
+	return offset;
+}
+
+#else
+
+static inline unsigned long __init determine_relocation_offset(void)
+{
+	/*
+	 * Choose a new address for the kernel
+	 * For now we'll hard code the destination offset.
+	 */
+	return 0;
+}
+
+#endif
+
+static inline int __init relocation_offset_valid(unsigned long offset)
+{
+	unsigned long loc_new = (unsigned long)_text + offset;
+
+	if (loc_new & 0x0000ffff) {
+		/* Inappropriately aligned new location */
+		return 0;
+	}
+	if (loc_new < (unsigned long)&_end) {
+		/* New location overlaps original kernel */
+		return 0;
+	}
+	return 1;
+}
+
+unsigned int __init relocate_kernel(void)
+{
+	void *loc_new;
+	unsigned long kernel_length;
+	unsigned long bss_length;
+	unsigned int offset = 0;
+	int res = 1;
+
+	kernel_length = (unsigned long)(&_relocation_start) - (long)(&_text);
+	bss_length = (unsigned long)&__bss_stop - (long)&__bss_start;
+
+	offset = determine_relocation_offset();
+	/* Reset the command line now so we don't end up with a duplicate */
+
+	/* Sanity check relocation address */
+	if (offset && relocation_offset_valid(offset)) {
+
+		loc_new = RELOCATED(&_text);
+		/* Copy the kernel to its new location */
+		memcpy(loc_new, &_text, kernel_length);
+
+		/* Perform relocations on the new kernel */
+		res = do_relocations(&_text, loc_new, offset);
+		if (res < 0)
+			goto out;
+
+		res = relocate_got(offset);
+		if (res < 0)
+			goto out;
+
+		/*
+		 * The original .bss has already been cleared, and some
+		 * variables such as command line parameters have been
+		 * stored to it, so make a copy in the new location.
+		 */
+		memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+		/*
+		 * Last chance for the platform to abort relocation.
+		 * This may also be used by the platform to perform any
+		 * initialisation required now that the new kernel is
+		 * resident in memory and ready to be executed.
+		 */
+		if (plat_post_relocation(offset))
+			goto out;
+
+		/* Return the new kernel's offset */
+		return offset;
+	}
+out:
+	return 0;
+}
+
+/*
+ * Show relocation information on panic.
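+ * Prints the relocation offset and the runtime .text/.data/.bss
+ * addresses; note that %pK may censor the values depending on
+ * kptr_restrict.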
+ */ +void show_kernel_relocation(const char *level) +{ + unsigned long offset; + + offset = __pa_symbol(_text) - __pa_symbol(_TEXT_START); + + if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) { + printk(level); + pr_cont("Kernel relocated by 0x%pK\n", (void *)offset); + pr_cont(" .text @ 0x%pK\n", _text); + pr_cont(" .data @ 0x%pK\n", _sdata); + pr_cont(" .bss @ 0x%pK\n", __bss_start); + } +} + +static int kernel_location_notifier_fn(struct notifier_block *self, + unsigned long v, void *p) +{ + show_kernel_relocation(KERN_EMERG); + return NOTIFY_DONE; +} + +static struct notifier_block kernel_location_notifier = { + .notifier_call = kernel_location_notifier_fn +}; + +static int __init register_kernel_offset_dumper(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, + &kernel_location_notifier); + return 0; +} +device_initcall(register_kernel_offset_dumper); diff --git a/arch/sw_64/kernel/relocate_kernel.S b/arch/sw_64/kernel/relocate_kernel.S new file mode 100644 index 0000000000000000000000000000000000000000..f1a160636212fed8e73dd32616edaea155c51154 --- /dev/null +++ b/arch/sw_64/kernel/relocate_kernel.S @@ -0,0 +1,176 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * relocate_kernel.S for kexec + * Created by Jul 2 2019 + * + * This source code is licensed under the GNU General Public License, + * Version 2. See the file COPYING for more details. + */ + +#include +#include + + .align 3 + .globl relocate_new_kernel + .ent relocate_new_kernel + +relocate_new_kernel: + .prologue 0 + ldl a0, arg0 + ldl a1, arg1 + ldl a2, arg2 + ldl a3, arg3 + + ldl s0, kexec_indirection_page + ldl s1, kexec_start_address + +process_entry: + ldl s2, 0(s0) + addl s0, 8, s0 + + /* + * In case of a kdump/crash kernel, the indirection page is not + * populated as the kernel is directly copied to a reserved location + */ + beq s2, done + + /* destination page */ + and s2, 0x1, s3 + beq s3, 1f + bic s2, 0x1, s4/* store destination addr in s4 */ + br $31, process_entry + +1: + /* indirection page, update s0*/ + and s2, 0x2, s3 + beq s3, 1f + bic s2, 0x2, s0 + br $31, process_entry + +1: + /* done page */ + and s2, 0x4, s3 + beq s3, 1f + br $31, done +1: + /* source page */ + and s2, 0x8, s3 + beq s3, process_entry + bic s2, 0x8, s2 + ldi s6, 0x1 + sll s6, (PAGE_SHIFT - 3), s6 + +copy_word: + /* copy page word by word */ + ldl s5, 0(s2) + stl s5, 0(s4) + addl s4, 8, s4 + addl s2, 8, s2 + subl s6, 1, s6 + beq s6, process_entry + br $31, copy_word + br $31, process_entry + +done: +#ifdef CONFIG_CRASH_SMP /* unsupported now!!!! */ + /* kexec_flag reset is signal to other CPUs what kernel + * was moved to it's location. Note - we need relocated address + * of kexec_flag. + */ + + br ra, 1f +1: mov ra, t1 + ldi t2, 1b + ldi t0, kexec_flag + subl t0, t2, t0 + addl t1, t0, t0 + stl zero, 0(t0) +#endif + memb + jmp ra, (s1) + .end relocate_new_kernel + .size relocate_new_kernel, .-relocate_new_kernel + +#ifdef CONFIG_CRASH_SMP + /* + * Other CPUs should wait until code is relocated and + * then start at entry (?) point. + */ + .align 3 + .globl kexec_smp_wait + .ent kexec_smp_wait +kexec_smp_wait: + ldl a0, s_arg0 + ldl a1, s_arg1 + ldl a2, s_arg2 + ldl a3, s_arg3 + ldl s1, kexec_start_address + + /* Non-relocated address works for args and kexec_start_address (old + * kernel is not overwritten). But we need relocated address of + * kexec_flag. 
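+	 * The bsr/ldi sequence below computes it position-independently:
+	 * t1 gets the run-time pc of label 1, while t2 and t0 get the
+	 * link-time addresses of 1b and kexec_flag, so t0 - t2 + t1 is
+	 * the run-time address of kexec_flag.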
+ */ + + bsr ra, 1f +1: mov ra, t1 + ldi t2, 1b + ldi t0, kexec_flag + subl t0, t2, t0 + addl t1, t0, t0 + +1: stl s0, 0(t0) + bne s0, 1b + memb + jmp ra, (s1) + .end kexec_smp_wait + .size kexec_smp_wait, .-kexec_smp_wait +#endif + + .align 3 + + /* All parameters to new kernel are passed in registers a0-a3. + * kexec_args[0..3] are uses to prepare register values. + */ + +kexec_args: + .globl kexec_args +arg0: .quad 0x0 +arg1: .quad 0x0 +arg2: .quad 0x0 +arg3: .quad 0x0 + .size kexec_args, 8*4 + +#ifdef CONFIG_CRASH_SMP + /* + * Secondary CPUs may have different kernel parameters in + * their registers a0-a3. secondary_kexec_args[0..3] are used + * to prepare register values. + */ +secondary_kexec_args: + .globl secondary_kexec_args +s_arg0: .quad 0x0 +s_arg1: .quad 0x0 +s_arg2: .quad 0x0 +s_arg3: .quad 0x0 + .size secondary_kexec_args, 8*4 + +kexec_flag: + .quad 0x1 +#endif + +kexec_start_address: + .globl kexec_start_address + .quad 0x0 + .size kexec_start_address, 8 + +kexec_indirection_page: + .globl kexec_indirection_page + .quad 0 + .size kexec_indirection_page, 8 + +relocate_new_kernel_end: + +relocate_new_kernel_size: + .global relocate_new_kernel_size + .quad relocate_new_kernel_end - relocate_new_kernel + .size relocate_new_kernel_size, 8 diff --git a/arch/sw_64/kernel/reset.c b/arch/sw_64/kernel/reset.c new file mode 100644 index 0000000000000000000000000000000000000000..955339557a7a1d8eec504f0f85ad0241e82fc833 --- /dev/null +++ b/arch/sw_64/kernel/reset.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020-2022 Sunway Technology Corporation Limited + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +void fix_jm585_reset(void) +{ + struct pci_dev *pdev; + struct pci_controller *hose; + int val; + + pdev = pci_get_device(PCI_VENDOR_ID_JMICRON, + 0x0585, NULL); + if (pdev) { + hose = pci_bus_to_pci_controller(pdev->bus); + val = read_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val | 0x8); + write_rc_conf(hose->node, hose->index, + RC_PORT_LINK_CTL, val); + } + +} +static void default_halt(void) +{ + local_irq_disable(); + + pr_notice("\n\n** You can safely turn off the power now **\n\n"); + + while (true) + arch_cpu_idle(); +} + +static void default_poweroff(void) +{ + /* No point in taking interrupts anymore. */ + local_irq_disable(); +#ifdef CONFIG_EFI + efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL); +#endif + while (true) + arch_cpu_idle(); +} + +static void default_restart(void) +{ + /* No point in taking interrupts anymore. 
*/ + local_irq_disable(); + + fix_jm585_reset(); +#ifdef CONFIG_EFI + if (efi_capsule_pending(NULL)) + efi_reboot(REBOOT_WARM, NULL); + else + efi_reboot(REBOOT_COLD, NULL); +#endif + + while (true) + arch_cpu_idle(); +} + +void (*pm_restart)(void); + +void (*pm_power_off)(void); +EXPORT_SYMBOL(pm_power_off); + +void (*pm_halt)(void); + +void machine_halt(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_halt(); +} + +void machine_power_off(void) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + pm_power_off(); +} + +void machine_restart(char *command) +{ +#ifdef CONFIG_SMP + preempt_disable(); + smp_send_stop(); +#endif + do_kernel_restart(command); + pm_restart(); +} + +static int __init sw64_reboot_setup(void) +{ + pm_restart = default_restart; + pm_power_off = default_poweroff; + pm_halt = default_halt; + + return 0; +} +arch_initcall(sw64_reboot_setup); diff --git a/arch/sw_64/kernel/segvdbg.c b/arch/sw_64/kernel/segvdbg.c new file mode 100644 index 0000000000000000000000000000000000000000..148d639a08dbb721ee3dd8a10e076937fe254f6a --- /dev/null +++ b/arch/sw_64/kernel/segvdbg.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Zhi Tongze + * Author: Zhi Tongze + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include + +#include + +extern bool segv_debug_enabled; + +static int __init segv_debug_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + + debugfs_create_bool("segv_debug", 0644, + sw64_debugfs_dir, &segv_debug_enabled); + return 0; +} +late_initcall(segv_debug_init); diff --git a/arch/sw_64/kernel/setup.c b/arch/sw_64/kernel/setup.c new file mode 100644 index 0000000000000000000000000000000000000000..0c1ddb9b46d7bb0e74308a97a1830730ad3a44eb --- /dev/null +++ b/arch/sw_64/kernel/setup.c @@ -0,0 +1,1061 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Bootup setup stuff. + */ + +#include +#include +#include +#include +#include +#include +#include +#ifdef CONFIG_MAGIC_SYSRQ +#include +#include +#endif +#ifdef CONFIG_DEBUG_FS +#include +#endif +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +#undef DEBUG_DISCONTIG +#ifdef DEBUG_DISCONTIG +#define DBGDCONT(args...) pr_debug(args) +#else +#define DBGDCONT(args...) +#endif + +int __cpu_to_rcid[NR_CPUS]; /* Map logical to physical */ +EXPORT_SYMBOL(__cpu_to_rcid); + +DEFINE_PER_CPU(unsigned long, hard_node_id) = { 0 }; +static DEFINE_PER_CPU(struct cpu, cpu_devices); + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +struct cma *sw64_kvm_cma; +EXPORT_SYMBOL(sw64_kvm_cma); + +static phys_addr_t kvm_mem_size; +static phys_addr_t kvm_mem_base; + +struct gen_pool *sw64_kvm_pool; +EXPORT_SYMBOL(sw64_kvm_pool); +#endif +#endif + +static inline int phys_addr_valid(unsigned long addr) +{ + /* + * At this point memory probe has not been done such that max_pfn + * and other physical address variables cannot be used, so let's + * roughly judge physical address based on arch specific bit. 
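+	 * E.g. with cpu_desc.pa_bits == 48, any address at or above
+	 * 1UL << 47 is rejected.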
+	 */
+	return !(addr >> (cpu_desc.pa_bits - 1));
+}
+
+extern struct atomic_notifier_head panic_notifier_list;
+static int sw64_panic_event(struct notifier_block *, unsigned long, void *);
+static struct notifier_block sw64_panic_block = {
+	sw64_panic_event,
+	NULL,
+	INT_MAX /* try to do it first */
+};
+
+/* the value is IOR: CORE_ONLINE */
+cpumask_t core_start = CPU_MASK_NONE;
+
+static struct resource data_resource = {
+	.name = "Kernel data",
+	.start = 0,
+	.end = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource code_resource = {
+	.name = "Kernel code",
+	.start = 0,
+	.end = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+static struct resource bss_resource = {
+	.name = "Kernel bss",
+	.start = 0,
+	.end = 0,
+	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
+};
+
+/* A collection of per-processor data. */
+struct cpuinfo_sw64 cpu_data[NR_CPUS];
+EXPORT_SYMBOL(cpu_data);
+
+DEFINE_STATIC_KEY_TRUE(run_mode_host_key);
+DEFINE_STATIC_KEY_FALSE(run_mode_guest_key);
+DEFINE_STATIC_KEY_FALSE(run_mode_emul_key);
+struct cpu_desc_t cpu_desc;
+struct socket_desc_t socket_desc[MAX_NUMSOCKETS];
+int memmap_nr;
+struct memmap_entry memmap_map[MAX_NUMMEMMAPS];
+bool memblock_initialized;
+
+cpumask_t cpu_offline = CPU_MASK_NONE;
+
+static char command_line[COMMAND_LINE_SIZE] __initdata;
+#ifdef CONFIG_CMDLINE_BOOL
+static char builtin_cmdline[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
+#endif
+
+/* boot_params */
+struct boot_params *sunway_boot_params = (struct boot_params *) (PARAM + 0x100);
+
+/*
+ * The format of "screen_info" is strange, and due to early
+ * i386-setup code. This is just enough to make the console
+ * code think we're on a VGA color display.
+ */
+
+struct screen_info screen_info = {
+	.orig_x = 0,
+	.orig_y = 25,
+	.orig_video_cols = 80,
+	.orig_video_lines = 25,
+	.orig_video_isVGA = 1,
+	.orig_video_points = 16
+};
+EXPORT_SYMBOL(screen_info);
+
+/*
+ * Move global data into per-processor storage.
+ */
+void store_cpu_data(int cpu)
+{
+	cpu_data[cpu].last_asid = ASID_FIRST_VERSION;
+}
+
+#ifdef CONFIG_KEXEC
+
+void *kexec_control_page;
+
+#define KTEXT_MAX KERNEL_IMAGE_SIZE
+
+static void __init kexec_control_page_init(void)
+{
+	phys_addr_t addr;
+
+	addr = memblock_phys_alloc_range(KEXEC_CONTROL_PAGE_SIZE, PAGE_SIZE,
+					 0, KTEXT_MAX);
+	kexec_control_page = (void *)(__START_KERNEL_map + addr);
+}
+
+/*
+ * reserve_crashkernel() - reserves memory area for crash kernel
+ *
+ * This function reserves memory area given in "crashkernel=" kernel command
+ * line parameter. The memory reserved is used by a dump capture kernel when
+ * primary kernel is crashing.
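+ *
+ * Example (illustrative): booting with "crashkernel=256M@2G" reserves
+ * 256 MiB at physical address 2 GiB for the capture kernel.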
+ */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_size, crash_base; + int ret; + + ret = parse_crashkernel(boot_command_line, mem_desc.size, + &crash_size, &crash_base); + if (ret || !crash_size) + return; + + if (!crash_size) { + pr_warn("size of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + if (!crash_base) { + pr_warn("base of crash kernel memory unspecified, no memory reserved for crash kernel\n"); + return; + } + + if (!memblock_is_region_memory(crash_base, crash_size)) + memblock_add(crash_base, crash_size); + + ret = memblock_reserve(crash_base, crash_size); + if (ret < 0) { + pr_warn("crashkernel reservation failed - memory is in use [mem %#018llx-%#018llx]\n", + crash_base, crash_base + crash_size - 1); + return; + } + + pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n", + (unsigned long)(crash_size >> 20), + (unsigned long)(crash_base >> 20), + (unsigned long)(mem_desc.size >> 20)); + + ret = add_memmap_region(crash_base, crash_size, memmap_crashkernel); + if (ret) + pr_warn("Add crash kernel area [mem %#018llx-%#018llx] to memmap region failed.\n", + crash_base, crash_base + crash_size - 1); + + if (crash_base >= KERNEL_IMAGE_SIZE) + pr_warn("Crash base should be less than %#x\n", KERNEL_IMAGE_SIZE); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; + insert_resource(&iomem_resource, &crashk_res); +} +#else /* !defined(CONFIG_KEXEC) */ +static void __init reserve_crashkernel(void) {} +static void __init kexec_control_page_init(void) {} +#endif /* !defined(CONFIG_KEXEC) */ + +/* + * I/O resources inherited from PeeCees. Except for perhaps the + * turbochannel SWs, everyone has these on some sort of SuperIO chip. + * + * ??? If this becomes less standard, move the struct out into the + * machine vector. + */ + +static void __init +reserve_std_resources(void) +{ + static struct resource standard_io_resources[] = { + { .name = "rtc", .start = -1, .end = -1 }, + { .name = "dma1", .start = 0x00, .end = 0x1f }, + { .name = "pic1", .start = 0x20, .end = 0x3f }, + { .name = "timer", .start = 0x40, .end = 0x5f }, + { .name = "keyboard", .start = 0x60, .end = 0x6f }, + { .name = "dma page reg", .start = 0x80, .end = 0x8f }, + { .name = "pic2", .start = 0xa0, .end = 0xbf }, + { .name = "dma2", .start = 0xc0, .end = 0xdf }, + }; + + struct resource *io = &ioport_resource; + size_t i; + + if (hose_head) { + struct pci_controller *hose; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->index == 0) { + io = hose->io_space; + break; + } + } + + /* Fix up for the Jensen's queer RTC placement. 
*/ + standard_io_resources[0].start = RTC_PORT(0); + standard_io_resources[0].end = RTC_PORT(0) + 0x10; + + for (i = 0; i < ARRAY_SIZE(standard_io_resources); ++i) + request_resource(io, standard_io_resources+i); +} + +static int __init parse_memmap_one(char *p) +{ + char *oldp; + u64 start_at, mem_size; + int ret; + + if (!p) + return -EINVAL; + + if (!strncmp(p, "exactmap", 8)) { + pr_err("\"memmap=exactmap\" not valid on sw64\n"); + return 0; + } + + oldp = p; + mem_size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') { + pr_err("\"memmap=nn@ss\" invalid on sw64\n"); + } else if (*p == '#') { + pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on sw64\n"); + } else if (*p == '$') { + start_at = memparse(p + 1, &p); + ret = add_memmap_region(start_at, mem_size, memmap_reserved); + if (ret) + return ret; + } else { + return -EINVAL; + } + return *p == '\0' ? 0 : -EINVAL; +} + +static int __init setup_memmap(char *str) +{ + while (str) { + char *k = strchr(str, ','); + + if (k) + *k++ = 0; + + parse_memmap_one(str); + str = k; + } + + return 0; +} +early_param("memmap", setup_memmap); + +static int __init setup_cpuoffline(char *p) +{ + cpulist_parse(p, &cpu_offline); + cpumask_clear_cpu(0, &cpu_offline); + return 0; +} +early_param("cpuoffline", setup_cpuoffline); + +#ifdef CONFIG_BLK_DEV_INITRD +static void * __init move_initrd(unsigned long mem_limit) +{ + void *start; + unsigned long size; + + size = initrd_end - initrd_start; + start = memblock_alloc_from(PAGE_ALIGN(size), PAGE_SIZE, 0); + if (!start || __pa(start) + size > mem_limit) { + initrd_start = initrd_end = 0; + return NULL; + } + memmove(start, (void *)initrd_start, size); + initrd_start = (unsigned long)start; + initrd_end = initrd_start + size; + pr_info("initrd moved to 0x%px\n", start); + return start; +} +#else +static void * __init move_initrd(unsigned long mem_limit) +{ + return NULL; +} +#endif + +static bool __init memmap_range_valid(phys_addr_t base, phys_addr_t *size) +{ + if (base > memblock_end_of_DRAM()) + return false; + + if ((base + *size) > memblock_end_of_DRAM()) + *size = memblock_end_of_DRAM() - base; + + return true; +} + +void __init process_memmap(void) +{ + static int i; // Make it static so we won't start over again every time. 
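+	/*
+	 * Note, for illustration: given parse_memmap_one() above, reserved
+	 * regions reach this point via a command line such as
+	 *
+	 *   memmap=512M$0x180000000,256M$0x200000000
+	 *
+	 * (made-up addresses); each such region was recorded by
+	 * add_memmap_region() with type memmap_reserved and is marked
+	 * nomap below.
+	 */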
+ int ret; + phys_addr_t base, size; + unsigned long dma_end __maybe_unused = (MAX_DMA32_PFN << PAGE_SHIFT); + + if (!memblock_initialized) + return; + + for (; i < memmap_nr; i++) { + base = memmap_map[i].addr; + size = memmap_map[i].size; + switch (memmap_map[i].type) { + case memmap_reserved: + if (!memmap_range_valid(base, &size)) { + pr_err("reserved memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("reserved memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + else if (IS_ENABLED(CONFIG_ZONE_DMA32) && (base < dma_end)) + pr_warn("memmap region [mem %#018llx-%#018llx] overlapped with DMA32 region\n", + base, base + size - 1); + } + break; + case memmap_pci: + if (!memmap_range_valid(base, &size)) { + pr_err("pci memmap region [mem %#018llx-%#018llx] beyond end of memory (%#018llx)\n", + base, base + size - 1, memblock_end_of_DRAM()); + } else { + pr_info("pci memmap region [mem %#018llx-%#018llx]\n", + base, base + size - 1); + ret = memblock_mark_nomap(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + } + break; + case memmap_initrd: + if ((base + size) > memblock_end_of_DRAM()) { + phys_addr_t old_base = base; + + base = (unsigned long) move_initrd(memblock_end_of_DRAM()); + if (!base) { + pr_err("initrd memmap region [mem %#018llx-%#018llx] extends beyond end of memory (%#018llx)\n", + old_base, old_base + size - 1, memblock_end_of_DRAM()); + break; + } + memmap_map[i].addr = base; + } + pr_info("initrd memmap region [mem %#018llx-%#018llx]\n", base, base + size - 1); + ret = memblock_reserve(base, size); + if (ret) + pr_err("reserve memmap region [mem %#018llx-%#018llx] failed\n", + base, base + size - 1); + break; + case memmap_kvm: + case memmap_crashkernel: + /* kvm and crashkernel are handled elsewhere, skip */ + break; + case memmap_acpi: + pr_err("ACPI memmap region is not supported.\n"); + break; + case memmap_use: + pr_err("Force usage memmap region is not supported.\n"); + break; + case memmap_protected: + pr_err("Protected memmap region is not supported.\n"); + break; + default: + pr_err("Unknown type of memmap region.\n"); + } + } +} + +int __init add_memmap_region(u64 addr, u64 size, enum memmap_types type) +{ + if (memmap_nr >= ARRAY_SIZE(memmap_map)) { + pr_err("Ooops! 
Too many entries in the memory map!\n"); + return -EPERM; + } + + if (addr + size <= addr) { + pr_warn("Trying to add an invalid memory region, skipped\n"); + return -EINVAL; + } + + memmap_map[memmap_nr].addr = addr; + memmap_map[memmap_nr].size = size; + memmap_map[memmap_nr].type = type; + memmap_nr++; + + process_memmap(); + + return 0; +} + +static struct resource* __init +insert_ram_resource(u64 start, u64 end, bool reserved) +{ + struct resource *res = + kzalloc(sizeof(struct resource), GFP_ATOMIC); + if (!res) + return NULL; + if (reserved) { + res->name = "reserved"; + res->flags = IORESOURCE_MEM; + } else { + res->name = "System RAM"; + res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; + } + res->start = start; + res->end = end; + if (insert_resource(&iomem_resource, res)) { + kfree(res); + return NULL; + } + return res; +} + +static int __init request_standard_resources(void) +{ + struct memblock_region *mblk; + + extern char _text[], _etext[]; + extern char _sdata[], _edata[]; + extern char __bss_start[], __bss_stop[]; + + for_each_mem_region(mblk) { + if (!memblock_is_nomap(mblk)) + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 0); + else + insert_ram_resource(mblk->base, + mblk->base + mblk->size - 1, 1); + } + + code_resource.start = __pa_symbol(_text); + code_resource.end = __pa_symbol(_etext)-1; + data_resource.start = __pa_symbol(_sdata); + data_resource.end = __pa_symbol(_edata)-1; + bss_resource.start = __pa_symbol(__bss_start); + bss_resource.end = __pa_symbol(__bss_stop)-1; + + insert_resource(&iomem_resource, &code_resource); + insert_resource(&iomem_resource, &data_resource); + insert_resource(&iomem_resource, &bss_resource); + + return 0; +} +subsys_initcall(request_standard_resources); + +#ifdef CONFIG_NUMA +extern void cpu_set_node(void); +#endif + +static void __init show_socket_mem_layout(void) +{ + int i; + phys_addr_t base, size, end; + + base = 0; + + pr_info("Socket memory layout:\n"); + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + size = socket_desc[i].socket_mem; + end = base + size - 1; + pr_info("Socket %d: [mem %#018llx-%#018llx], size %llu\n", + i, base, end, size); + base = end + 1; + } + } + pr_info("Reserved memory size for Socket 0: %#lx\n", NODE0_START); +} + +int page_is_ram(unsigned long pfn) +{ + pfn <<= PAGE_SHIFT; + + return pfn >= mem_desc.base && pfn < (mem_desc.base + mem_desc.size); +} + +static int __init topology_init(void) +{ + int i, ret; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_devices, i); + +#ifdef CONFIG_HOTPLUG_CPU + if (i != 0) + cpu->hotpluggable = 1; +#endif + ret = register_cpu(cpu, i); + if (unlikely(ret)) + pr_warn("Warning: %s: register_cpu %d failed (%d)\n", + __func__, i, ret); + } + + return 0; +} +subsys_initcall(topology_init); + +static void __init setup_machine_fdt(void) +{ +#ifdef CONFIG_USE_OF + void *dt_virt; + const char *name; + + /* Give a chance to select kernel builtin DTB firstly */ + if (IS_ENABLED(CONFIG_BUILTIN_DTB)) + dt_virt = (void *)__dtb_start; + else { + dt_virt = (void *)sunway_boot_params->dtb_start; + if (virt_to_phys(dt_virt) < virt_to_phys(__bss_stop)) { + pr_emerg("BUG: DTB has been corrupted by kernel image!\n"); + while (true) + cpu_relax(); + } + } + + if (!phys_addr_valid(__boot_pa(dt_virt)) || + !early_init_dt_scan(dt_virt)) { + pr_crit("\n" + "Error: invalid device tree blob at virtual address %px\n" + "The dtb must be 8-byte aligned and must not exceed 2 MB in size\n" + "\nPlease check your bootloader.", + 
dt_virt); + + while (true) + cpu_relax(); + } + + name = of_flat_dt_get_machine_name(); + if (!name) + return; + + pr_info("Machine model: %s\n", name); +#else + pr_info("Kernel disable device tree support.\n"); + return; +#endif +} + +void __init device_tree_init(void) +{ + unflatten_and_copy_device_tree(); + sunway_boot_params->dtb_start = (__u64)initial_boot_params; +} + +static void __init setup_cpu_info(void) +{ + int i; + struct cache_desc *c; + unsigned long val; + + val = cpuid(GET_TABLE_ENTRY, 0); + cpu_desc.model = CPUID_MODEL(val); + cpu_desc.family = CPUID_FAMILY(val); + cpu_desc.chip_var = CPUID_CHIP_VAR(val); + cpu_desc.arch_var = CPUID_ARCH_VAR(val); + cpu_desc.arch_rev = CPUID_ARCH_REV(val); + cpu_desc.pa_bits = CPUID_PA_BITS(val); + cpu_desc.va_bits = CPUID_VA_BITS(val); + + for (i = 0; i < VENDOR_ID_MAX; i++) { + val = cpuid(GET_VENDOR_ID, i); + memcpy(cpu_desc.vendor_id + (i * 8), &val, 8); + } + + for (i = 0; i < MODEL_MAX; i++) { + val = cpuid(GET_MODEL, i); + memcpy(cpu_desc.model_id + (i * 8), &val, 8); + } + + cpu_desc.frequency = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + + for (i = 0; i < NR_CPUS; i++) { + c = &(cpu_data[i].icache); + val = cpuid(GET_CACHE_INFO, L1_ICACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].dcache); + val = cpuid(GET_CACHE_INFO, L1_DCACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].scache); + val = cpuid(GET_CACHE_INFO, L2_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + + c = &(cpu_data[i].tcache); + val = cpuid(GET_CACHE_INFO, L3_CACHE); + c->size = CACHE_SIZE(val); + c->linesz = 1 << (CACHE_LINE_BITS(val)); + c->sets = 1 << (CACHE_INDEX_BITS(val)); + c->ways = c->size / c->sets / c->linesz; + } +} + +static void __init setup_run_mode(void) +{ + if (*(unsigned long *)MMSIZE) { + static_branch_disable(&run_mode_host_key); + if (*(unsigned long *)MMSIZE & EMUL_FLAG) { + pr_info("run mode: emul\n"); + static_branch_disable(&run_mode_guest_key); + static_branch_enable(&run_mode_emul_key); + + } else { + pr_info("run mode: guest\n"); + static_branch_enable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } + } else { + pr_info("run mode: host\n"); + static_branch_enable(&run_mode_host_key); + static_branch_disable(&run_mode_guest_key); + static_branch_disable(&run_mode_emul_key); + } +} + +static void __init setup_socket_info(void) +{ + int i; + int numsockets = sw64_chip->get_cpu_num(); + + memset(socket_desc, 0, MAX_NUMSOCKETS * sizeof(struct socket_desc_t)); + + for (i = 0; i < numsockets; i++) { + socket_desc[i].is_online = 1; + if (sw64_chip_init->early_init.get_node_mem) + socket_desc[i].socket_mem = sw64_chip_init->early_init.get_node_mem(i); + } +} + +#ifdef CONFIG_BLK_DEV_INITRD +static void __init reserve_mem_for_initrd(void) +{ + int ret; + + initrd_start = sunway_boot_params->initrd_start; + if (initrd_start) { + initrd_start = __pa(initrd_start) + PAGE_OFFSET; + initrd_end = initrd_start + sunway_boot_params->initrd_size; + pr_info("Initial ramdisk at: 0x%px (%llu bytes)\n", + (void *)initrd_start, sunway_boot_params->initrd_size); + + ret = add_memmap_region(__pa(initrd_start), initrd_end - initrd_start, memmap_initrd); + 
if (ret) + pr_err("Add initrd area [mem %#018lx-%#018lx] to memmap region failed.\n", + __pa(initrd_start), __pa(initrd_end - 1)); + } +} +#endif /* CONFIG_BLK_DEV_INITRD */ + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init early_kvm_reserved_mem(char *p) +{ + if (!p) { + pr_err("Config string not provided\n"); + return -EINVAL; + } + + kvm_mem_size = memparse(p, &p); + if (*p != '@') + return -EINVAL; + kvm_mem_base = memparse(p + 1, &p); + return 0; +} +early_param("kvm_mem", early_kvm_reserved_mem); + +void __init sw64_kvm_reserve(void) +{ + kvm_cma_declare_contiguous(kvm_mem_base, kvm_mem_size, 0, + PAGE_SIZE, 0, "sw64_kvm_cma", &sw64_kvm_cma); +} +#endif +#endif + +void __init +setup_arch(char **cmdline_p) +{ + /** + * Work around the unaligned access exception to parse ACPI + * tables in the following function acpi_boot_table_init(). + */ + trap_init(); + + jump_label_init(); + setup_cpu_info(); + setup_run_mode(); + setup_chip_ops(); + setup_socket_info(); + show_socket_mem_layout(); + sw64_chip_init->early_init.setup_core_map(&core_start); + if (is_guest_or_emul()) + get_vt_smp_info(); + + setup_sched_clock(); + + setup_machine_fdt(); + + /* Register a call for panic conditions. */ + atomic_notifier_chain_register(&panic_notifier_list, + &sw64_panic_block); + + callback_init(); + + /* command line */ + if (!sunway_boot_params->cmdline) + sunway_boot_params->cmdline = (unsigned long)COMMAND_LINE; + + strscpy(boot_command_line, (char *)sunway_boot_params->cmdline, COMMAND_LINE_SIZE); + +#if IS_ENABLED(CONFIG_CMDLINE_BOOL) +#if IS_ENABLED(CONFIG_CMDLINE_OVERRIDE) + strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + strscpy((char *)sunway_boot_params->cmdline, boot_command_line, COMMAND_LINE_SIZE); +#else + if (builtin_cmdline[0]) { + /* append builtin to boot loader cmdline */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); + } +#endif /* CMDLINE_EXTEND */ +#endif + + strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + /* + * Process command-line arguments. + */ + parse_early_param(); + + /* Find our memory. */ + mem_detect(); + +#ifdef CONFIG_PCI + reserve_mem_for_pci(); +#endif + +#ifdef CONFIG_BLK_DEV_INITRD + reserve_mem_for_initrd(); +#endif + + sw64_memblock_init(); + + reserve_crashkernel(); + + /* Reserve large chunks of memory for use by CMA for KVM. */ +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + sw64_kvm_reserve(); +#endif +#endif + + efi_init(); + + /* Try to upgrade ACPI tables via initrd */ + acpi_table_upgrade(); + + /* Parse the ACPI tables for possible boot-time configuration */ + acpi_boot_table_init(); + +#ifdef CONFIG_SMP + setup_smp(); +#else + store_cpu_data(0); +#endif + + sw64_numa_init(); + + memblock_dump_all(); + + sparse_init(); + + zone_sizes_init(); + + paging_init(); + + kexec_control_page_init(); + + /* + * Initialize the machine. Usually has to do with setting up + * DMA windows and the like. + */ + sw64_init_arch(); + + /* Reserve standard resources. */ + reserve_std_resources(); + + /* + * Give us a default console. TGA users will see nothing until + * chr_dev_init is called, rather late in the boot sequence. + */ + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif + + /* Default root filesystem to sda2. 
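+	 * (Note: MKDEV(SCSI_DISK0_MAJOR, 2) below is /dev/sda2; a root=
+	 * argument on the command line normally overrides this.)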
*/ + ROOT_DEV = MKDEV(SCSI_DISK0_MAJOR, 2); + + if (acpi_disabled) { +#ifdef CONFIG_NUMA + cpu_set_node(); +#endif + device_tree_init(); + } +} + + +static int +show_cpuinfo(struct seq_file *f, void *slot) +{ + int i; + unsigned long cpu_freq; + + cpu_freq = cpuid(GET_CPU_FREQ, 0); + + for_each_online_cpu(i) { + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. + */ + seq_printf(f, "processor\t: %u\n" + "vendor_id\t: %s\n" + "cpu family\t: %d\n" + "model\t\t: %u\n" + "model name\t: %s CPU @ %lu.%lu%luGHz\n" + "cpu variation\t: %u\n" + "cpu revision\t: %u\n", + i, cpu_desc.vendor_id, cpu_desc.family, + cpu_desc.model, cpu_desc.model_id, + cpu_freq / 1000, (cpu_freq % 1000) / 100, + (cpu_freq % 100) / 10, + cpu_desc.arch_var, cpu_desc.arch_rev); + seq_printf(f, "cpu MHz\t\t: %lu.00\n" + "cache size\t: %u KB\n" + "physical id\t: %d\n" + "bogomips\t: %lu.%02lu\n", + get_cpu_freq() / 1000 / 1000, cpu_data[i].tcache.size >> 10, + cpu_topology[i].package_id, + loops_per_jiffy / (500000/HZ), + (loops_per_jiffy / (5000/HZ)) % 100); + + seq_printf(f, "flags\t\t: fpu simd vpn upn cpuid\n"); + seq_printf(f, "page size\t: %d\n", 8192); + seq_printf(f, "cache_alignment\t: %d\n", cpu_data[i].tcache.linesz); + seq_printf(f, "address sizes\t: %u bits physical, %u bits virtual\n\n", + cpu_desc.pa_bits, cpu_desc.va_bits); + } + return 0; +} + +/* + * We show only CPU #0 info. + */ +static void * +c_start(struct seq_file *f, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void * +c_next(struct seq_file *f, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static void +c_stop(struct seq_file *f, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + + +static int +sw64_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + return NOTIFY_DONE; +} + +static __init int add_pcspkr(void) +{ + struct platform_device *pd; + int ret; + + pd = platform_device_alloc("pcspkr", -1); + if (!pd) + return -ENOMEM; + + ret = platform_device_add(pd); + if (ret) + platform_device_put(pd); + + return ret; +} +device_initcall(add_pcspkr); + +#ifdef CONFIG_DEBUG_FS +struct dentry *sw64_debugfs_dir; +EXPORT_SYMBOL(sw64_debugfs_dir); + +static int __init debugfs_sw64(void) +{ + struct dentry *d; + + d = debugfs_create_dir("sw64", NULL); + if (!d) + return -ENOMEM; + sw64_debugfs_dir = d; + return 0; +} +arch_initcall(debugfs_sw64); +#endif + +#ifdef CONFIG_OF +static int __init sw64_of_init(void) +{ + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); + return 0; +} +core_initcall(sw64_of_init); +#endif + +#ifdef CONFIG_SUBARCH_C3B +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) +static int __init sw64_kvm_pool_init(void) +{ + int status = 0; + unsigned long kvm_pool_virt; + struct page *base_page, *end_page, *p; + + if (!sw64_kvm_cma) + goto out; + + kvm_pool_virt = (unsigned long)kvm_mem_base; + + sw64_kvm_pool = gen_pool_create(PAGE_SHIFT, -1); + if (!sw64_kvm_pool) + goto out; + + status = gen_pool_add_virt(sw64_kvm_pool, kvm_pool_virt, kvm_mem_base, + kvm_mem_size, -1); + if (status < 0) { + pr_err("failed to add memory chunks to sw64 kvm pool\n"); + gen_pool_destroy(sw64_kvm_pool); + sw64_kvm_pool = NULL; + goto out; + } + gen_pool_set_algo(sw64_kvm_pool, gen_pool_best_fit, NULL); + + base_page = pfn_to_page(kvm_mem_base >> PAGE_SHIFT); + end_page = 
pfn_to_page((kvm_mem_base + kvm_mem_size - 1) >> PAGE_SHIFT);
+
+	p = base_page;
+	while (p <= end_page && page_ref_count(p) == 0) {
+		set_page_count(p, 1);
+		page_mapcount_reset(p);
+		SetPageReserved(p);
+		p++;
+	}
+
+	return status;
+
+out:
+	return -ENOMEM;
+}
+core_initcall_sync(sw64_kvm_pool_init);
+#endif
+#endif
diff --git a/arch/sw_64/kernel/signal.c b/arch/sw_64/kernel/signal.c
new file mode 100644
index 0000000000000000000000000000000000000000..496f33bb1c89f0ae39d68e6972e08321736685df
--- /dev/null
+++ b/arch/sw_64/kernel/signal.c
@@ -0,0 +1,378 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sw_64/kernel/signal.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "proto.h"
+
+
+#define DEBUG_SIG 0
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+SYSCALL_DEFINE2(odd_sigprocmask, int, how, unsigned long, newmask)
+{
+	sigset_t oldmask;
+	sigset_t mask;
+	unsigned long res;
+
+	siginitset(&mask, newmask & _BLOCKABLE);
+	res = sigprocmask(how, &mask, &oldmask);
+	if (!res) {
+		force_successful_syscall_return();
+		res = oldmask.sig[0];
+	}
+	return res;
+}
+
+SYSCALL_DEFINE3(odd_sigaction, int, sig,
+		const struct odd_sigaction __user *, act,
+		struct odd_sigaction __user *, oact)
+{
+	struct k_sigaction new_ka, old_ka;
+	old_sigset_t mask;
+	int ret;
+
+	if (act) {
+		if (!access_ok(act, sizeof(*act)) ||
+		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
+		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+		    __get_user(mask, &act->sa_mask))
+			return -EFAULT;
+		siginitset(&new_ka.sa.sa_mask, mask);
+	}
+
+	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+	if (!ret && oact) {
+		if (!access_ok(oact, sizeof(*oact)) ||
+		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
+		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+
+#if _NSIG_WORDS > 1
+# error "Non SA_SIGINFO frame needs rearranging"
+#endif
+
+struct rt_sigframe {
+	struct siginfo info;
+	struct ucontext uc;
+};
+
+/*
+ * If this changes, userland unwinders that Know Things about our signal
+ * frame will break. Do not undertake lightly. It also implies an ABI
+ * change wrt the size of siginfo_t, which may cause some pain.
+ */
+extern char compile_time_assert
+	[offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 1 : -1];
+
+static long
+restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs)
+{
+	long err = __get_user(regs->pc, &sc->sc_pc);
+
+	err |= __copy_from_user(regs, sc->sc_regs, sizeof_field(struct pt_regs, regs));
+	/* simd-fp */
+	err |= __copy_from_user(&current->thread.fpstate, &sc->sc_fpregs,
+				offsetof(struct user_fpsimd_state, fpcr));
+	err |= __get_user(current->thread.fpstate.fpcr, &sc->sc_fpcr);
+
+	if (likely(!err))
+		__fpstate_restore(current);
+
+	return err;
+}
+
+/*
+ * Note that this syscall is also used by setcontext(3) to install
+ * a given sigcontext. This is because it's impossible to set *all*
+ * registers and transfer control from userland.
+ */
+
+SYSCALL_DEFINE1(sigreturn, struct sigcontext __user *, sc)
+{
+	struct pt_regs *regs = current_pt_regs();
+	sigset_t set;
+
+	force_successful_syscall_return();
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/* Verify that it's a good sigcontext before using it */
+	if (!access_ok(sc, sizeof(*sc)))
+		goto give_sigsegv;
+	if (__get_user(set.sig[0], &sc->sc_mask))
+		goto give_sigsegv;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(sc, regs))
+		goto give_sigsegv;
+
+	/* Send SIGTRAP if we're single-stepping: */
+	if (ptrace_cancel_bpt(current)) {
+		force_sig_fault(SIGTRAP, TRAP_BRKPT,
+				(void __user *)regs->pc);
+	}
+	return regs->regs[0];
+
+give_sigsegv:
+	force_sig(SIGSEGV);
+	return 0;
+}
+
+SYSCALL_DEFINE1(rt_sigreturn, struct rt_sigframe __user *, frame)
+{
+	struct pt_regs *regs = current_pt_regs();
+	sigset_t set;
+
+	force_successful_syscall_return();
+
+	/* Always make any pending restarted system calls return -EINTR */
+	current->restart_block.fn = do_no_restart_syscall;
+
+	/* Verify that it's a good ucontext_t before using it */
+	if (!access_ok(&frame->uc, sizeof(frame->uc)))
+		goto give_sigsegv;
+	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+		goto give_sigsegv;
+
+	set_current_blocked(&set);
+
+	if (restore_sigcontext(&frame->uc.uc_mcontext, regs))
+		goto give_sigsegv;
+
+	if (restore_altstack(&frame->uc.uc_stack))
+		goto give_sigsegv;
+
+	/* Send SIGTRAP if we're single-stepping: */
+	if (ptrace_cancel_bpt(current)) {
+		force_sig_fault(SIGTRAP, TRAP_BRKPT,
+				(void __user *)regs->pc);
+	}
+	return regs->regs[0];
+
+give_sigsegv:
+	force_sig(SIGSEGV);
+	return 0;
+}
+
+
+/*
+ * Set up a signal frame.
+ */
+
+static inline void __user *
+get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size)
+{
+	return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul);
+}
+
+static long
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+		 unsigned long mask)
+{
+	long err = 0;
+
+	err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack);
+	err |= __put_user(mask, &sc->sc_mask);
+	err |= __put_user(regs->pc, &sc->sc_pc);
+	err |= __put_user(8, &sc->sc_ps);
+
+	err |= __copy_to_user(sc->sc_regs, regs, sizeof_field(struct pt_regs, regs));
+	err |= __put_user(0, sc->sc_regs+31);
+	/* simd-fp */
+	__fpstate_save(current);
+	err |= __copy_to_user(&sc->sc_fpregs, &current->thread.fpstate,
+			      offsetof(struct user_fpsimd_state, fpcr));
+	err |= __put_user(current->thread.fpstate.fpcr, &sc->sc_fpcr);
+
+	return err;
+}
+
+static int
+setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
+{
+	unsigned long err = 0;
+	struct rt_sigframe __user *frame;
+
+	frame = get_sigframe(ksig, regs->regs[30], sizeof(*frame));
+	if (!access_ok(frame, sizeof(*frame)))
+		return -EFAULT;
+
+	if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+		err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+	/* Create the ucontext.
*/ + err |= __put_user(0, &frame->uc.uc_flags); + err |= __put_user(0, &frame->uc.uc_link); + err |= __put_user(set->sig[0], &frame->uc.uc_old_sigmask); + err |= __save_altstack(&frame->uc.uc_stack, regs->regs[30]); + err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + if (err) + return -EFAULT; + + /* "Return" to the handler */ + regs->regs[26] = VDSO_SYMBOL(current->mm->context.vdso, rt_sigreturn); + regs->regs[27] = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; + regs->regs[16] = ksig->sig; /* a0: signal number */ + if (ksig->ka.sa.sa_flags & SA_SIGINFO) { + /* a1: siginfo pointer, a2: ucontext pointer */ + regs->regs[17] = (unsigned long) &frame->info; + regs->regs[18] = (unsigned long) &frame->uc; + } else { + /* a1: exception code, a2: sigcontext pointer */ + regs->regs[17] = 0; + regs->regs[18] = (unsigned long) &frame->uc.uc_mcontext; + } + regs->regs[30] = (unsigned long) frame; + +#if DEBUG_SIG + pr_info("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", + current->comm, current->pid, frame, regs->pc, regs->regs[26]); +#endif + + return 0; +} + +/* + * OK, we're invoking a handler. + */ +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) +{ + sigset_t *oldset = sigmask_to_save(); + int ret; + + rseq_signal_deliver(ksig, regs); + + ret = setup_rt_frame(ksig, oldset, regs); + + signal_setup_done(ret, ksig, 0); +} + +/* + * Note that 'init' is a special process: it doesn't get signals it doesn't + * want to handle. Thus you cannot kill init even with a SIGKILL even by + * mistake. + * + * Note that we go through the signals twice: once to check the signals that + * the kernel can handle, and then we build all the user-level signal handling + * stack-frames in one go after that. + */ +static void +do_signal(struct pt_regs *regs) +{ + unsigned long single_stepping = ptrace_cancel_bpt(current); + struct ksignal ksig; + + /* This lets the debugger run, ... */ + if (get_signal(&ksig)) { + /* ... so re-check the single stepping. */ + single_stepping |= ptrace_cancel_bpt(current); + /* Whee! Actually deliver the signal. */ + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + if (!(ksig.ka.sa.sa_flags & SA_RESTART)) { + regs->regs[0] = EINTR; + break; + } + fallthrough; + case -ERESTARTNOINTR: + /* reset v0 and a3 and replay syscall */ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTARTNOHAND: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = EINTR; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + handle_signal(&ksig, regs); + } else { + single_stepping |= ptrace_cancel_bpt(current); + if (regs->orig_r0 != NO_SYSCALL) { + switch (syscall_get_error(current, regs)) { + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTARTNOHAND: + /* Reset v0 and a3 and replay syscall. 
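+				 * (Note: pc -= 4 steps back over the 4-byte
+				 * sys_call instruction so the syscall is
+				 * re-issued on return to user space.)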
*/ + regs->regs[0] = regs->orig_r0; + regs->regs[19] = regs->orig_r19; + regs->pc -= 4; + break; + case -ERESTART_RESTARTBLOCK: + /* Set v0 to the restart_syscall and replay */ + regs->regs[0] = __NR_restart_syscall; + regs->pc -= 4; + break; + } + regs->orig_r0 = NO_SYSCALL; + } + restore_saved_sigmask(); + } + if (single_stepping) + ptrace_set_bpt(current); /* re-set breakpoint */ +} + +asmlinkage void +do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) +{ + do { + local_irq_enable(); + + if (thread_flags & _TIF_NEED_RESCHED) + schedule(); + + if (thread_flags & _TIF_UPROBE) { + unsigned long pc = regs->pc; + + uprobe_notify_resume(regs); + sw64_fix_uretprobe(regs, pc - 4); + } + + if (thread_flags & _TIF_PATCH_PENDING) + klp_update_patch_state(current); + + if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) + resume_user_mode_work(regs); + + local_irq_disable(); + thread_flags = READ_ONCE(current_thread_info()->flags); + } while (thread_flags & _TIF_WORK_MASK); +} diff --git a/arch/sw_64/kernel/smp.c b/arch/sw_64/kernel/smp.c new file mode 100644 index 0000000000000000000000000000000000000000..6d1aab4be1c0c45badc962a908eed24e1e7a40dd --- /dev/null +++ b/arch/sw_64/kernel/smp.c @@ -0,0 +1,578 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * linux/arch/sw_64/kernel/smp.c + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +struct smp_rcb_struct *smp_rcb; + +extern struct cpuinfo_sw64 cpu_data[NR_CPUS]; + +int smp_booted; + +void *idle_task_pointer[NR_CPUS]; + +/* State of each CPU */ +DEFINE_PER_CPU(int, cpu_state) = { 0 }; + +/* A collection of single bit ipi messages. */ +static struct { + unsigned long bits ____cacheline_aligned; +} ipi_data[NR_CPUS] __cacheline_aligned; + +enum ipi_message_type { + IPI_RESCHEDULE, + IPI_CALL_FUNC, + IPI_CPU_STOP, +}; + +int smp_num_cpus = 1; /* Number that came online. */ +EXPORT_SYMBOL(smp_num_cpus); + +#define send_sleep_interrupt(cpu) send_ipi((cpu), II_SLEEP) +#define send_wakeup_interrupt(cpu) send_ipi((cpu), II_WAKE) + +/* + * Where secondaries begin a life of C. + */ +void smp_callin(void) +{ + int cpuid = smp_processor_id(); + + local_irq_disable(); + + if (cpu_online(cpuid)) { + pr_err("??, cpu 0x%x already present??\n", cpuid); + BUG(); + } + set_cpu_online(cpuid, true); + + /* clear ksp, usp */ + wrksp(0); + wrusp(0); + + /* Set trap vectors. */ + trap_init(); + + /* Set interrupt vector. */ + if (is_in_host()) { + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI0_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI1_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI2_INTEN); + write_csr(0xffffffffffffffffUL, CSR_PCIE_MSI3_INTEN); + } + wrent(entInt, 0); + + /* Get our local ticker going. */ + sw64_setup_timer(); + + /* All kernel threads share the same mm context. */ + mmgrab(&init_mm); + current->active_mm = &init_mm; + /* update csr:ptbr */ + update_ptbr_sys(virt_to_phys(init_mm.pgd)); + + /* inform the notifiers about the new cpu */ + notify_cpu_starting(cpuid); + + per_cpu(cpu_state, cpuid) = CPU_ONLINE; + per_cpu(hard_node_id, cpuid) = rcid_to_domain_id(cpu_to_rcid(cpuid)); + + /* Must have completely accurate bogos. */ + local_irq_enable(); + + /* Cpu0 init preempt_count at start_kernel, other smp cpus do here. */ + preempt_disable(); + + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + + +/* + * Set ready for secondary cpu. 
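+ * (Note: the secondary is assumed to spin in firmware/hmcode until
+ * smp_rcb->ready matches its cpuid; see smp_rcb_init() below.)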
+ */ +static inline void set_secondary_ready(int cpuid) +{ + smp_rcb->ready = cpuid; +} + +/* + * Convince the hmcode to have a secondary cpu begin execution. + */ +static int secondary_cpu_start(int cpuid, struct task_struct *idle) +{ + unsigned long timeout; + /* + * Precalculate the target ksp. + */ + idle_task_pointer[cpuid] = idle; + + set_cpu_online(cpuid, false); + wmb(); + + set_secondary_ready(cpuid); + + /* Wait 10 seconds for secondary cpu. */ + timeout = jiffies + 10*HZ; + while (time_before(jiffies, timeout)) { + if (cpu_online(cpuid)) + goto started; + udelay(10); + barrier(); + } + pr_err("SMP: Processor %d failed to start.\n", cpuid); + return -1; + +started: + store_cpu_topology(cpuid); + numa_add_cpu(cpuid); + return 0; +} + +/* + * Bring one cpu online. + */ +static int smp_boot_one_cpu(int cpuid, struct task_struct *idle) +{ + per_cpu(cpu_state, cpuid) = CPU_UP_PREPARE; + + return secondary_cpu_start(cpuid, idle); +} + +static void __init process_nr_cpu_ids(void) +{ + int i; + + for (i = nr_cpu_ids; i < NR_CPUS; i++) { + set_cpu_possible(i, false); + set_cpu_present(i, false); + } + + nr_cpu_ids = num_possible_cpus(); +} + +void __init smp_rcb_init(void) +{ + smp_rcb = INIT_SMP_RCB; + memset(smp_rcb, 0, sizeof(struct smp_rcb_struct)); + /* Setup SMP_RCB fields that uses to activate secondary CPU */ + smp_rcb->restart_entry = __smp_callin; + smp_rcb->init_done = 0xDEADBEEFUL; + mb(); +} + +/* + * Called from setup_arch. Detect an SMP system and which processors + * are present. + */ +void __init setup_smp(void) +{ + int i = 0, num = 0; + + init_cpu_possible(cpu_none_mask); + + /* For unified kernel, NR_CPUS is the maximum possible value */ + for (; i < NR_CPUS; i++) { + if (cpu_to_rcid(i) != -1) { + set_cpu_possible(num, true); + store_cpu_data(num); + if (!cpumask_test_cpu(i, &cpu_offline)) + set_cpu_present(num, true); + num++; + } + } + + process_nr_cpu_ids(); + + pr_info("Detected %u possible CPU(s), %u CPU(s) are present\n", + nr_cpu_ids, num_present_cpus()); + + smp_rcb_init(); +} +/* + * Called by smp_init prepare the secondaries + */ +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned int cpu; + /* Take care of some initial bookkeeping. */ + memset(ipi_data, 0, sizeof(ipi_data)); + + init_cpu_topology(); + store_cpu_topology(smp_processor_id()); + numa_add_cpu(smp_processor_id()); + + for_each_possible_cpu(cpu) { + numa_store_cpu_info(cpu); + } + + /* Nothing to do on a UP box, or when told not to. */ + if (nr_cpu_ids == 1 || max_cpus == 0) { + init_cpu_possible(cpumask_of(0)); + init_cpu_present(cpumask_of(0)); + pr_info("SMP mode deactivated.\n"); + return; + } + + pr_info("SMP starting up secondaries.\n"); +} + +void smp_prepare_boot_cpu(void) +{ + int me = smp_processor_id(); + + per_cpu(cpu_state, me) = CPU_ONLINE; +} + +int vt_cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + pr_info("%s: cpu = %d\n", __func__, cpu); + + wmb(); + smp_rcb->ready = 0; + if (smp_booted) { + /* irq must be disabled before reset vCPU */ + reset_cpu(cpu); + } + smp_boot_one_cpu(cpu, tidle); + + return cpu_online(cpu) ? 
0 : -EIO; +} + +#ifdef CONFIG_SUBARCH_C3B +DECLARE_STATIC_KEY_FALSE(use_tc_as_sched_clock); +#endif + +int __cpu_up(unsigned int cpu, struct task_struct *tidle) +{ + if (is_in_guest()) + return vt_cpu_up(cpu, tidle); + + wmb(); + smp_rcb->ready = 0; + + /* send wake up signal */ + send_wakeup_interrupt(cpu); + /* send reset signal */ + if (smp_booted) { + if (is_in_host()) { + reset_cpu(cpu); + } else { + while (1) + cpu_relax(); + } + } + smp_boot_one_cpu(cpu, tidle); + +#ifdef CONFIG_SUBARCH_C3B + if (static_branch_likely(&use_tc_as_sched_clock)) { + if (smp_booted) { + tc_sync_clear(); + smp_call_function_single(cpu, tc_sync_ready, NULL, 0); + tc_sync_set(); + } + } +#endif + + return cpu_online(cpu) ? 0 : -EIO; +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + smp_booted = 1; + pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); +} + +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} + + +static void send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation) +{ + int i; + + mb(); + for_each_cpu(i, to_whom) + set_bit(operation, &ipi_data[i].bits); + + mb(); + for_each_cpu(i, to_whom) + send_ipi(i, II_II0); +} + +static void ipi_cpu_stop(int cpu) +{ + local_irq_disable(); + set_cpu_online(cpu, false); + while (1) + wait_for_interrupt(); +} + +void handle_ipi(struct pt_regs *regs) +{ + int cpu = smp_processor_id(); + unsigned long *pending_ipis = &ipi_data[cpu].bits; + unsigned long ops; + + mb(); /* Order interrupt and bit testing. */ + while ((ops = xchg(pending_ipis, 0)) != 0) { + mb(); /* Order bit clearing and data access. */ + do { + unsigned long which; + + which = ops & -ops; + ops &= ~which; + which = __ffs(which); + + switch (which) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + break; + + case IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + + default: + pr_crit("Unknown IPI on CPU %d: %lu\n", cpu, which); + break; + } + } while (ops); + + mb(); /* Order data access and bit testing. */ + } + + cpu_data[cpu].ipi_count++; +} + +void arch_smp_send_reschedule(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); +} +EXPORT_SYMBOL(arch_smp_send_reschedule); + +void smp_send_stop(void) +{ + unsigned long timeout; + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + + if (system_state <= SYSTEM_RUNNING) + pr_crit("SMP: stopping secondary CPUs\n"); + send_ipi_message(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + + if (num_online_cpus() > 1) + pr_warn("SMP: failed to stop secondary CPUs %*pbl\n", + cpumask_pr_args(cpu_online_mask)); +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + send_ipi_message(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC); +} + +static void ipi_flush_tlb_all(void *ignored) +{ + local_flush_tlb_all(); +} + +void flush_tlb_all(void) +{ + /* Although we don't have any data to pass, we do want to + * synchronize with the other processors. 
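+	 * (Note: on_each_cpu() with wait == 1 returns only after every
+	 * online CPU has run ipi_flush_tlb_all().)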
+ */ + on_each_cpu(ipi_flush_tlb_all, NULL, 1); +} + +static void ipi_flush_tlb_mm(void *x) +{ + local_flush_tlb_mm((struct mm_struct *)x); +} + +void flush_tlb_mm(struct mm_struct *mm) +{ + + /* happens as a result of exit_mmap() + * Shall we clear mm->context.asid[] here? + */ + if (atomic_read(&mm->mm_users) == 0) + return; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_mm(mm); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_mm); + +struct flush_tlb_info { + struct vm_area_struct *vma; + unsigned long addr; +#define start addr + unsigned long end; +}; + +static void ipi_flush_tlb_page(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_page(info->vma, info->addr); +} + +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) +{ + struct mm_struct *mm = vma->vm_mm; + + preempt_disable(); + + if (atomic_read(&mm->mm_users) != 1 || mm != current->mm) { + struct flush_tlb_info info = { + .vma = vma, + .addr = addr, + }; + on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_page, &info, 1); + } else { + int cpu, this_cpu = smp_processor_id(); + + for_each_online_cpu(cpu) { + if (cpu != this_cpu && mm->context.asid[cpu]) + mm->context.asid[cpu] = 0; + } + local_flush_tlb_page(vma, addr); + } + + preempt_enable(); +} +EXPORT_SYMBOL(flush_tlb_page); + +/* It always flush the whole user tlb by now. To be optimized. */ +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + flush_tlb_mm(vma->vm_mm); +} +EXPORT_SYMBOL(flush_tlb_range); + +static void ipi_flush_tlb_kernel_range(void *x) +{ + struct flush_tlb_info *info = x; + + local_flush_tlb_kernel_range(info->start, info->end); +} + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + struct flush_tlb_info info = { + .start = start, + .end = end, + }; + + on_each_cpu(ipi_flush_tlb_kernel_range, &info, 1); +} +EXPORT_SYMBOL(flush_tlb_kernel_range); + +#ifdef CONFIG_HOTPLUG_CPU +int __cpu_disable(void) +{ + int cpu = smp_processor_id(); + + set_cpu_online(cpu, false); + remove_cpu_topology(cpu); + numa_remove_cpu(cpu); + clear_tasks_mm_cpumask(cpu); + return 0; +} + +void __cpu_die(unsigned int cpu) +{ + /* We don't do anything here: idle task is faking death itself. 
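+	 * (Note: the dying CPU marks itself CPU_DEAD in arch_cpu_idle_dead()
+	 * below; the loop here just polls for that.)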
*/ + unsigned int i; + + for (i = 0; i < 10; i++) { + /* They ack this in play_dead by setting CPU_DEAD */ + if (per_cpu(cpu_state, cpu) == CPU_DEAD) { + if (system_state == SYSTEM_RUNNING) + pr_info("CPU %u is now offline\n", cpu); + smp_rcb->ready = 0; + return; + } + msleep(100); + } + pr_err("CPU %u didn't die...\n", cpu); +} + +void arch_cpu_idle_dead(void) +{ + idle_task_exit(); + mb(); + __this_cpu_write(cpu_state, CPU_DEAD); + fixup_irqs(); + local_irq_disable(); + + if (is_in_guest()) { + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + hcall(HCALL_STOP, 0, 0, 0); + } else { + wrtimer(0); + } + +#ifdef CONFIG_SUSPEND + sleepen(); + send_sleep_interrupt(smp_processor_id()); + while (1) + asm("nop"); +#else + asm volatile("memb"); + asm volatile("halt"); +#endif +} +#endif diff --git a/arch/sw_64/kernel/stacktrace.c b/arch/sw_64/kernel/stacktrace.c new file mode 100644 index 0000000000000000000000000000000000000000..ff00506d5b824727161449fa8c5f3602574c1e6e --- /dev/null +++ b/arch/sw_64/kernel/stacktrace.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stack trace management functions + * + * Copyright (C) 2018 snyh + */ +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * sw_64 PCS assigns the frame pointer to r15. + * + * A simple function prologue looks like this: + * ldi sp,-xx(sp) + * stl ra,0(sp) + * stl fp,8(sp) + * mov sp,fp + * + * A simple function epilogue looks like this: + * mov fp,sp + * ldl ra,0(sp) + * ldl fp,8(sp) + * ldi sp,+xx(sp) + */ + +#ifdef CONFIG_FRAME_POINTER + +int unwind_frame(struct task_struct *tsk, struct stackframe *frame) +{ + unsigned long fp = frame->fp; + + if (fp & 0x7) + return -EINVAL; + + if (!tsk) + tsk = current; + + if (!on_accessible_stack(tsk, fp, NULL)) + return -EINVAL; + + frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp)); + frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8)); + + /* + * Frames created upon entry from user have NULL FP and PC values, so + * don't bother reporting these. Frames created by __noreturn functions + * might have a valid FP even if PC is bogus, so only terminate where + * both are NULL. 
+ */ + if (!frame->fp && !frame->pc) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL_GPL(unwind_frame); + +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long pc, fp; + + struct stackframe frame; + + if (regs) { + unsigned long offset; + + pc = regs->pc; + fp = regs->regs[15]; + if (kallsyms_lookup_size_offset(pc, NULL, &offset) + && offset < 16) { + /* call stack has not been setup + * store pc first then loop from ra + */ + if (fn(pc, data)) + return; + pc = regs->regs[26]; + } + } else if (tsk == current || tsk == NULL) { + fp = (unsigned long)__builtin_frame_address(0); + pc = (unsigned long)walk_stackframe; + } else { + fp = tsk->thread.s[6]; + pc = tsk->thread.ra; + } + + if (!__kernel_text_address(pc) || fn(pc, data)) + return; + + frame.pc = pc; + frame.fp = fp; + while (1) { + int ret; + + ret = unwind_frame(tsk, &frame); + if (ret < 0) + break; + + if (fn(frame.pc, data)) + break; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#else /* !CONFIG_FRAME_POINTER */ +void walk_stackframe(struct task_struct *tsk, struct pt_regs *regs, + int (*fn)(unsigned long, void *), void *data) +{ + unsigned long *ksp; + unsigned long sp, pc; + + if (regs) { + sp = (unsigned long)(regs+1); + pc = regs->pc; + } else if (tsk == current || tsk == NULL) { + register unsigned long current_sp __asm__ ("$30"); + sp = current_sp; + pc = (unsigned long)walk_stackframe; + } else { + sp = tsk->thread.sp; + pc = tsk->thread.ra; + } + + ksp = (unsigned long *)sp; + + while (!kstack_end(ksp)) { + if (__kernel_text_address(pc) && fn(pc, data)) + break; + pc = *ksp++; + } +} +EXPORT_SYMBOL_GPL(walk_stackframe); + +#endif/* CONFIG_FRAME_POINTER */ + +static int print_address_trace(unsigned long pc, void *data) +{ + print_ip_sym((const char *)data, pc); + return 0; +} + +void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) +{ + pr_info("Trace:\n"); + walk_stackframe(task, NULL, print_address_trace, (void *)loglvl); +} + +#ifdef CONFIG_STACKTRACE +/* + * Save stack-backtrace addresses into a stack_trace buffer. 
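+ * (Note: save_trace() returns nonzero once the buffer is full, which
+ * makes walk_stackframe() stop the walk early.)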
+ */
+struct stack_trace_data {
+	struct stack_trace *trace;
+	unsigned int nosched;
+};
+
+int save_trace(unsigned long pc, void *d)
+{
+	struct stack_trace_data *data = d;
+	struct stack_trace *trace = data->trace;
+
+	if (data->nosched && in_sched_functions(pc))
+		return 0;
+	if (trace->skip > 0) {
+		trace->skip--;
+		return 0;
+	}
+
+	trace->entries[trace->nr_entries++] = pc;
+	return (trace->nr_entries >= trace->max_entries);
+}
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	struct stack_trace_data data;
+
+	data.trace = trace;
+	data.nosched = 0;
+
+	walk_stackframe(current, regs, save_trace, &data);
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+static void __save_stack_trace(struct task_struct *tsk,
+		struct stack_trace *trace, unsigned int nosched)
+{
+	struct stack_trace_data data;
+
+	data.trace = trace;
+	data.nosched = nosched;
+
+	walk_stackframe(tsk, NULL, save_trace, &data);
+
+	if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+	__save_stack_trace(tsk, trace, 1);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	__save_stack_trace(current, trace, 0);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif
+
+static int save_pc(unsigned long pc, void *data)
+{
+	unsigned long *p = data;
+	*p = 0;
+
+	if (!in_sched_functions(pc))
+		*p = pc;
+
+	return *p;
+}
+
+unsigned long __get_wchan(struct task_struct *tsk)
+{
+	unsigned long pc;
+
+	if (!tsk || tsk == current || task_is_running(tsk))
+		return 0;
+	walk_stackframe(tsk, NULL, save_pc, &pc);
+
+	return pc;
+}
+
+#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
+int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+		struct stack_trace *trace)
+{
+	return 0;
+}
+#endif
diff --git a/arch/sw_64/kernel/suspend.c b/arch/sw_64/kernel/suspend.c
new file mode 100644
index 0000000000000000000000000000000000000000..27a240e6614955835f7abe8c21558b956898da43
--- /dev/null
+++ b/arch/sw_64/kernel/suspend.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+
+#include
+#include
+
+struct processor_state suspend_state;
+
+static int native_suspend_state_valid(suspend_state_t pm_state)
+{
+	switch (pm_state) {
+	case PM_SUSPEND_ON:
+	case PM_SUSPEND_STANDBY:
+	case PM_SUSPEND_MEM:
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+void disable_local_timer(void)
+{
+	wrtimer(0);
+}
+
+extern struct pci_controller *hose_head;
+
+/*
+ * Boot core will enter suspend state here.
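+ * (Note: callee-saved registers and the FPCR are saved and restored by
+ * sw64_suspend_deep_sleep() in suspend_asm.S later in this patch.)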
+ */ +void sw64_suspend_enter(void) +{ + /* boot processor will go to deep sleep mode from here + * After wake up boot processor, pc will go here + */ + disable_local_timer(); + current_thread_info()->pcb.tp = rtid(); + + sw64_suspend_deep_sleep(&suspend_state); + wrtp(current_thread_info()->pcb.tp); + + disable_local_timer(); +} + +static int native_suspend_enter(suspend_state_t state) +{ + if (is_in_guest()) + return 0; + /* processor specific suspend */ + sw64_suspend_enter(); + return 0; +} + +const struct platform_suspend_ops native_suspend_ops = { + .valid = native_suspend_state_valid, + .enter = native_suspend_enter, +}; diff --git a/arch/sw_64/kernel/suspend_asm.S b/arch/sw_64/kernel/suspend_asm.S new file mode 100644 index 0000000000000000000000000000000000000000..34ee349515a7c1278f24bc9c64dc3e8a6e864137 --- /dev/null +++ b/arch/sw_64/kernel/suspend_asm.S @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#include +#include +#include +#include + + .text + .set noat +ENTRY(sw64_suspend_deep_sleep) + /* a0 $16 will be the address of suspend_state */ + ldi $1, PSTATE_REGS($16) + stl $9, CALLEE_R9($1) + stl $10, CALLEE_R10($1) + stl $11, CALLEE_R11($1) + stl $12, CALLEE_R12($1) + stl $13, CALLEE_R13($1) + stl $14, CALLEE_R14($1) + stl $15, CALLEE_R15($1) + stl $26, CALLEE_RA($1) + /* SIMD-FP */ + ldi $1, PSTATE_FPREGS($16) + vstd $f2, CALLEE_F2($1) + vstd $f3, CALLEE_F3($1) + vstd $f4, CALLEE_F4($1) + vstd $f5, CALLEE_F5($1) + vstd $f6, CALLEE_F6($1) + vstd $f7, CALLEE_F7($1) + vstd $f8, CALLEE_F8($1) + vstd $f9, CALLEE_F9($1) + rfpcr $f0 + fstd $f0, PSTATE_FPCR($16) + stl $8, PSTATE_KTP($16) + + /* save the address of suspend_state to $18 */ + mov $16, $18 + + /* + * Now will Go to Deep Sleep + * HMcode should save pc, gp, ps, r16, r17, r18 + */ + + sys_call HMC_sleepen + sys_call HMC_whami + bis $0, $0, $16 + ldi $17, 0x2($31) + sys_call HMC_sendii + + /* wait for a while to receive interrupt */ + ldi $16, 0x1($31) + sll $16, 24, $16 +$subloop: + subl $16, 1, $16 + bis $16, $16, $16 + bis $16, $16, $16 + bne $16, $subloop + + + ldl $8, PSTATE_KTP($18) + ldi $1, PSTATE_REGS($18) + ldl $9, CALLEE_R9($1) + ldl $10, CALLEE_R10($1) + ldl $11, CALLEE_R11($1) + ldl $12, CALLEE_R12($1) + ldl $13, CALLEE_R13($1) + ldl $14, CALLEE_R14($1) + ldl $15, CALLEE_R15($1) + ldl $26, CALLEE_RA($1) + /* SIMD-FP */ + fldd $f0, PSTATE_FPCR($18) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $suspend_setfpec_0 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_1 + subl $2, 0x1, $2 + beq $2, $suspend_setfpec_2 + setfpec3 + br $suspend_setfpec_over +$suspend_setfpec_0: + setfpec0 + br $suspend_setfpec_over +$suspend_setfpec_1: + setfpec1 + br $suspend_setfpec_over +$suspend_setfpec_2: + setfpec2 +$suspend_setfpec_over: + ldi $1, PSTATE_FPREGS($18) + vldd $f2, CALLEE_F2($1) + vldd $f3, CALLEE_F3($1) + vldd $f4, CALLEE_F4($1) + vldd $f5, CALLEE_F5($1) + vldd $f6, CALLEE_F6($1) + vldd $f7, CALLEE_F7($1) + vldd $f8, CALLEE_F8($1) + vldd $f9, CALLEE_F9($1) + ret +END(sw64_suspend_deep_sleep) diff --git a/arch/sw_64/kernel/sys_sw64.c b/arch/sw_64/kernel/sys_sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..d0198aef554d78a60dd4cb67acc78324a49d1b33 --- /dev/null +++ b/arch/sw_64/kernel/sys_sw64.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +SYSCALL_DEFINE5(getsysinfo, unsigned long, op, void __user *, buffer, + unsigned long, nbytes, int __user *, start, void __user *, arg) +{ + unsigned long w; + + switch (op) { + case GSI_IEEE_FP_CONTROL: + 
		/* Return current software fp control & status bits. */
+		/* Note that DU doesn't verify available space here. */
+
+		w = current_thread_info()->ieee_state & IEEE_SW_MASK;
+		w = swcr_update_status(w, rdfpcr());
+		if (put_user(w, (unsigned long __user *) buffer))
+			return -EFAULT;
+		return 0;
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE5(setsysinfo, unsigned long, op, void __user *, buffer,
+		unsigned long, nbytes, int __user *, start, void __user *, arg)
+{
+	switch (op) {
+	case SSI_IEEE_FP_CONTROL: {
+		unsigned long swcr, fpcr;
+		unsigned int *state;
+
+		/*
+		 * Sw_64 Architecture Handbook 4.7.7.3:
+		 * To be fully IEEE compliant, we must track the current IEEE
+		 * exception state in software, because spurious bits can be
+		 * set in the trap shadow of a software-complete insn.
+		 */
+
+		if (get_user(swcr, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+
+		/* Update software trap enable bits. */
+		*state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr() & FPCR_DYN_MASK;
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		return 0;
+	}
+
+	case SSI_IEEE_RAISE_EXCEPTION: {
+		unsigned long exc, swcr, fpcr, fex;
+		unsigned int *state;
+
+		if (get_user(exc, (unsigned long __user *)buffer))
+			return -EFAULT;
+		state = &current_thread_info()->ieee_state;
+		exc &= IEEE_STATUS_MASK;
+
+		/* Update software trap enable bits. */
+		swcr = (*state & IEEE_SW_MASK) | exc;
+		*state |= exc;
+
+		/* Update the real fpcr. */
+		fpcr = rdfpcr();
+		fpcr |= ieee_swcr_to_fpcr(swcr);
+		wrfpcr(fpcr);
+
+		/* If any exceptions were set by this call and are unmasked,
+		 * send a signal. Old exceptions are not signaled.
+		 */
+		fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+		if (fex) {
+			int si_code = FPE_FLTUNK;
+
+			if (fex & IEEE_TRAP_ENABLE_DNO)
+				si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_INE)
+				si_code = FPE_FLTRES;
+			if (fex & IEEE_TRAP_ENABLE_UNF)
+				si_code = FPE_FLTUND;
+			if (fex & IEEE_TRAP_ENABLE_OVF)
+				si_code = FPE_FLTOVF;
+			if (fex & IEEE_TRAP_ENABLE_DZE)
+				si_code = FPE_FLTDIV;
+			if (fex & IEEE_TRAP_ENABLE_INV)
+				si_code = FPE_FLTINV;
+
+			send_sig_fault(SIGFPE, si_code, (void __user *)NULL, current);
+		}
+		return 0;
+	}
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+SYSCALL_DEFINE2(odd_getpriority, int, which, int, who)
+{
+	int prio = sys_getpriority(which, who);
+
+	if (prio >= 0) {
+		/* Return value is the unbiased priority, i.e. 20 - prio.
+		 * This does result in negative return values, so signal
+		 * no error.
+		 */
+		force_successful_syscall_return();
+		prio = 20 - prio;
+	}
+	return prio;
+}
+
+SYSCALL_DEFINE0(getxuid)
+{
+	current_pt_regs()->regs[20] = sys_geteuid();
+	return sys_getuid();
+}
+
+SYSCALL_DEFINE0(getxgid)
+{
+	current_pt_regs()->regs[20] = sys_getegid();
+	return sys_getgid();
+}
+
+SYSCALL_DEFINE0(getxpid)
+{
+	current_pt_regs()->regs[20] = sys_getppid();
+	return sys_getpid();
+}
+
+SYSCALL_DEFINE0(sw64_pipe)
+{
+	int fd[2];
+	int res = do_pipe_flags(fd, 0);
+
+	if (!res) {
+		/* The return values are in $0 and $20. */
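+		/* (Note: fd[0] comes back in v0/$0 as the normal return
+		 * value and fd[1] via regs[20], the same dual-return
+		 * convention as getxpid and friends above.) */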
*/
+ current_pt_regs()->regs[20] = fd[1];
+ res = fd[0];
+ }
+ return res;
+}
diff --git a/arch/sw_64/kernel/syscalls/Makefile b/arch/sw_64/kernel/syscalls/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..cdfe761d728288b7d22a48f481ac39af165c2280
--- /dev/null
+++ b/arch/sw_64/kernel/syscalls/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+$(shell mkdir -p $(uapi) $(kapi))
+
+syscall := $(src)/syscall.tbl
+syshdr := $(srctree)/scripts/syscallhdr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --emit-nr $< $@
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
+
+$(uapi)/unistd_64.h: $(syscall) $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl) FORCE
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_64.h
+kapisyshdr-y += syscall_table.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
+
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+ @:
diff --git a/arch/sw_64/kernel/syscalls/syscall.tbl b/arch/sw_64/kernel/syscalls/syscall.tbl
new file mode 100644
index 0000000000000000000000000000000000000000..fdf9e4cb03eb53e7263225aae82bc00dfb53a66c
--- /dev/null
+++ b/arch/sw_64/kernel/syscalls/syscall.tbl
@@ -0,0 +1,528 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for sw64
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+#0 is unused
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+#5 is unused
+6 common close sys_close
+#7 is unused
+#8 is unused
+9 common link sys_link
+10 common unlink sys_unlink
+#11 is unused
+12 common chdir sys_chdir
+13 common fchdir sys_fchdir
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common chown sys_chown
+17 common brk sys_brk
+#18 is unused
+19 common lseek sys_lseek
+20 common getxpid sys_getxpid
+#21 is unused
+22 common umount2 sys_umount
+23 common setuid sys_setuid
+24 common getxuid sys_getxuid
+#25 is unused
+26 common ptrace sys_ptrace
+#27 is unused
+#28 is unused
+#29 is unused
+#30 is unused
+#31 is unused
+#32 is unused
+33 common access sys_access
+#34 is unused
+#35 is unused
+36 common sync sys_sync
+37 common kill sys_kill
+#38 is unused
+39 common setpgid sys_setpgid
+#40 is unused
+41 common dup sys_dup
+42 common pipe sys_sw64_pipe
+#43 is unused
+#44 is unused
+45 common open sys_open
+#46 is unused
+47 common getxgid sys_getxgid
+48 common odd_sigprocmask sys_odd_sigprocmask
+#49 is unused
+#50 is unused
+51 common acct sys_acct
+52 common sigpending sys_sigpending
+#53 is unused
+54 common ioctl sys_ioctl
+#55 is unused
+#56 is unused
+57 common symlink sys_symlink
+58 common readlink sys_readlink
+59 common execve sys_execve
+60 common umask sys_umask
+61 common chroot sys_chroot
+#62 is unused
+63 common getpgrp sys_getpgrp
+#64 is unused
+#65 is unused
+66 common vfork sys_vfork
+67 common stat sys_newstat
+68 common lstat sys_newlstat
+#69 is unused
+#70 is unused
+71 common mmap sys_mmap
+#72 is unused
+73 common munmap sys_munmap
+74 common mprotect sys_mprotect
+75 common madvise sys_madvise
+76 common vhangup sys_vhangup
+#77 is unused
+#78 is unused
+79 common getgroups
sys_getgroups +80 common setgroups sys_setgroups +#81 is unused +82 common setpgrp sys_setpgid +#83 is unused +#84 is unused +#85 is unused +#86 is unused +87 common gethostname sys_gethostname +88 common sethostname sys_sethostname +#89 is unused +90 common dup2 sys_dup2 +91 common fstat sys_newfstat +92 common fcntl sys_fcntl +#93 is unused +94 common poll sys_poll +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common odd_getpriority sys_odd_getpriority +101 common send sys_send +102 common recv sys_recv +103 common sigreturn sys_sigreturn +104 common bind sys_bind +105 common setsockopt sys_setsockopt +106 common listen sys_listen +#107 is unused +#108 is unused +#109 is unused +#110 is unused +111 common sigsuspend sys_sigsuspend +#112 is unused +113 common recvmsg sys_recvmsg +114 common sendmsg sys_sendmsg +#115 is unused +#116 is unused +#117 is unused +118 common getsockopt sys_getsockopt +119 common socketcall sys_socketcall +120 common readv sys_readv +121 common writev sys_writev +#122 is unused +123 common fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 common setreuid sys_setreuid +127 common setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate +130 common ftruncate sys_ftruncate +131 common flock sys_flock +132 common setgid sys_setgid +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +#138 is unused +#139 is unused +#140 is unused +141 common getpeername sys_getpeername +#142 is unused +#143 is unused +144 common getrlimit sys_getrlimit +145 common setrlimit sys_setrlimit +#146 is unused +147 common setsid sys_setsid +148 common quotactl sys_quotactl +#149 is unused +150 common getsockname sys_getsockname +#151 is unused +#152 is unused +#153 is unused +#154 is unused +#155 is unused +156 common sigaction sys_odd_sigaction +#157 is unused +#158 is unused +#159 is unused +#160 is unused +#161 is unused +#162 is unused +#163 is unused +#164 is unused +#165 is unused +166 common setdomainname sys_setdomainname +#167 is unused +#168 is unused +#169 is unused +170 common bpf sys_bpf +171 common userfaultfd sys_userfaultfd +172 common membarrier sys_membarrier +173 common mlock2 sys_mlock2 +174 common getpid sys_getpid +175 common getppid sys_getppid +176 common getuid sys_getuid +177 common geteuid sys_geteuid +178 common getgid sys_getgid +179 common getegid sys_getegid +180 common epoll_pwait2 sys_epoll_pwait2 +181 common mount_setattr sys_mount_setattr +182 common quotactl_fd sys_quotactl_fd +183 common landlock_create_ruleset sys_landlock_create_ruleset +184 common landlock_add_rule sys_landlock_add_rule +185 common landlock_restrict_self sys_landlock_restrict_self +# 186 reserved for memfd_secret +187 common process_mrelease sys_process_mrelease +188 common futex_waitv sys_futex_waitv +189 common set_mempolicy_home_node sys_ni_syscall +190 common cachestat sys_cachestat +191 common fchmodat2 sys_fchmodat2 +#192 is unused +#193 is unused +#194 is unused +#195 is unused +#196 is unused +#197 is unused +#198 is unused +#199 is unused +200 common msgctl sys_old_msgctl +201 common msgget sys_msgget +202 common msgrcv sys_msgrcv +203 common msgsnd sys_msgsnd +204 common semctl sys_old_semctl +205 common semget sys_semget +206 common semop sys_semop +#207 is unused +208 common lchown sys_lchown 
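+# 209-212: System V shared memory calls; together with the msg*
+# (200-203) and sem* (204-206) entries above they form the SysV IPC
+# block, apparently kept at its historical numbers.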
+209 common shmat sys_shmat +210 common shmctl sys_old_shmctl +211 common shmdt sys_shmdt +212 common shmget sys_shmget +#213 is unused +#214 is unused +#215 is unused +#216 is unused +217 common msync sys_msync +#218 is unused +#219 is unused +#220 is unused +#221 is unused +#222 is unused +#223 is unused +#224 is unused +#225 is unused +#226 is unused +#227 is unused +#228 is unused +229 common statfs64 sys_statfs64 +230 common fstatfs64 sys_fstatfs64 +#231 is unused +#232 is unused +233 common getpgid sys_getpgid +234 common getsid sys_getsid +235 common sigaltstack sys_sigaltstack +#236 is unused +#237 is unused +#238 is unused +#239 is unused +#240 is unused +#241 is unused +#242 is unused +#243 is unused +#244 is unused +#245 is unused +#246 is unused +#247 is unused +#248 is unused +#249 is unused +#250 is unused +#251 is unused +#252 is unused +#253 is unused +254 common sysfs sys_sysfs +#255 is unused +256 common getsysinfo sys_getsysinfo +257 common setsysinfo sys_setsysinfo +#258 is unused +#259 is unused +#260 is unused +#261 is unused +#262 is unused +#263 is unused +#264 is unused +#265 is unused +#266 is unused +#267 is unused +#268 is unused +#269 is unused +#270 is unused +271 common pidfd_send_signal sys_pidfd_send_signal +272 common io_uring_setup sys_io_uring_setup +273 common io_uring_enter sys_io_uring_enter +274 common io_uring_register sys_io_uring_register +275 common open_tree sys_open_tree +276 common move_mount sys_move_mount +277 common fsopen sys_fsopen +278 common fsconfig sys_fsconfig +279 common fsmount sys_fsmount +280 common fspick sys_fspick +281 common pidfd_open sys_pidfd_open +282 common clone3 sys_clone3 +283 common close_range sys_close_range +284 common openat2 sys_openat2 +285 common pidfd_getfd sys_pidfd_getfd +286 common faccessat2 sys_faccessat2 +287 common process_madvise sys_process_madvise +288 common pkey_mprotect sys_pkey_mprotect +289 common pkey_alloc sys_pkey_alloc +290 common pkey_free sys_pkey_free +#291 is unused +#292 is unused +#293 is unused +#294 is unused +#295 is unused +#296 is unused +#297 is unused +298 common getpriority sys_getpriority +299 common sigprocmask sys_sigprocmask +300 common bdflush sys_ni_syscall +#301 is unused +302 common mount sys_mount +#303 is unused +304 common swapoff sys_swapoff +305 common getdents sys_getdents +306 common create_module sys_ni_syscall +307 common init_module sys_init_module +308 common delete_module sys_delete_module +309 common get_kernel_syms sys_ni_syscall +310 common syslog sys_syslog +311 common reboot sys_reboot +312 common clone sys_clone +313 common uselib sys_uselib +314 common mlock sys_mlock +315 common munlock sys_munlock +316 common mlockall sys_mlockall +317 common munlockall sys_munlockall +318 common sysinfo sys_sysinfo +#319 is unused +#320 is unused +321 common oldumount sys_oldumount +322 common swapon sys_swapon +323 common times sys_times +324 common personality sys_personality +325 common setfsuid sys_setfsuid +326 common setfsgid sys_setfsgid +327 common ustat sys_ustat +328 common statfs sys_statfs +329 common fstatfs sys_fstatfs +330 common sched_setparam sys_sched_setparam +331 common sched_getparam sys_sched_getparam +332 common sched_setscheduler sys_sched_setscheduler +333 common sched_getscheduler sys_sched_getscheduler +334 common sched_yield sys_sched_yield +335 common sched_get_priority_max sys_sched_get_priority_max +336 common sched_get_priority_min sys_sched_get_priority_min +337 common sched_rr_get_interval sys_sched_rr_get_interval +338 common 
afs_syscall sys_ni_syscall +339 common uname sys_newuname +340 common nanosleep sys_nanosleep +341 common mremap sys_mremap +342 common nfsservctl sys_ni_syscall +343 common setresuid sys_setresuid +344 common getresuid sys_getresuid +345 common pciconfig_read sys_pciconfig_read +346 common pciconfig_write sys_pciconfig_write +347 common query_module sys_ni_syscall +348 common prctl sys_prctl +349 common pread64 sys_pread64 +350 common pwrite64 sys_pwrite64 +351 common rt_sigreturn sys_rt_sigreturn +352 common rt_sigaction sys_rt_sigaction +353 common rt_sigprocmask sys_rt_sigprocmask +354 common rt_sigpending sys_rt_sigpending +355 common rt_sigtimedwait sys_rt_sigtimedwait +356 common rt_sigqueueinfo sys_rt_sigqueueinfo +357 common rt_sigsuspend sys_rt_sigsuspend +358 common select sys_select +359 common gettimeofday sys_gettimeofday +360 common settimeofday sys_settimeofday +361 common getitimer sys_getitimer +362 common setitimer sys_setitimer +363 common utimes sys_utimes +364 common getrusage sys_getrusage +365 common wait4 sys_wait4 +366 common adjtimex sys_adjtimex +367 common getcwd sys_getcwd +368 common capget sys_capget +369 common capset sys_capset +370 common sendfile sys_sendfile64 +371 common setresgid sys_setresgid +372 common getresgid sys_getresgid +373 common dipc sys_ni_syscall +374 common pivot_root sys_pivot_root +375 common mincore sys_mincore +376 common pciconfig_iobase sys_pciconfig_iobase +377 common getdents64 sys_getdents64 +378 common gettid sys_gettid +379 common readahead sys_readahead +#380 is unused +381 common tkill sys_tkill +382 common setxattr sys_setxattr +383 common lsetxattr sys_lsetxattr +384 common fsetxattr sys_fsetxattr +385 common getxattr sys_getxattr +386 common lgetxattr sys_lgetxattr +387 common fgetxattr sys_fgetxattr +388 common listxattr sys_listxattr +389 common llistxattr sys_llistxattr +390 common flistxattr sys_flistxattr +391 common removexattr sys_removexattr +392 common lremovexattr sys_lremovexattr +393 common fremovexattr sys_fremovexattr +394 common futex sys_futex +395 common sched_setaffinity sys_sched_setaffinity +396 common sched_getaffinity sys_sched_getaffinity +397 common tuxcall sys_ni_syscall +398 common io_setup sys_io_setup +399 common io_destroy sys_io_destroy +400 common io_getevents sys_io_getevents +401 common io_submit sys_io_submit +402 common io_cancel sys_io_cancel +403 common io_pgetevents sys_io_pgetevents +404 common rseq sys_rseq +405 common exit_group sys_exit_group +406 common lookup_dcookie sys_lookup_dcookie +407 common epoll_create sys_epoll_create +408 common epoll_ctl sys_epoll_ctl +409 common epoll_wait sys_epoll_wait +410 common remap_file_pages sys_remap_file_pages +411 common set_tid_address sys_set_tid_address +412 common restart_syscall sys_restart_syscall +413 common fadvise64 sys_fadvise64 +414 common timer_create sys_timer_create +415 common timer_settime sys_timer_settime +416 common timer_gettime sys_timer_gettime +417 common timer_getoverrun sys_timer_getoverrun +418 common timer_delete sys_timer_delete +419 common clock_settime sys_clock_settime +420 common clock_gettime sys_clock_gettime +421 common clock_getres sys_clock_getres +422 common clock_nanosleep sys_clock_nanosleep +423 common semtimedop sys_semtimedop +424 common tgkill sys_tgkill +425 common stat64 sys_stat64 +426 common lstat64 sys_lstat64 +427 common fstat64 sys_fstat64 +428 common vserver sys_ni_syscall +429 common mbind sys_mbind +430 common get_mempolicy sys_get_mempolicy +431 common set_mempolicy sys_set_mempolicy 
+432 common mq_open sys_mq_open +433 common mq_unlink sys_mq_unlink +434 common mq_timedsend sys_mq_timedsend +435 common mq_timedreceive sys_mq_timedreceive +436 common mq_notify sys_mq_notify +437 common mq_getsetattr sys_mq_getsetattr +438 common waitid sys_waitid +439 common add_key sys_add_key +440 common request_key sys_request_key +441 common keyctl sys_keyctl +442 common ioprio_set sys_ioprio_set +443 common ioprio_get sys_ioprio_get +444 common inotify_init sys_inotify_init +445 common inotify_add_watch sys_inotify_add_watch +446 common inotify_rm_watch sys_inotify_rm_watch +447 common fdatasync sys_fdatasync +448 common kexec_load sys_kexec_load +449 common migrate_pages sys_migrate_pages +450 common openat sys_openat +451 common mkdirat sys_mkdirat +452 common mknodat sys_mknodat +453 common fchownat sys_fchownat +454 common futimesat sys_futimesat +455 common fstatat64 sys_fstatat64 +456 common unlinkat sys_unlinkat +457 common renameat sys_renameat +458 common linkat sys_linkat +459 common symlinkat sys_symlinkat +460 common readlinkat sys_readlinkat +461 common fchmodat sys_fchmodat +462 common faccessat sys_faccessat +463 common pselect6 sys_pselect6 +464 common ppoll sys_ppoll +465 common unshare sys_unshare +466 common set_robust_list sys_set_robust_list +467 common get_robust_list sys_get_robust_list +468 common splice sys_splice +469 common sync_file_range sys_sync_file_range +470 common tee sys_tee +471 common vmsplice sys_vmsplice +472 common move_pages sys_move_pages +473 common getcpu sys_getcpu +474 common epoll_pwait sys_epoll_pwait +475 common utimensat sys_utimensat +476 common signalfd sys_signalfd +477 common timerfd sys_ni_syscall +478 common eventfd sys_eventfd +479 common recvmmsg sys_recvmmsg +480 common fallocate sys_fallocate +481 common timerfd_create sys_timerfd_create +482 common timerfd_settime sys_timerfd_settime +483 common timerfd_gettime sys_timerfd_gettime +484 common signalfd4 sys_signalfd4 +485 common eventfd2 sys_eventfd2 +486 common epoll_create1 sys_epoll_create1 +487 common dup3 sys_dup3 +488 common pipe2 sys_pipe2 +489 common inotify_init1 sys_inotify_init1 +490 common preadv sys_preadv +491 common pwritev sys_pwritev +492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +493 common perf_event_open sys_perf_event_open +494 common fanotify_init sys_fanotify_init +495 common fanotify_mark sys_fanotify_mark +496 common prlimit64 sys_prlimit64 +497 common name_to_handle_at sys_name_to_handle_at +498 common open_by_handle_at sys_open_by_handle_at +499 common clock_adjtime sys_clock_adjtime +500 common syncfs sys_syncfs +501 common setns sys_setns +502 common accept4 sys_accept4 +503 common sendmmsg sys_sendmmsg +504 common process_vm_readv sys_process_vm_readv +505 common process_vm_writev sys_process_vm_writev +506 common kcmp sys_kcmp +507 common finit_module sys_finit_module +508 common sched_setattr sys_sched_setattr +509 common sched_getattr sys_sched_getattr +510 common renameat2 sys_renameat2 +511 common getrandom sys_getrandom +512 common memfd_create sys_memfd_create +513 common execveat sys_execveat +514 common seccomp sys_seccomp +515 common copy_file_range sys_copy_file_range +516 common preadv2 sys_preadv2 +517 common pwritev2 sys_pwritev2 +518 common statx sys_statx diff --git a/arch/sw_64/kernel/systbls.S b/arch/sw_64/kernel/systbls.S new file mode 100644 index 0000000000000000000000000000000000000000..010ca3f8e016b773a014aeb0bca648a3ef79b14d --- /dev/null +++ b/arch/sw_64/kernel/systbls.S @@ -0,0 +1,15 @@ +/* 
SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw_64/kernel/systbls.S + * + * The system call table. + */ + +#include + +#define __SYSCALL(nr, entry) .quad entry + .data + .align 3 + .globl sys_call_table +sys_call_table: +#include diff --git a/arch/sw_64/kernel/tc.c b/arch/sw_64/kernel/tc.c new file mode 100644 index 0000000000000000000000000000000000000000..f2de5ac3d9dc440ca8685e33354c5e0b35919f91 --- /dev/null +++ b/arch/sw_64/kernel/tc.c @@ -0,0 +1,36 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019, serveros, linyue + */ + + +#include +#include + +/* + * Entry/exit counters that make sure that both CPUs + * run the measurement code at once: + */ +unsigned long time_sync; + +DEFINE_PER_CPU(u64, tc_offset); + +void tc_sync_clear(void) +{ + time_sync = 0; +} + +void tc_sync_ready(void *ignored) +{ + /* make sure we can see time_sync been set to 0 */ + smp_mb(); + while (!time_sync) + cpu_relax(); + + __this_cpu_write(tc_offset, time_sync - rdtc()); +} + +void tc_sync_set(void) +{ + time_sync = rdtc() + __this_cpu_read(tc_offset); +} diff --git a/arch/sw_64/kernel/termios.c b/arch/sw_64/kernel/termios.c new file mode 100644 index 0000000000000000000000000000000000000000..5c76a513c89683ac959bad926494ae58f2e41487 --- /dev/null +++ b/arch/sw_64/kernel/termios.c @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* + * Translate a "termio" structure into a "termios". Ugh. + */ + +int user_termio_to_kernel_termios(struct ktermios *a_termios, struct termio __user *u_termio) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon, ret; + + ret = copy_from_user(&k_termio, u_termio, sizeof(k_termio)); + if (!ret) { + /* Overwrite only the low bits. */ + *(unsigned short *)&k_termios->c_iflag = k_termio.c_iflag; + *(unsigned short *)&k_termios->c_oflag = k_termio.c_oflag; + *(unsigned short *)&k_termios->c_cflag = k_termio.c_cflag; + *(unsigned short *)&k_termios->c_lflag = k_termio.c_lflag; + canon = k_termio.c_lflag & ICANON; + + k_termios->c_cc[VINTR] = k_termio.c_cc[_VINTR]; + k_termios->c_cc[VQUIT] = k_termio.c_cc[_VQUIT]; + k_termios->c_cc[VERASE] = k_termio.c_cc[_VERASE]; + k_termios->c_cc[VKILL] = k_termio.c_cc[_VKILL]; + k_termios->c_cc[VEOL2] = k_termio.c_cc[_VEOL2]; + k_termios->c_cc[VSWTC] = k_termio.c_cc[_VSWTC]; + k_termios->c_cc[canon ? VEOF : VMIN] = k_termio.c_cc[_VEOF]; + k_termios->c_cc[canon ? VEOL : VTIME] = k_termio.c_cc[_VEOL]; + } + return ret; +} + +/* + * Translate a "termios" structure into a "termio". Ugh. + * + * Note the "fun" _VMIN overloading. + */ +int kernel_termios_to_user_termio(struct termio __user *u_termio, struct ktermios *a_termios) +{ + struct ktermios *k_termios = (a_termios); + struct termio k_termio; + int canon; + + k_termio.c_iflag = k_termios->c_iflag; + k_termio.c_oflag = k_termios->c_oflag; + k_termio.c_cflag = k_termios->c_cflag; + canon = (k_termio.c_lflag = k_termios->c_lflag) & ICANON; + + k_termio.c_line = k_termios->c_line; + k_termio.c_cc[_VINTR] = k_termios->c_cc[VINTR]; + k_termio.c_cc[_VQUIT] = k_termios->c_cc[VQUIT]; + k_termio.c_cc[_VERASE] = k_termios->c_cc[VERASE]; + k_termio.c_cc[_VKILL] = k_termios->c_cc[VKILL]; + k_termio.c_cc[_VEOF] = k_termios->c_cc[canon ? VEOF : VMIN]; + k_termio.c_cc[_VEOL] = k_termios->c_cc[canon ? 
VEOL : VTIME]; + k_termio.c_cc[_VEOL2] = k_termios->c_cc[VEOL2]; + k_termio.c_cc[_VSWTC] = k_termios->c_cc[VSWTC]; + + return copy_to_user(u_termio, &k_termio, sizeof(k_termio)); +} diff --git a/arch/sw_64/kernel/time.c b/arch/sw_64/kernel/time.c new file mode 100644 index 0000000000000000000000000000000000000000..533a6a14c20051da05469ed5bd1bf0d824c8c85d --- /dev/null +++ b/arch/sw_64/kernel/time.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include + +#include +#include + +#include "proto.h" + +DEFINE_SPINLOCK(rtc_lock); +EXPORT_SYMBOL(rtc_lock); + +#define TICK_SIZE (tick_nsec / 1000) + +/* + * Shift amount by which scaled_ticks_per_cycle is scaled. Shifting + * by 48 gives us 16 bits for HZ while keeping the accuracy good even + * for large CPU clock rates. + */ +#define FIX_SHIFT 48 + +unsigned long est_cycle_freq; + +#ifdef CONFIG_IRQ_WORK + +DEFINE_PER_CPU(u8, irq_work_pending); + +#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1) +#define test_irq_work_pending() __this_cpu_read(irq_work_pending) +#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0) + +void arch_irq_work_raise(void) +{ + set_irq_work_pending_flag(); +} + +#else /* CONFIG_IRQ_WORK */ + +#define test_irq_work_pending() 0 +#define clear_irq_work_pending() + +#endif /* CONFIG_IRQ_WORK */ + +void __init +time_init(void) +{ + unsigned long cycle_freq; + + cycle_freq = get_cpu_freq(); + + pr_info("CPU Cycle frequency = %ld Hz\n", cycle_freq); + + /* Register clocksource */ + sw64_setup_clocksource(); + of_clk_init(NULL); + /* Startup the timer source. */ + sw64_setup_timer(); + /* Calibrate the delay loop directly */ + lpj_fine = cycle_freq / HZ; +} diff --git a/arch/sw_64/kernel/topology.c b/arch/sw_64/kernel/topology.c new file mode 100644 index 0000000000000000000000000000000000000000..8371c013446fc222e2a20ae8aab79c7d252dd56c --- /dev/null +++ b/arch/sw_64/kernel/topology.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include + +static int __init parse_dt_topology(void) +{ + return 0; +} + +/* + * cpu topology table + */ +struct cpu_topology cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); + +int topo_nr_threads, topo_nr_cores, topo_nr_maxcpus; + +static int topo_nr_cpus; +static int topo_threads[NR_CPUS]; +static int topo_cores[NR_CPUS]; +static int topo_packages[NR_CPUS]; + +void __init get_vt_smp_info(void) +{ + unsigned long smp_info; + + smp_info = sw64_io_read(0, SMP_INFO); + if (smp_info == -1UL) + smp_info = 0; + topo_nr_threads = (smp_info >> VT_THREADS_SHIFT) & VT_THREADS_MASK; + topo_nr_cores = (smp_info >> VT_CORES_SHIFT) & VT_CORES_MASK; + topo_nr_maxcpus = (smp_info >> VT_MAX_CPUS_SHIFT) & VT_MAX_CPUS_MASK; +} + +static void __init init_topo_threads(void) +{ + int i, j; + + if (topo_nr_threads == 0) + topo_nr_threads = 1; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_threads) { + for (j = 0; j < topo_nr_threads; j++) + topo_threads[i+j] = j; + } +} + +static void __init init_topo_cores(void) +{ + int i, j; + + if (topo_nr_cores == 0) + topo_nr_cores = topo_nr_cpus; + + for (i = 0; i < topo_nr_cpus; i += topo_nr_cores) { + for (j = 0; j < topo_nr_cores; j++) + topo_cores[i+j] = j; + } +} + +static void __init init_topo_packages(void) +{ + int i, j, packet_index = 0; + int topo_nr_packages = topo_nr_cpus / (topo_nr_cores * topo_nr_threads); + int div_package = topo_nr_cpus / topo_nr_packages; + + for (i = 0; i < topo_nr_cpus; i += div_package) { + for (j 
= 0; j < div_package; j++)
+ topo_packages[i+j] = packet_index;
+ packet_index++;
+ }
+ if (packet_index > topo_nr_packages)
+ pr_err("topo_packages init failed.\n");
+}
+
+static void __init init_topology_array(void)
+{
+ topo_nr_cpus = num_present_cpus();
+ if (topo_nr_maxcpus > topo_nr_cpus)
+ topo_nr_cpus = topo_nr_maxcpus;
+ init_topo_threads();
+ init_topo_cores();
+ init_topo_packages();
+}
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return topology_llc_cpumask(cpu);
+}
+
+static void update_siblings_masks(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+ int sib;
+
+ /* update core and thread sibling masks */
+ for_each_online_cpu(sib) {
+ struct cpu_topology *sib_topo = &cpu_topology[sib];
+
+ if (cpu_topo->package_id == sib_topo->package_id) {
+ cpumask_set_cpu(cpu, &sib_topo->core_sibling);
+ cpumask_set_cpu(sib, &cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &sib_topo->llc_sibling);
+ cpumask_set_cpu(sib, &cpu_topo->llc_sibling);
+
+ if (cpu_topo->core_id == sib_topo->core_id) {
+ cpumask_set_cpu(cpu, &sib_topo->thread_sibling);
+ cpumask_set_cpu(sib, &cpu_topo->thread_sibling);
+ }
+ }
+ }
+}
+
+void store_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ if (cpu_topo->package_id != -1)
+ goto topology_populated;
+
+ if (is_guest_or_emul()) {
+ cpu_topo->package_id = topo_packages[cpu];
+ cpu_topo->core_id = topo_cores[cpu];
+ cpu_topo->thread_id = topo_threads[cpu];
+ goto topology_populated;
+ }
+
+ cpu_topo->package_id = rcid_to_domain_id(cpu_to_rcid(cpu));
+ cpu_topo->core_id = rcid_to_core_id(cpu_to_rcid(cpu));
+ cpu_topo->thread_id = rcid_to_thread_id(cpu_to_rcid(cpu));
+
+ pr_debug("CPU%u: socket %d core %d thread %d\n",
+ cpu, cpu_topo->package_id, cpu_topo->core_id,
+ cpu_topo->thread_id);
+
+topology_populated:
+ update_siblings_masks(cpu);
+}
+
+static void clear_cpu_topology(int cpu)
+{
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpumask_clear(&cpu_topo->llc_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->llc_sibling);
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+}
+
+static void __init reset_cpu_topology(void)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = 0;
+ cpu_topo->package_id = -1;
+
+ clear_cpu_topology(cpu);
+ }
+}
+
+void remove_cpu_topology(int cpu)
+{
+ int sibling;
+
+ for_each_cpu(sibling, topology_core_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
+ for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
+ for_each_cpu(sibling, topology_llc_cpumask(cpu))
+ cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling));
+
+ clear_cpu_topology(cpu);
+}
+
+#ifdef CONFIG_ACPI
+static int __init parse_acpi_topology(void)
+{
+ return 0;
+}
+#else
+static inline int __init parse_acpi_topology(void)
+{
+ return -EINVAL;
+}
+#endif
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ if (is_guest_or_emul())
+ init_topology_array();
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
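+ * ACPI is tried first (when enabled), then the flattened device
+ * tree. Both parse_acpi_topology() and parse_dt_topology() are
+ * still stubs here, so in practice neither reset path should
+ * trigger.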
+ */ + if (!acpi_disabled && parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} diff --git a/arch/sw_64/kernel/traps.c b/arch/sw_64/kernel/traps.c new file mode 100644 index 0000000000000000000000000000000000000000..a30e18ad1f00e5878264653da04db8cde14a845c --- /dev/null +++ b/arch/sw_64/kernel/traps.c @@ -0,0 +1,1542 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * arch/sw_64/kernel/traps.c + * + * (C) Copyright 1994 Linus Torvalds + */ + +/* + * This file initializes the trap entry points + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "proto.h" + +enum SW64_IF_TYPES { + IF_BREAKPOINT = 0, + IF_RESERVED, + IF_GENTRAP, + IF_FEN, + IF_OPDEC, + IF_SIMDEMU, +}; + +void show_regs(struct pt_regs *regs) +{ + show_regs_print_info(KERN_DEFAULT); + + printk(KERN_DEFAULT "pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", + regs->pc, regs->regs[26], regs->ps, print_tainted()); + printk(KERN_DEFAULT "pc is at %pSR\n", (void *)regs->pc); + printk(KERN_DEFAULT "ra is at %pSR\n", (void *)regs->regs[26]); + printk(KERN_DEFAULT "v0 = %016lx t0 = %016lx t1 = %016lx\n", + regs->regs[0], regs->regs[1], regs->regs[2]); + printk(KERN_DEFAULT "t2 = %016lx t3 = %016lx t4 = %016lx\n", + regs->regs[3], regs->regs[4], regs->regs[5]); + printk(KERN_DEFAULT "t5 = %016lx t6 = %016lx t7 = %016lx\n", + regs->regs[6], regs->regs[7], regs->regs[8]); + + printk(KERN_DEFAULT "s0 = %016lx s1 = %016lx s2 = %016lx\n", + regs->regs[9], regs->regs[10], regs->regs[11]); + printk(KERN_DEFAULT "s3 = %016lx s4 = %016lx s5 = %016lx\n", + regs->regs[12], regs->regs[13], regs->regs[14]); + printk(KERN_DEFAULT "s6 = %016lx\n", + regs->regs[15]); + + printk(KERN_DEFAULT "a0 = %016lx a1 = %016lx a2 = %016lx\n", + regs->regs[16], regs->regs[17], regs->regs[18]); + printk(KERN_DEFAULT "a3 = %016lx a4 = %016lx a5 = %016lx\n", + regs->regs[19], regs->regs[20], regs->regs[21]); + printk(KERN_DEFAULT "t8 = %016lx t9 = %016lx t10 = %016lx\n", + regs->regs[22], regs->regs[23], regs->regs[24]); + printk(KERN_DEFAULT "t11= %016lx pv = %016lx at = %016lx\n", + regs->regs[25], regs->regs[27], regs->regs[28]); + printk(KERN_DEFAULT "gp = %016lx sp = %016lx\n", regs->regs[29], regs->regs[30]); +} + +static void show_code(unsigned int *pc) +{ + long i; + unsigned int insn; + + printk(KERN_DEFAULT "Code:"); + for (i = -6; i < 2; i++) { + if (__get_user(insn, (unsigned int __user *)pc + i)) + break; + printk(KERN_DEFAULT "%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); + } + printk(KERN_DEFAULT "\n"); +} + +static DEFINE_SPINLOCK(die_lock); + +void die(char *str, struct pt_regs *regs, long err) +{ + static int die_counter; + unsigned long flags; + int ret; + + oops_enter(); + + spin_lock_irqsave(&die_lock, flags); + console_verbose(); + bust_spinlocks(1); + + pr_emerg("%s [#%d]\n", str, ++die_counter); + + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + + print_modules(); + show_regs(regs); + show_code((unsigned int *)regs->pc); + show_stack(current, NULL, KERN_EMERG); + + bust_spinlocks(0); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + spin_unlock_irqrestore(&die_lock, flags); + oops_exit(); + + if (kexec_should_crash(current)) + crash_kexec(regs); + if (in_interrupt()) + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); + + if (ret != NOTIFY_STOP) + make_task_dead(SIGSEGV); +} + +#ifndef CONFIG_MATHEMU +static long dummy_emul(void) +{ + return 0; +} + +long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul_imprecise); + +long (*sw64_fp_emul)(unsigned long pc) = (void *)dummy_emul; +EXPORT_SYMBOL_GPL(sw64_fp_emul); +#else +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); +long sw64_fp_emul(unsigned long pc); +#endif + +asmlinkage void +do_entArith(unsigned long summary, unsigned long write_mask, + struct pt_regs *regs) +{ + long si_code = FPE_FLTINV; + + if (summary & 1) { + /* Software-completion summary bit is set, so try to + * emulate the instruction. If the processor supports + * precise exceptions, we don't have to search. + */ + si_code = sw64_fp_emul(regs->pc - 4); + if (si_code == 0) + return; + } + + if (!user_mode(regs)) + die("Arithmetic fault", regs, 0); + + /*summary<39> means integer divide by zero in C4.*/ + if ((summary >> 39) & 1) + si_code = FPE_INTDIV; + + force_sig_fault(SIGFPE, si_code, (void __user *)regs->pc); +} + +void simd_emulate(unsigned int inst, unsigned long va) +{ + unsigned long *fp; + int instr_opc, reg; + + instr_opc = (inst >> 26) & 0x3f; + reg = (inst >> 21) & 0x1f; + fp = (unsigned long *) va; + + switch (instr_opc) { + case 0x0d: /* vldd */ + sw64_write_simd_fp_reg_d(reg, fp[0], fp[1], fp[2], fp[3]); + return; + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + return; + } +} + +/* + * BPT/GENTRAP/OPDEC make regs->pc = exc_pc + 4. debugger should + * do something necessary to handle it correctly. + */ +asmlinkage void +do_entIF(unsigned long inst_type, unsigned long va, struct pt_regs *regs) +{ + int signo, code; + unsigned int inst, type; + + type = inst_type & 0xffffffff; + inst = inst_type >> 32; + + if (type == IF_SIMDEMU) { + simd_emulate(inst, va); + return; + } + + if (!user_mode(regs) && type != IF_OPDEC) { + if (type == IF_BREAKPOINT) { + /* support kgdb */ + notify_die(0, "kgdb trap", regs, 0, 0, SIGTRAP); + return; + } + die((type == IF_RESERVED ? 
"Kernel Bug" : "Instruction fault"), + regs, type); + } + + switch (type) { + case IF_BREAKPOINT: /* gdb do pc-4 for sigtrap */ + force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc); + return; + + case IF_GENTRAP: + regs->pc -= 4; + switch ((long)regs->regs[16]) { + case GEN_INTOVF: + signo = SIGFPE; + code = FPE_INTOVF; + break; + case GEN_INTDIV: + signo = SIGFPE; + code = FPE_INTDIV; + break; + case GEN_FLTOVF: + signo = SIGFPE; + code = FPE_FLTOVF; + break; + case GEN_FLTDIV: + signo = SIGFPE; + code = FPE_FLTDIV; + break; + case GEN_FLTUND: + signo = SIGFPE; + code = FPE_FLTUND; + break; + case GEN_FLTINV: + signo = SIGFPE; + code = FPE_FLTINV; + break; + case GEN_FLTINE: + signo = SIGFPE; + code = FPE_FLTRES; + break; + case GEN_ROPRAND: + signo = SIGFPE; + code = FPE_FLTUNK; + break; + + case GEN_DECOVF: + case GEN_DECDIV: + case GEN_DECINV: + case GEN_ASSERTERR: + case GEN_NULPTRERR: + case GEN_STKOVF: + case GEN_STRLENERR: + case GEN_SUBSTRERR: + case GEN_RANGERR: + case GEN_SUBRNG: + case GEN_SUBRNG1: + case GEN_SUBRNG2: + case GEN_SUBRNG3: + case GEN_SUBRNG4: + case GEN_SUBRNG5: + case GEN_SUBRNG6: + case GEN_SUBRNG7: + default: + regs->pc += 4; + signo = SIGTRAP; + code = TRAP_UNK; + break; + } + + force_sig_fault(signo, code, (void __user *)regs->pc); + return; + + case IF_FEN: + fpu_enable(); + return; + + case IF_OPDEC: + switch (inst) { +#ifdef CONFIG_KPROBES + case BREAK_KPROBE: + if (notify_die(DIE_BREAK, "kprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case BREAK_KPROBE_SS: + if (notify_die(DIE_SSTEPBP, "single_step", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; +#endif +#ifdef CONFIG_UPROBES + case UPROBE_BRK_UPROBE: + if (notify_die(DIE_UPROBE, "uprobe", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; + break; + case UPROBE_BRK_UPROBE_XOL: + if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, 0, 0, SIGTRAP) == NOTIFY_STOP) + return; +#endif + } + + if (user_mode(regs)) + regs->pc -= 4; + else + die("Instruction fault", regs, type); + break; + + default: /* unexpected instruction-fault type */ + regs->pc -= 4; + break; + } + + force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc); +} + +asmlinkage void +do_entUna(void *va, unsigned long opcode, unsigned long reg, + struct pt_regs *regs) +{ + long error; + unsigned long tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8; + unsigned long pc = regs->pc - 4; + + /* + * We don't want to use the generic get/put unaligned macros as + * we want to trap exceptions. Only if we actually get an + * exception will we decide whether we should have caught it. 
+ */ + + switch (opcode) { + case 0x21: + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table,\"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x22: + __asm__ __volatile__( + "1: ldl_u %1,0(%3)\n" + "2: ldl_u %2,3(%3)\n" + " extlw %1,%3,%1\n" + " exthw %2,%3,%2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = (int)(tmp1 | tmp2); + return; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto got_exception; + regs->regs[reg] = tmp1 | tmp2; + return; + + case 0x29: /* sth */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2a: /* stw */ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1,%2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), 
"=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(regs->regs[reg]), "0"(0)); + + if (error) + goto got_exception; + return; + } + + pr_warn("Bad unaligned kernel access at %016lx: %p %lx %lu\n", + pc, va, opcode, reg); + make_task_dead(SIGSEGV); + +got_exception: + /* Ok, we caught the exception, but we don't want it. Is there + * someone to pass it along to? + */ + if (fixup_exception(regs, pc)) { + pr_info("Forwarding unaligned exception at %lx (%lx)\n", + pc, regs->pc); + return; + } + + /* + * Yikes! No one to forward the exception to. + * Since the registers are in a weird format, dump them ourselves. + */ + + die("Unhandled unaligned exception", regs, error); +} + +/* + * Handle user-level unaligned fault. Handling user-level unaligned + * faults is *extremely* slow and produces nasty messages. A user + * program *should* fix unaligned faults ASAP. + * + * Notice that we have (almost) the regular kernel stack layout here, + * so finding the appropriate registers is a little more difficult + * than in the kernel case. + * + * Finally, we handle regular integer load/stores only. In + * particular, load-linked/store-conditionally and floating point + * load/stores are not supported. The former make no sense with + * unaligned faults (they are guaranteed to fail) and I don't think + * the latter will occur in any decent program. + * + * Sigh. We *do* have to handle some FP operations, because GCC will + * uses them as temporary storage for integer memory to memory copies. + * However, we need to deal with stt/ldt and sts/lds only. + */ +#define OP_INT_MASK (1L << 0x22 | 1L << 0x2a | /* ldw stw */ \ + 1L << 0x23 | 1L << 0x2b | /* ldl stl */ \ + 1L << 0x21 | 1L << 0x29 | /* ldhu sth */ \ + 1L << 0x20 | 1L << 0x28) /* ldbu stb */ + +asmlinkage void +do_entUnaUser(void __user *va, unsigned long opcode, + unsigned long reg, struct pt_regs *regs) +{ +#ifdef CONFIG_UNA_PRINT + static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); +#endif + + unsigned long tmp1, tmp2, tmp3, tmp4; + unsigned long fake_reg, *reg_addr = &fake_reg; + int si_code; + long error; + unsigned long tmp, tmp5, tmp6, tmp7, tmp8, vb; + unsigned long fp[4]; + unsigned long instr, instr_op, value; + +#ifdef CONFIG_DEBUG_FS + /* + * If command name is specified, record some information + * to debugfs. + */ + if (unaligned_task[0] && !strcmp(unaligned_task, current->comm)) { + int idx; + + idx = unaligned_count % UNA_MAX_ENTRIES; + unaligned[idx].va = (unsigned long)va; + unaligned[idx].pc = regs->pc; + unaligned_count++; + } +#endif + + /* Check the UAC bits to decide what the user wants us to do + * with the unaliged access. + */ + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, + 1, regs, regs->pc - 4); + +#ifdef CONFIG_UNA_PRINT + if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { + if (__ratelimit(&ratelimit)) { + pr_info("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", + current->comm, task_pid_nr(current), + regs->pc - 4, va, opcode, reg); + } + } +#endif + if ((current_thread_info()->status & TS_UAC_SIGBUS)) + goto give_sigbus; + /* Not sure why you'd want to use this, but... */ + if ((current_thread_info()->status & TS_UAC_NOFIX)) + return; + + /* Don't bother reading ds in the access check since we already + * know that this came from the user. Also rely on the fact that + * the page at TASK_SIZE is unmapped and so can't be touched anyway. 
+ */ + if ((unsigned long)va >= TASK_SIZE) + goto give_sigsegv; + + if ((1L << opcode) & OP_INT_MASK) { + /* it's an integer load/store */ + if (reg < 31) { + reg_addr = ®s->regs[reg]; + } else { + /* zero "register" */ + fake_reg = 0; + } + } + + get_user(instr, (__u32 *)(regs->pc - 4)); + instr_op = (instr >> 26) & 0x3f; + + get_user(value, (__u64 *)va); + + switch (instr_op) { + + case 0x0c: /* vlds */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp1 = tmp1 | tmp4; + tmp2 = tmp5 | tmp3; + + sw64_write_simd_fp_reg_s(reg, tmp1, tmp2); + + return; + } + case 0x0a: /* ldse */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + tmp = tmp | (tmp << 32); + + sw64_write_simd_fp_reg_s(reg, tmp, tmp); + + return; + + case 0x0d: /* vldd */ + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + "1: ldl %1, 0(%5)\n" + "2: ldl %2, 8(%5)\n" + "3: ldl %3, 16(%5)\n" + "4: ldl %4, 24(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi %4, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_d(reg, tmp1, tmp2, tmp3, tmp4); + + return; + } else { + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp7 = tmp1 | tmp4; //f0 + tmp8 = tmp5 | tmp3; //f1 + + vb = ((unsigned long)(va))+16; + + __asm__ __volatile__( + "1: ldl_u %1, 0(%6)\n" + "2: ldl_u %2, 7(%6)\n" + "3: ldl_u %3, 15(%6)\n" + " extll %1, %6, %1\n" + " extll %2, %6, %5\n" + " exthl %2, %6, %4\n" + " exthl %3, %6, %3\n" + "4:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 4b-1b(%0)\n" + " .long 2b - .\n" + " ldi 
%2, 4b-2b(%0)\n" + " .long 3b - .\n" + " ldi %3, 4b-3b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5) + : "r"(vb), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp4; // f2 + tmp2 = tmp5 | tmp3; // f3 + + sw64_write_simd_fp_reg_d(reg, tmp7, tmp8, tmp, tmp2); + return; + } + + case 0x0b: /* ldde */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + tmp = tmp1 | tmp2; + + sw64_write_simd_fp_reg_d(reg, tmp, tmp, tmp, tmp); + return; + + case 0x09: /* ldwe */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + + if (error) + goto give_sigsegv; + + sw64_write_simd_fp_reg_ldwe(reg, (int)(tmp1 | tmp2)); + + return; + + case 0x0e: /* vsts */ + sw64_read_simd_fp_m_s(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb 
%6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + + case 0x0f: /* vstd */ + sw64_read_simd_fp_m_d(reg, fp); + if ((unsigned long)va << 61 == 0) { + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "r"(fp[0]), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va)+16; + + + __asm__ __volatile__( + " bis %4, %4, %1\n" + " bis %5, %5, %2\n" + "1: stl %1, 0(%3)\n" + "2: stl %2, 8(%3)\n" + "3:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(vb), "r"(fp[2]), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } else { + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(fp[0]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = ((unsigned long)va) + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi 
$31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[1]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[2]), "0"(0)); + + if (error) + goto give_sigsegv; + + vb = vb + 8; + + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(vb), "r"(fp[3]), "0"(0)); + + if (error) + goto give_sigsegv; + + return; + } + } + switch (opcode) { + case 0x21: /* ldhu */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 1(%3)\n" + " extlh %1, %3, %1\n" + " exthh %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + case 0x26: /* flds */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + 
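/*
+ * As in the kernel-mode handler above, each __ex_table pair
+ * records the address of a potentially faulting insn
+ * (".long 1b - .") plus an ldi whose fields encode the fixup:
+ * the register to fix up and the label to resume at ("3b").
+ */
+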
".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg_s(reg, tmp1 | tmp2); + return; + + case 0x27: /* fldd */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + sw64_write_fp_reg(reg, tmp1 | tmp2); + return; + + case 0x22: /* ldw */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 3(%3)\n" + " extlw %1, %3, %1\n" + " exthw %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = (int)(tmp1 | tmp2); + break; + + case 0x23: /* ldl */ + __asm__ __volatile__( + "1: ldl_u %1, 0(%3)\n" + "2: ldl_u %2, 7(%3)\n" + " extll %1, %3, %1\n" + " exthl %2, %3, %2\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %1, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %2, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) + : "r"(va), "0"(0)); + if (error) + goto give_sigsegv; + *reg_addr = tmp1 | tmp2; + break; + + /* Note that the store sequences do not indicate that they change + * memory because it _should_ be affecting nothing in this context. + * (Otherwise we have other, much larger, problems.) + */ + case 0x29: /* sth with stb */ + __asm__ __volatile__( + " zap %6, 2, %1\n" + " srl %6, 8, %2\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi %2, 3b-1b(%0)\n" + " .long 2b - .\n" + " ldi %1, 3b-2b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2e: /* fsts*/ + fake_reg = sw64_read_fp_reg_s(reg); + fallthrough; + + case 0x2a: /* stw with stb*/ + __asm__ __volatile__( + " zapnot %6, 0x1, %1\n" + " srl %6, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %6, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %6, 24, %4\n" + " zapnot %4, 0x1, %4\n" + "1: stb %1, 0x0(%5)\n" + "2: stb %2, 0x1(%5)\n" + "3: stb %3, 0x2(%5)\n" + "4: stb %4, 0x3(%5)\n" + "5:\n" + ".section __ex_table, \"a\"\n" + " .long 1b - .\n" + " ldi $31, 5b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 5b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 5b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 5b-4b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), + "=&r"(tmp3), "=&r"(tmp4) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + case 0x2f: /* fstd */ + fake_reg = sw64_read_fp_reg(reg); + fallthrough; + + case 0x2b: /* stl */ + __asm__ __volatile__( + " zapnot %10, 0x1, %1\n" + " srl %10, 8, %2\n" + " zapnot %2, 0x1, %2\n" + " srl %10, 16, %3\n" + " zapnot %3, 0x1, %3\n" + " srl %10, 24, %4\n" + " zapnot %4, 0x1, %4\n" + " srl %10, 32, %5\n" + " zapnot %5, 0x1, %5\n" + " srl %10, 40, %6\n" + " zapnot %6, 0x1, %6\n" + " srl %10, 48, %7\n" + " zapnot %7, 0x1, %7\n" + " srl %10, 56, %8\n" + " zapnot %8, 0x1, %8\n" + "1: stb %1, 0(%9)\n" + "2: 
stb %2, 1(%9)\n" + "3: stb %3, 2(%9)\n" + "4: stb %4, 3(%9)\n" + "5: stb %5, 4(%9)\n" + "6: stb %6, 5(%9)\n" + "7: stb %7, 6(%9)\n" + "8: stb %8, 7(%9)\n" + "9:\n" + ".section __ex_table, \"a\"\n\t" + " .long 1b - .\n" + " ldi $31, 9b-1b(%0)\n" + " .long 2b - .\n" + " ldi $31, 9b-2b(%0)\n" + " .long 3b - .\n" + " ldi $31, 9b-3b(%0)\n" + " .long 4b - .\n" + " ldi $31, 9b-4b(%0)\n" + " .long 5b - .\n" + " ldi $31, 9b-5b(%0)\n" + " .long 6b - .\n" + " ldi $31, 9b-6b(%0)\n" + " .long 7b - .\n" + " ldi $31, 9b-7b(%0)\n" + " .long 8b - .\n" + " ldi $31, 9b-8b(%0)\n" + ".previous" + : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), + "=&r"(tmp4), "=&r"(tmp5), "=&r"(tmp6), "=&r"(tmp7), "=&r"(tmp8) + : "r"(va), "r"(*reg_addr), "0"(0)); + + if (error) + goto give_sigsegv; + return; + + default: + /* What instruction were you trying to use, exactly? */ + goto give_sigbus; + } + + return; + +give_sigsegv: + regs->pc -= 4; /* make pc point to faulting insn */ + + /* We need to replicate some of the logic in mm/fault.c, + * since we don't have access to the fault code in the + * exception handling return path. + */ + if ((unsigned long)va >= TASK_SIZE) + si_code = SEGV_ACCERR; + else { + struct mm_struct *mm = current->mm; + + down_read(&mm->mmap_lock); + if (find_vma(mm, (unsigned long)va)) + si_code = SEGV_ACCERR; + else + si_code = SEGV_MAPERR; + up_read(&mm->mmap_lock); + } + force_sig_fault(SIGSEGV, si_code, va); + return; + +give_sigbus: + regs->pc -= 4; + force_sig_fault(SIGBUS, BUS_ADRALN, va); +} + +asmlinkage void do_entSys(struct pt_regs *regs) +{ + long ret = -ENOSYS; + unsigned long nr; + unsigned long ti_flags = current_thread_info()->flags; + + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + nr = regs->regs[0]; + + if (ti_flags & _TIF_SYSCALL_WORK) { + nr = syscall_trace_enter(); + if (nr == NO_SYSCALL) + goto syscall_out; + regs->orig_r0 = regs->regs[0]; + regs->orig_r19 = regs->regs[19]; + } + + if (nr < __NR_syscalls) { + syscall_fn_t syscall_fn = sys_call_table[nr]; + + ret = syscall_fn(regs->regs[16], regs->regs[17], regs->regs[18], + regs->regs[19], regs->regs[20], regs->regs[21]); + } + + if ((nr != __NR_sigreturn) && (nr != __NR_rt_sigreturn)) { + if (likely((ret >= 0) || regs->orig_r0 == NO_SYSCALL)) + syscall_set_return_value(current, regs, 0, ret); + else + syscall_set_return_value(current, regs, ret, 0); + } + +syscall_out: + rseq_syscall(regs); + + if (ti_flags & _TIF_SYSCALL_WORK) + syscall_trace_leave(); +} + +void +trap_init(void) +{ + /* Tell HMcode what global pointer we want in the kernel. 
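 + * The kernel GP is read straight from register $29 below and handed + * to HMcode via wrkgp(); the wrent() calls that follow register the + * kernel entry points for arithmetic traps, memory-management faults, + * instruction faults, unaligned accesses and system calls.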
*/ + register unsigned long gptr __asm__("$29"); + wrkgp(gptr); + + wrent(entArith, 1); + wrent(entMM, 2); + wrent(entIF, 3); + wrent(entUna, 4); + wrent(entSys, 5); +#ifdef CONFIG_EFI + if (smp_processor_id() == 0) + wrent((void *)entSuspend, 6); +#endif +} diff --git a/arch/sw_64/kernel/unaligned.c b/arch/sw_64/kernel/unaligned.c new file mode 100644 index 0000000000000000000000000000000000000000..40a17fb9cbd2c7e0ac75041644e2bd46ff30a370 --- /dev/null +++ b/arch/sw_64/kernel/unaligned.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +#include +#include +#include + +unsigned long unaligned_count; +char unaligned_task[TASK_COMM_LEN]; +struct unaligned_stat unaligned[UNA_MAX_ENTRIES]; + +static ssize_t unaligned_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + size_t size; + + if (!len) + return -EINVAL; + unaligned_count = 0; + size = min(sizeof(unaligned_task), len); + if (copy_from_user(unaligned_task, user_buf, size)) + return -EFAULT; + unaligned_task[size - 1] = '\0'; + + return len; +} + +static int unaligned_show(struct seq_file *m, void *v) +{ + int i, idx, nr; + + if (!unaligned_task[0]) { + seq_puts(m, "No task traced\n"); + return 0; + } + seq_printf(m, "Task command:\t\t%s\n", unaligned_task); + seq_printf(m, "Unaligned count:\t%ld\n", unaligned_count); + if (!unaligned_count) + return 0; + nr = 0; + idx = unaligned_count % UNA_MAX_ENTRIES; + seq_printf(m, "Latest %d unaligned stat:\nNo.\tVA\t\tPC\n", UNA_MAX_ENTRIES); + if (unaligned_count >= UNA_MAX_ENTRIES) { + for (i = idx; i < UNA_MAX_ENTRIES; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + } + for (i = 0; i < idx; i++) + seq_printf(m, "%d\t%#lx\t%#lx\n", + nr++, unaligned[i].va, unaligned[i].pc); + return 0; +} + +static int unaligned_open(struct inode *inode, struct file *file) +{ + return single_open(file, unaligned_show, NULL); +} + +static const struct file_operations unaligned_fops = { + .read = seq_read, + .write = unaligned_set, + .open = unaligned_open, + .llseek = default_llseek, +}; + +static int __init unaligned_init(void) +{ + struct dentry *unaligned; + + if (!sw64_debugfs_dir) + return -ENODEV; + + unaligned = debugfs_create_file("unaligned", 0644, + sw64_debugfs_dir, NULL, + &unaligned_fops); + if (!unaligned) + return -ENOMEM; + + return 0; +} + +late_initcall(unaligned_init); diff --git a/arch/sw_64/kernel/uprobes.c b/arch/sw_64/kernel/uprobes.c new file mode 100644 index 0000000000000000000000000000000000000000..928312d62cfd172f20edcc113fea897a5c054a56 --- /dev/null +++ b/arch/sw_64/kernel/uprobes.c @@ -0,0 +1,182 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +/** + * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. + * @aup: the probepoint information. + * @mm: the probed address space. + * @addr: virtual address at which to install the probepoint + * Return 0 on success or a negative errno on error.
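 + * + * sw_64 instructions are fixed-width 32-bit, so the only validity check + * made here is that @addr is 4-byte aligned; the out-of-line slot is + * then the original instruction followed by UPROBE_BRK_UPROBE_XOL.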
+ */ +int arch_uprobe_analyze_insn(struct arch_uprobe *aup, + struct mm_struct *mm, unsigned long addr) +{ + u32 inst; + + if (addr & 0x03) + return -EINVAL; + + inst = aup->insn; + + aup->ixol[0] = aup->insn; + aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ + + return 0; +} + +void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, + void *src, unsigned long len) +{ + unsigned long kaddr, kstart; + + /* Initialize the slot */ + kaddr = (unsigned long)kmap_local_page(page); + kstart = kaddr + (vaddr & ~PAGE_MASK); + memcpy((void *)kstart, src, len); + flush_icache_range(kstart, kstart + len); + kunmap_local((void *)kaddr); +} + +/* + * arch_uprobe_pre_xol - prepare to execute out of line. + * @aup: the probepoint information. + * @regs: reflects the saved user state of current task. + */ +int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Point the instruction pointer at the out-of-line slot */ + instruction_pointer_set(regs, utask->xol_vaddr); + + return 0; +} + +int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + /* Resume at the instruction following the breakpoint address */ + instruction_pointer_set(regs, utask->vaddr + 4); + + return 0; +} + +/* + * If the xol insn itself traps and generates a signal (say, + * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped + * instruction jumps back to its own address. It is assumed that anything + * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. + * + * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, + * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to + * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). + */ +bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) +{ + return false; +} + +int arch_uprobe_exception_notify(struct notifier_block *self, + unsigned long val, void *data) +{ + struct die_args *args = data; + struct pt_regs *regs = args->regs; + + /* regs == NULL is a kernel bug */ + if (WARN_ON(!regs)) + return NOTIFY_DONE; + + /* We are only interested in userspace traps */ + if (!user_mode(regs)) + return NOTIFY_DONE; + + switch (val) { + case DIE_UPROBE: + if (uprobe_pre_sstep_notifier(regs)) + return NOTIFY_STOP; + break; + case DIE_UPROBE_XOL: + if (uprobe_post_sstep_notifier(regs)) + return NOTIFY_STOP; + default: + break; + } + + return 0; +} + +/* + * This function gets called when the XOL instruction either gets trapped or + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. + */ +void arch_uprobe_abort_xol(struct arch_uprobe *aup, + struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + instruction_pointer_set(regs, utask->vaddr); +} + +unsigned long arch_uretprobe_hijack_return_addr( + unsigned long trampoline_vaddr, struct pt_regs *regs) +{ + unsigned long ra; + + ra = regs->regs[26]; + + /* Replace the return address with the trampoline address */ + regs->regs[26] = trampoline_vaddr; + + return ra; +} + +/* + * See if the instruction can be emulated. + * Returns true if instruction was emulated, false otherwise. + * + * For now we never emulate, so this function just returns false.
+ */ +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return 0; +} + +/* + * struct xol_area and get_trampoline_vaddr() are copied from + * kernel/events/uprobes.c to avoid modifying arch-independent + * code. + */ +struct xol_area { + wait_queue_head_t wq; + atomic_t slot_count; + unsigned long *bitmap; + struct vm_special_mapping xol_mapping; + struct page *pages[2]; + unsigned long vaddr; +}; + +static unsigned long get_trampoline_vaddr(void) +{ + struct xol_area *area; + unsigned long trampoline_vaddr = -1; + + area = READ_ONCE(current->mm->uprobes_state.xol_area); + if (area) + trampoline_vaddr = area->vaddr; + + return trampoline_vaddr; +} + +void sw64_fix_uretprobe(struct pt_regs *regs, unsigned long exc_pc) +{ + /* + * regs->pc has been changed to orig_ret_vaddr in handle_trampoline(). + */ + if (exc_pc == get_trampoline_vaddr()) + regs->regs[26] = regs->pc; +} diff --git a/arch/sw_64/kernel/vdso.c b/arch/sw_64/kernel/vdso.c new file mode 100644 index 0000000000000000000000000000000000000000..b4126cbaa4bda220635ac284a6fe526ee02a923b --- /dev/null +++ b/arch/sw_64/kernel/vdso.c @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + +#include + +extern char vdso_start, vdso_end; +static unsigned long vdso_pages; +static struct page **vdso_pagelist; + +/* + * The vDSO data page. + */ +static union { + struct vdso_data data; + u8 page[PAGE_SIZE]; +} vdso_data_store __page_aligned_data; +struct vdso_data *vdso_data = &vdso_data_store.data; + +static struct vm_special_mapping vdso_spec[2]; + +static int __init vdso_init(void) +{ + int i; + + if (memcmp(&vdso_start, "\177ELF", 4)) { + pr_err("vDSO is not a valid ELF object!\n"); + return -EINVAL; + } + + vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; + pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", + vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); + + /* Allocate the vDSO pagelist, plus a page for the data. */ + vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), + GFP_KERNEL); + if (vdso_pagelist == NULL) + return -ENOMEM; + + /* Grab the vDSO data page. */ + vdso_pagelist[0] = virt_to_page(vdso_data); + + /* Grab the vDSO code pages. 
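 + * Each PAGE_SIZE chunk of the ELF image embedded between vdso_start + * and vdso_end gets a struct page pointer here so that the whole image + * can later be mapped into each process by arch_setup_additional_pages().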
*/ + for (i = 0; i < vdso_pages; i++) + vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); + + /* Populate the special mapping structures */ + vdso_spec[0] = (struct vm_special_mapping) { + .name = "[vvar]", + .pages = vdso_pagelist, + }; + + vdso_spec[1] = (struct vm_special_mapping) { + .name = "[vdso]", + .pages = &vdso_pagelist[1], + }; + + return 0; +} +arch_initcall(vdso_init); + +int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp) +{ + struct mm_struct *mm = current->mm; + unsigned long vdso_base, vdso_text_len, vdso_mapping_len; + void *ret; + + vdso_text_len = vdso_pages << PAGE_SHIFT; + /* Be sure to map the data page */ + vdso_mapping_len = vdso_text_len + PAGE_SIZE; + + if (down_write_killable(&mm->mmap_lock)) + return -EINTR; + vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); + if (IS_ERR_VALUE(vdso_base)) { + ret = ERR_PTR(vdso_base); + goto up_fail; + } + ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, + VM_READ|VM_MAYREAD, + &vdso_spec[0]); + if (IS_ERR(ret)) + goto up_fail; + + vdso_base += PAGE_SIZE; + mm->context.vdso = (void *)vdso_base; + ret = _install_special_mapping(mm, vdso_base, vdso_text_len, + VM_READ|VM_EXEC| + VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, + &vdso_spec[1]); + if (IS_ERR(ret)) + goto up_fail; + + up_write(&mm->mmap_lock); + return 0; + +up_fail: + mm->context.vdso = NULL; + up_write(&mm->mmap_lock); + return PTR_ERR(ret); +} + +void update_vsyscall(struct timekeeper *tk) +{ + vdso_data_write_begin(vdso_data); + + vdso_data->xtime_sec = tk->xtime_sec; + vdso_data->xtime_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->wall_to_mono_sec = tk->wall_to_monotonic.tv_sec; + vdso_data->wall_to_mono_nsec = tk->wall_to_monotonic.tv_nsec; + vdso_data->cs_shift = tk->tkr_mono.shift; + + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; + vdso_data->cs_mask = tk->tkr_mono.mask; + + vdso_data_write_end(vdso_data); +} + +void update_vsyscall_tz(void) +{ + vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; + vdso_data->tz_dsttime = sys_tz.tz_dsttime; +} diff --git a/arch/sw_64/kernel/vdso/.gitignore b/arch/sw_64/kernel/vdso/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..2b6a8b0ed7ca3c7bfcf8165790cf8a2a08814ae3 --- /dev/null +++ b/arch/sw_64/kernel/vdso/.gitignore @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 +vdso.lds +vdso.so.dbg.tmp +vdso-syms.S diff --git a/arch/sw_64/kernel/vdso/Makefile b/arch/sw_64/kernel/vdso/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..190cc345dbb909a618467d85aedc7761cbb5bd33 --- /dev/null +++ b/arch/sw_64/kernel/vdso/Makefile @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: GPL-2.0 +# Symbols present in the vdso +vdso-syms = rt_sigreturn gettimeofday + +# Files to link into the vdso +obj-vdso = $(patsubst %, v%.o, $(vdso-syms)) + +# Build rules +targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S +obj-vdso := $(addprefix $(obj)/, $(obj-vdso)) + +obj-y += vdso.o vdso-syms.o +extra-y += vdso.lds +CPPFLAGS_vdso.lds += -P -C -U$(ARCH) + +# vDSO code runs in userspace and -pg doesn't help with profiling anyway. +CFLAGS_REMOVE_vdso.o = -pg +CFLAGS_REMOVE_vrt_sigreturn.o = -pg +CFLAGS_REMOVE_vgettimeofday.o = -pg + +ifdef CONFIG_FEEDBACK_COLLECT +# vDSO code runs in userspace, not collecting feedback data. 
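+# Strip the feedback-generation flag from every vDSO object: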
+CFLAGS_REMOVE_vdso.o = -ffeedback-generate +CFLAGS_REMOVE_vrt_sigreturn.o = -ffeedback-generate +CFLAGS_REMOVE_vgettimeofday.o = -ffeedback-generate +endif + +# Disable gcov profiling for VDSO code +GCOV_PROFILE := n + +# Force dependency +$(obj)/vdso.o: $(obj)/vdso.so + +# link rule for the .so file, .lds has to be first +SYSCFLAGS_vdso.so.dbg = $(c_flags) +$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE + $(call if_changed,vdsold) +SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=both) + +$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE + $(call if_changed,so2s) + +# strip rule for the .so file +$(obj)/%.so: OBJCOPYFLAGS := -S +$(obj)/%.so: $(obj)/%.so.dbg FORCE + $(call if_changed,objcopy) + +# actual build commands +# The DSO images are built using a special linker script +# Add -lgcc so sw_64 gets static muldi3 and lshrdi3 definitions. +# Make sure only to export the intended __vdso_xxx symbol offsets. +quiet_cmd_vdsold = VDSOLD $@ + cmd_vdsold = $(CC) $(KCFLAGS) -nostdlib $(SYSCFLAGS_$(@F)) \ + -Wl,-T,$(filter-out FORCE,$^) -o $@.tmp -lgcc && \ + $(CROSS_COMPILE)objcopy \ + $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \ + rm $@.tmp + +# Extracts symbol offsets from the VDSO, converting them into an assembly file +# that contains the same symbols at the same offsets. +quiet_cmd_so2s = SO2S $@ + cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@ + +# install commands for the unstripped file +quiet_cmd_vdso_install = INSTALL $@ + cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ + +vdso.so: $(obj)/vdso.so.dbg + @mkdir -p $(MODLIB)/vdso + $(call cmd,vdso_install) + + +vdso_install: vdso.so diff --git a/arch/sw_64/kernel/vdso/so2s.sh b/arch/sw_64/kernel/vdso/so2s.sh new file mode 100755 index 0000000000000000000000000000000000000000..e1763af8e7301a0ec8ca7e9f901c6bf438c5920d --- /dev/null +++ b/arch/sw_64/kernel/vdso/so2s.sh @@ -0,0 +1,4 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0+ + +grep "__vdso_" | sed 's/\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_.*\)*/.globl\t\2\n\2:\n.quad\t0x\1/' diff --git a/arch/sw_64/kernel/vdso/vdso.S b/arch/sw_64/kernel/vdso/vdso.S new file mode 100644 index 0000000000000000000000000000000000000000..edd9be27db9d5b90652553bf48196b4f0f999e3a --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.S @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include + + __PAGE_ALIGNED_DATA + + .globl vdso_start, vdso_end + .balign PAGE_SIZE +vdso_start: + .incbin "arch/sw_64/kernel/vdso/vdso.so" + .balign PAGE_SIZE +vdso_end: + + .previous diff --git a/arch/sw_64/kernel/vdso/vdso.lds.S b/arch/sw_64/kernel/vdso/vdso.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..de1782ccb7b678c44497377f2a7985355b94a4a4 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vdso.lds.S @@ -0,0 +1,89 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * GNU linker script for the VDSO library.
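 + * + * The layout keeps everything in a single read-only PT_LOAD segment + * (see PHDRS below) and places the vDSO data page immediately before + * the text, which is where the _vdso_data symbol points.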
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + * Heavily based on the vDSO linker scripts for other archs. + */ + +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) + +SECTIONS +{ + PROVIDE(_vdso_data = . - PAGE_SIZE); + . = VDSO_LBASE + SIZEOF_HEADERS; + + .hash : { *(.hash) } :text + .gnu.hash : { *(.gnu.hash) } + .dynsym : { *(.dynsym) } + .dynstr : { *(.dynstr) } + .gnu.version : { *(.gnu.version) } + .gnu.version_d : { *(.gnu.version_d) } + .gnu.version_r : { *(.gnu.version_r) } + + .note : { *(.note.*) } :text :note + + . = ALIGN(16); + .text : { *(.text*) } + PROVIDE (__etext = .); + PROVIDE (_etext = .); + PROVIDE (etext = .); + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + + .dynamic : { *(.dynamic) } :text :dynamic + + .rodata : { *(.rodata*) } :text + + _end = .; + PROVIDE(end = .); + + /DISCARD/ : { + *(.note.GNU-stack) + *(.data .data.* .gnu.linkonce.d.* .sdata*) + *(.bss .sbss .dynbss .dynsbss) + } +} + +/* + * We must supply the ELF program headers explicitly to get just one + * PT_LOAD segment, and set the flags explicitly to make segments read-only. + */ +PHDRS +{ + text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */ + dynamic PT_DYNAMIC FLAGS(4); /* PF_R */ + note PT_NOTE FLAGS(4); /* PF_R */ + eh_frame_hdr PT_GNU_EH_FRAME; +} + +/* + * This controls what symbols we export from the DSO. + */ +VERSION +{ + LINUX_2.6 { + global: + __vdso_rt_sigreturn; + __vdso_gettimeofday; + __vdso_clock_gettime; + local: *; + }; +} diff --git a/arch/sw_64/kernel/vdso/vgettimeofday.c b/arch/sw_64/kernel/vdso/vgettimeofday.c new file mode 100644 index 0000000000000000000000000000000000000000..0aa16e988e88efaeda40178f46583ee6647e9f59 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vgettimeofday.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation, version 2. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for + * more details. 
+ */ + +#include + +#include +#include +#include +#include + +static __always_inline int syscall_fallback(clockid_t clkid, struct timespec64 *ts) +{ + register int r0 asm("$0"); + register unsigned long r19 asm("$19"); + asm volatile( + " mov %0, $16\n" + " mov %1, $17\n" + " ldi $0, %2\n" + " sys_call %3\n" + :: "r"(clkid), "r"(ts), "i"(__NR_clock_gettime), "i"(HMC_callsys) + : "$0", "$16", "$17", "$19"); + if (unlikely(r19)) + return -r0; + else + return r0; +} + +static __always_inline int do_realtime_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + } while (vdso_data_read_retry(data, start_seq)); + + return 0; +} + + +static __always_inline int do_monotonic_coarse(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ts->tv_nsec = data->xtime_nsec >> data->cs_shift; + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + timespec64_add_ns(ts, to_mono_nsec); + + return 0; +} + +#if defined(CONFIG_SUBARCH_C3B) +static __always_inline u64 read_longtime(void) +{ + register unsigned long __r0 __asm__("$0"); + + __asm__ __volatile__( + "sys_call %1" : "=r"(__r0) : "i" (HMC_longtime)); + + return __r0; +} +#elif defined(CONFIG_SUBARCH_C4) +static __always_inline u64 read_longtime(void) +{ + return read_csr(CSR_SHTCLOCK); +} +#endif + +static __always_inline u64 get_ns(const struct vdso_data *data) +{ + u64 cycle_now, delta, nsec; + + cycle_now = read_longtime(); + delta = (cycle_now - data->cs_cycle_last) & data->cs_mask; + + nsec = (delta * data->cs_mult) + data->xtime_nsec; + nsec >>= data->cs_shift; + + return nsec; +} + + +static __always_inline int do_realtime(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns); + + return 0; +} + +static __always_inline int do_monotonic(struct timespec64 *ts, + const struct vdso_data *data) +{ + u32 start_seq; + u64 ns; + u64 to_mono_sec; + u64 to_mono_nsec; + + do { + start_seq = vdso_data_read_begin(data); + + ts->tv_sec = data->xtime_sec; + ns = get_ns(data); + + to_mono_sec = data->wall_to_mono_sec; + to_mono_nsec = data->wall_to_mono_nsec; + } while (vdso_data_read_retry(data, start_seq)); + + ts->tv_sec += to_mono_sec; + ts->tv_nsec = 0; + timespec64_add_ns(ts, ns + to_mono_nsec); + + return 0; +} + + +int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) +{ + const struct vdso_data *data = get_vdso_data(); + struct timespec64 ts; + int ret; + + ret = do_realtime(&ts, data); + if (ret) + return ret; + + if (tv) { + tv->tv_sec = ts.tv_sec; + tv->tv_usec = ts.tv_nsec / 1000; + } + + if (tz) { + tz->tz_minuteswest = data->tz_minuteswest; + tz->tz_dsttime = data->tz_dsttime; + } + + return 0; +} + +int __vdso_clock_gettime(clockid_t clkid, struct timespec64 *ts) +{ + const struct vdso_data *data = get_vdso_data(); + int ret; + + switch (clkid) { + case CLOCK_REALTIME_COARSE: + ret = do_realtime_coarse(ts, data); + break; + case CLOCK_MONOTONIC_COARSE: + ret = 
do_monotonic_coarse(ts, data); + break; + case CLOCK_REALTIME: + ret = do_realtime(ts, data); + break; + case CLOCK_MONOTONIC: + ret = do_monotonic(ts, data); + break; + default: + /* fall back to a syscall */ + ret = syscall_fallback(clkid, ts); + } + + return ret; +} diff --git a/arch/sw_64/kernel/vdso/vrt_sigreturn.S b/arch/sw_64/kernel/vdso/vrt_sigreturn.S new file mode 100644 index 0000000000000000000000000000000000000000..cdbf6501ad6457799e5557da825e59e8ad658211 --- /dev/null +++ b/arch/sw_64/kernel/vdso/vrt_sigreturn.S @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Sigreturn trampoline for returning from a signal when the SA_RESTORER + * flag is not set. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + * + */ + +#include +#include +#include +#include + + .text + + .macro SIGCONTEXT_REGS_I base, from = 0 + .cfi_offset \from, \base + (4 + \from) * 8 + .if 30 - \from + SIGCONTEXT_REGS_I \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_F base, from = 32 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + (\from - 32) * 32 + .if 62 - \from + SIGCONTEXT_REGS_F \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS_V base, from = 67 + .cfi_offset \from, \base + (4 + 32 + 1) * 8 + ((\from - 67) & 0x1f) * 32 + (((\from - 67) >> 5) + 1) * 8 + .if 161 - \from + SIGCONTEXT_REGS_V \base, "(\from + 1)" + .endif + .endm + + .macro SIGCONTEXT_REGS base + SIGCONTEXT_REGS_I \base + SIGCONTEXT_REGS_F \base + SIGCONTEXT_REGS_V \base + .cfi_offset 63, \base + (4 + 32 + 1) * 8 + 32 * 32 + .cfi_offset 64, \base + 2 * 8 + .endm + + .cfi_startproc + .cfi_return_column 64 + .cfi_signal_frame + SIGCONTEXT_REGS (-RT_SIGFRAME_SIZE + RT_SIGFRAME_MCTX) + .cfi_def_cfa_offset RT_SIGFRAME_SIZE + + nop +ENTRY(__vdso_rt_sigreturn) + mov $sp, $16 + ldi $0, __NR_rt_sigreturn + sys_call HMC_callsys +ENDPROC(__vdso_rt_sigreturn) + .cfi_endproc diff --git a/arch/sw_64/kernel/vmlinux.lds.S b/arch/sw_64/kernel/vmlinux.lds.S new file mode 100644 index 0000000000000000000000000000000000000000..9b81b2c7afb8d69e379bc799b9dc2a930259dbfc --- /dev/null +++ b/arch/sw_64/kernel/vmlinux.lds.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#define RUNTIME_DISCARD_EXIT +#define EMITS_PT_NOTE +#define RO_EXCEPTION_TABLE_ALIGN 16 + +#include +#include +#include +#include +#include + +OUTPUT_FORMAT("elf64-sw_64") +OUTPUT_ARCH(sw_64) +ENTRY(__start) +PHDRS { text PT_LOAD; note PT_NOTE; } +jiffies = jiffies_64; +SECTIONS +{ + . 
= _TEXT_START; + + __start = .; + _text = .; /* Text and read-only data */ + _stext = .; + .text : { + HEAD_TEXT + TEXT_TEXT + SCHED_TEXT + LOCK_TEXT + IRQENTRY_TEXT + SOFTIRQENTRY_TEXT + KPROBES_TEXT + *(.fixup) + *(.gnu.warning) + } :text + _etext = .; /* End of text section */ + + RO_DATA(PAGE_SIZE) + + /* Will be freed after init */ + __init_begin = ALIGN(PAGE_SIZE); + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) + /* we have to discard exit text and such at runtime, not link time */ + .exit.text : + { + EXIT_TEXT + } + .exit.data : + { + EXIT_DATA + } + PERCPU_SECTION(L1_CACHE_BYTES) + + /* + * Align to THREAD_SIZE rather than PAGE_SIZE here so any padding page + * needed for the THREAD_SIZE aligned init_task gets freed after init + */ + . = ALIGN(THREAD_SIZE); + __init_end = .; + /* Freed after init ends here */ + + _sdata = .; /* Start of rw data section */ + _data = .; + RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) + .got : { +#ifdef CONFIG_RELOCATABLE + _got_start = .; +#endif + *(.got) +#ifdef CONFIG_RELOCATABLE + _got_end = .; +#endif + } + .sdata : { + *(.sdata) + } + _edata = .; /* End of data section */ + +#ifdef CONFIG_RELOCATABLE + . = ALIGN(4); + .data.reloc : { + _relocation_start = .; + /* + * Space for relocation table + * This needs to be filled so that the + * relocs tool can overwrite the content. + * An invalid value is left at the start of the + * section to abort relocation if the table + * has not been filled in. + */ + LONG(0xFFFFFFFF); + FILL(0); + . += CONFIG_RELOCATION_TABLE_SIZE - 4; + _relocation_end = .; + } +#endif + BSS_SECTION(0, 0, 0) + _end = .; + + .mdebug 0 : { + *(.mdebug) + } + .note 0 : { + *(.note) + } + + STABS_DEBUG + DWARF_DEBUG + ELF_DETAILS + + DISCARDS +} diff --git a/arch/sw_64/kvm/Kconfig b/arch/sw_64/kvm/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..b7e43d0bae510ebe8297d60f0ff35fe30d97cbea --- /dev/null +++ b/arch/sw_64/kvm/Kconfig @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + help + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + select PREEMPT_NOTIFIERS + select CMA + depends on NET + select HAVE_KVM_EVENTFD + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQ_ROUTING + select HAVE_KVM_IRQFD + select HAVE_KVM_MSI + select KVM_VFIO + select MMU_NOTIFIER + select KVM_GENERIC_DIRTYLOG_READ_PROTECT + select TUN + select GENERIC_ALLOCATOR + help + Support for hosting Guest kernels. + We don't support KVM with 3-level page tables yet. + + If unsure, say N. + +config KVM_MEMHOTPLUG + bool "Memory hotplug support for guest" + depends on KVM && MEMORY_HOTPLUG && SUBARCH_C3B + help + Provides memory hotplug support for SW64 guest.
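+ + If unsure, say N.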
+ + +source "drivers/vhost/Kconfig" + +endif # VIRTUALIZATION diff --git a/arch/sw_64/kvm/Makefile b/arch/sw_64/kvm/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..8111014c5cca82fcfbf050d751a04c7281a4c32a --- /dev/null +++ b/arch/sw_64/kvm/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Kernel-based Virtual Machine module +# + +ccflags-y += -I $(srctree)/$(src) + +include $(srctree)/virt/kvm/Makefile.kvm + +obj-$(CONFIG_KVM) += kvm.o + +kvm-y += sw64.o +kvm-y += entry.o +kvm-y += emulate.o +kvm-y += mmio.o +kvm-y += kvm_timer.o +kvm-y += handle_exit.o +kvm-y += perf.o +kvm-$(CONFIG_SUBARCH_C3B) += kvm_core3.o kvm_cma.o +kvm-$(CONFIG_SUBARCH_C4) += kvm_core4.o mmu.o diff --git a/arch/sw_64/kvm/emulate.c b/arch/sw_64/kvm/emulate.c new file mode 100644 index 0000000000000000000000000000000000000000..fc37461b97a031f3f7a26c9c208e3803975ff1d5 --- /dev/null +++ b/arch/sw_64/kvm/emulate.c @@ -0,0 +1,128 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include + +void sw64_decode(struct kvm_vcpu *vcpu, unsigned int insn, struct kvm_run *run) +{ + int opc, ra; + +#ifdef CONFIG_SUBARCH_C3B + opc = (insn >> 26) & 0x3f; + ra = (insn >> 21) & 0x1f; +#elif defined(CONFIG_SUBARCH_C4) + unsigned long ds_stat, exc_sum; + + ds_stat = read_csr(CSR_DS_STAT); + exc_sum = read_csr(CSR_EXC_SUM); + + opc = (ds_stat >> 4) & 0x3f; + ra = (exc_sum >> 8) & 0x1f; +#endif + + switch (opc) { + case 0x20: /* LDBU */ + run->mmio.is_write = 0; + run->mmio.len = 1; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x21: /* LDHU */ + run->mmio.is_write = 0; + run->mmio.len = 2; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x22: /* LDW */ + run->mmio.is_write = 0; + run->mmio.len = 4; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x23: /* LDL */ + case 0x24: /* LDL_U */ + run->mmio.is_write = 0; + run->mmio.len = 8; + vcpu->arch.mmio_decode.rt = ra; + break; + case 0x28: /* STB */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffUL; + run->mmio.len = 1; + break; + case 0x29: /* STH */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffUL; + run->mmio.len = 2; + break; + case 0x2a: /* STW */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra) & 0xffffffffUL; + run->mmio.len = 4; + break; + case 0x2b: /* STL */ + case 0x2c: /* STL_U */ + run->mmio.is_write = 1; + *(unsigned long *)run->mmio.data = vcpu_get_reg(vcpu, ra); + run->mmio.len = 8; + break; + default: + pr_info("Miss done opc %d\n", opc); + break; + } +} + +/* + * Virtual Interrupts. + */ +unsigned int interrupt_pending(struct kvm_vcpu *vcpu, bool *more) +{ + unsigned int irq; + DECLARE_BITMAP(blk, SWVM_IRQS); + + bitmap_copy(blk, vcpu->arch.irqs_pending, SWVM_IRQS); + + irq = find_last_bit(blk, SWVM_IRQS); + + return irq; +} + +void clear_vcpu_irq(struct kvm_vcpu *vcpu) +{ + vcpu->arch.vcb.vcpu_irq = 0xffffffffffffffffUL; +} + +void inject_vcpu_irq(struct kvm_vcpu *vcpu, unsigned int irq) +{ + vcpu->arch.vcb.vcpu_irq = irq; +} + +/* + * This actually diverts the Guest to running an interrupt handler, once an + * interrupt has been identified by interrupt_pending(). + */ +void try_deliver_interrupt(struct kvm_vcpu *vcpu, unsigned int irq, bool more) +{ + BUG_ON(irq >= SWVM_IRQS); + + /* Otherwise we check if they have interrupts disabled. 
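 + * If so, the IRQ stays pending in irqs_pending and delivery will be + * retried on a later call; only a successfully delivered interrupt is + * cleared from the bitmap below.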
*/ + if (vcpu->arch.vcb.vcpu_irq_disabled) { + clear_vcpu_irq(vcpu); + return; + } + + /* If they don't have a handler (yet?), we just ignore it */ + if (vcpu->arch.vcb.ent_int != 0) { + /* OK, mark it no longer pending and deliver it. */ + clear_bit(irq, (vcpu->arch.irqs_pending)); + /* + * set_guest_interrupt() takes the interrupt descriptor and a + * flag to say whether this interrupt pushes an error code onto + * the stack as well: virtual interrupts never do. + */ + inject_vcpu_irq(vcpu, irq); + } +} diff --git a/arch/sw_64/kvm/entry.S b/arch/sw_64/kvm/entry.S new file mode 100644 index 0000000000000000000000000000000000000000..a61ecc387d260497bd414b588a8fac00d67d8bbe --- /dev/null +++ b/arch/sw_64/kvm/entry.S @@ -0,0 +1,263 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 + */ + .text +#include +#include +#include +#include + + .set noat + +/* + * r16: physical address of guest kvm_vcpu.arch.vcb + * r17: pointer to guest kvm_vcpu.arch.kvm_regs + * r18: pointer to hcall args + */ +ENTRY(__sw64_vcpu_run) + /* save host fpregs */ + rfpcr $f0 + fstd $f0, TASK_THREAD_FPCR($8) + vstd $f2, TASK_THREAD_F2($8) + vstd $f3, TASK_THREAD_F3($8) + vstd $f4, TASK_THREAD_F4($8) + vstd $f5, TASK_THREAD_F5($8) + vstd $f6, TASK_THREAD_F6($8) + vstd $f7, TASK_THREAD_F7($8) + vstd $f8, TASK_THREAD_F8($8) + vstd $f9, TASK_THREAD_F9($8) + + ldi sp, -VCPU_RET_SIZE(sp) + /* save host pt_regs to current kernel stack */ + ldi sp, -PT_REGS_SIZE(sp) + stl $9, PT_REGS_R9(sp) + stl $8, PT_REGS_R8(sp) + stl $10, PT_REGS_R10(sp) + stl $11, PT_REGS_R11(sp) + stl $12, PT_REGS_R12(sp) + stl $13, PT_REGS_R13(sp) + stl $14, PT_REGS_R14(sp) + stl $15, PT_REGS_R15(sp) + stl $26, PT_REGS_R26(sp) + + /* restore guest switch stack from guest kvm_regs struct */ + ldl $0, KVM_REGS_R0($17) + ldl $1, KVM_REGS_R1($17) + /* restore $2 later */ + ldl $3, KVM_REGS_R3($17) + ldl $4, KVM_REGS_R4($17) + ldl $5, KVM_REGS_R5($17) + ldl $6, KVM_REGS_R6($17) + ldl $7, KVM_REGS_R7($17) + ldl $8, KVM_REGS_R8($17) + ldl $9, KVM_REGS_R9($17) + ldl $10, KVM_REGS_R10($17) + ldl $11, KVM_REGS_R11($17) + ldl $12, KVM_REGS_R12($17) + ldl $13, KVM_REGS_R13($17) + ldl $14, KVM_REGS_R14($17) + ldl $15, KVM_REGS_R15($17) + ldl $19, KVM_REGS_R19($17) + ldl $20, KVM_REGS_R20($17) + ldl $21, KVM_REGS_R21($17) + ldl $22, KVM_REGS_R22($17) + ldl $23, KVM_REGS_R23($17) + ldl $24, KVM_REGS_R24($17) + ldl $25, KVM_REGS_R25($17) + ldl $26, KVM_REGS_R26($17) + ldl $27, KVM_REGS_R27($17) + ldl $28, KVM_REGS_R28($17) + + fldd $f0, KVM_REGS_FPCR($17) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $g_setfpec_0 + subl $2, 0x1, $2 + beq $2, $g_setfpec_1 + subl $2, 0x1, $2 + beq $2, $g_setfpec_2 + setfpec3 + br $g_setfpec_over +$g_setfpec_0: + setfpec0 + br $g_setfpec_over +$g_setfpec_1: + setfpec1 + br $g_setfpec_over +$g_setfpec_2: + setfpec2 +$g_setfpec_over: + ldl $2, KVM_REGS_R2($17) + vldd $f0, KVM_REGS_F0($17) + vldd $f1, KVM_REGS_F1($17) + vldd $f2, KVM_REGS_F2($17) + vldd $f3, KVM_REGS_F3($17) + vldd $f4, KVM_REGS_F4($17) + vldd $f5, KVM_REGS_F5($17) + vldd $f6, KVM_REGS_F6($17) + vldd $f7, KVM_REGS_F7($17) + vldd $f8, KVM_REGS_F8($17) + vldd $f9, KVM_REGS_F9($17) + vldd $f10, KVM_REGS_F10($17) + vldd $f11, KVM_REGS_F11($17) + vldd $f12, KVM_REGS_F12($17) + vldd $f13, KVM_REGS_F13($17) + vldd $f14, KVM_REGS_F14($17) + vldd $f15, KVM_REGS_F15($17) + vldd $f16, KVM_REGS_F16($17) + vldd $f17, KVM_REGS_F17($17) + vldd $f18, KVM_REGS_F18($17) + vldd $f19, KVM_REGS_F19($17) + vldd $f20, KVM_REGS_F20($17) + 
vldd $f21, KVM_REGS_F21($17) + vldd $f22, KVM_REGS_F22($17) + vldd $f23, KVM_REGS_F23($17) + vldd $f24, KVM_REGS_F24($17) + vldd $f25, KVM_REGS_F25($17) + vldd $f26, KVM_REGS_F26($17) + vldd $f27, KVM_REGS_F27($17) + vldd $f28, KVM_REGS_F28($17) + vldd $f29, KVM_REGS_F29($17) + vldd $f30, KVM_REGS_F30($17) + + ldi $17, KVM_REGS_PS($17) + + /* enter guest */ + /* r16 = guest vcpucb pointer */ + /* r17 = base of guest kvm_regs.ps, saved/restored by hmcode */ + + /* enter guest now */ + sys_call 0x31 + /* exit guest now */ + + ldi $17, -KVM_REGS_PS($17) /* r17: base of kvm_regs */ + + vstd $f0, KVM_REGS_F0($17) + vstd $f1, KVM_REGS_F1($17) + vstd $f2, KVM_REGS_F2($17) + vstd $f3, KVM_REGS_F3($17) + vstd $f4, KVM_REGS_F4($17) + vstd $f5, KVM_REGS_F5($17) + vstd $f6, KVM_REGS_F6($17) + vstd $f7, KVM_REGS_F7($17) + vstd $f8, KVM_REGS_F8($17) + vstd $f9, KVM_REGS_F9($17) + vstd $f10, KVM_REGS_F10($17) + vstd $f11, KVM_REGS_F11($17) + vstd $f12, KVM_REGS_F12($17) + vstd $f13, KVM_REGS_F13($17) + vstd $f14, KVM_REGS_F14($17) + vstd $f15, KVM_REGS_F15($17) + vstd $f16, KVM_REGS_F16($17) + vstd $f17, KVM_REGS_F17($17) + vstd $f18, KVM_REGS_F18($17) + vstd $f19, KVM_REGS_F19($17) + vstd $f20, KVM_REGS_F20($17) + vstd $f21, KVM_REGS_F21($17) + vstd $f22, KVM_REGS_F22($17) + vstd $f23, KVM_REGS_F23($17) + vstd $f24, KVM_REGS_F24($17) + vstd $f25, KVM_REGS_F25($17) + vstd $f26, KVM_REGS_F26($17) + vstd $f27, KVM_REGS_F27($17) + vstd $f28, KVM_REGS_F28($17) + vstd $f29, KVM_REGS_F29($17) + vstd $f30, KVM_REGS_F30($17) + + rfpcr $f0 + fstd $f0, KVM_REGS_FPCR($17) + + /* don't save r0 Hmcode have saved r0 for us */ + stl $1, KVM_REGS_R1($17) + stl $2, KVM_REGS_R2($17) + stl $3, KVM_REGS_R3($17) + stl $4, KVM_REGS_R4($17) + stl $5, KVM_REGS_R5($17) + stl $6, KVM_REGS_R6($17) + stl $7, KVM_REGS_R7($17) + stl $8, KVM_REGS_R8($17) + stl $9, KVM_REGS_R9($17) + stl $10, KVM_REGS_R10($17) + stl $11, KVM_REGS_R11($17) + stl $12, KVM_REGS_R12($17) + stl $13, KVM_REGS_R13($17) + stl $14, KVM_REGS_R14($17) + stl $15, KVM_REGS_R15($17) + stl $19, KVM_REGS_R19($17) + stl $20, KVM_REGS_R20($17) + stl $21, KVM_REGS_R21($17) + stl $22, KVM_REGS_R22($17) + stl $23, KVM_REGS_R23($17) + stl $24, KVM_REGS_R24($17) + stl $25, KVM_REGS_R25($17) + stl $26, KVM_REGS_R26($17) + stl $27, KVM_REGS_R27($17) + stl $28, KVM_REGS_R28($17) + + /* restore host regs from host sp */ + ldl $8, PT_REGS_R8(sp) + ldl $9, PT_REGS_R9(sp) + ldl $10, PT_REGS_R10(sp) + ldl $11, PT_REGS_R11(sp) + ldl $12, PT_REGS_R12(sp) + ldl $13, PT_REGS_R13(sp) + ldl $14, PT_REGS_R14(sp) + ldl $15, PT_REGS_R15(sp) + ldl $26, PT_REGS_R26(sp) + ldi sp, PT_REGS_SIZE(sp) + + /* restore host fpregs */ + fldd $f0, TASK_THREAD_FPCR($8) + wfpcr $f0 + fimovd $f0, $2 + and $2, 0x3, $2 + beq $2, $setfpec_0 + subl $2, 0x1, $2 + beq $2, $setfpec_1 + subl $2, 0x1, $2 + beq $2, $setfpec_2 + setfpec3 + br $setfpec_over +$setfpec_0: + setfpec0 + br $setfpec_over +$setfpec_1: + setfpec1 + br $setfpec_over +$setfpec_2: + setfpec2 +$setfpec_over: + vldd $f2, TASK_THREAD_F2($8) + vldd $f3, TASK_THREAD_F3($8) + vldd $f4, TASK_THREAD_F4($8) + vldd $f5, TASK_THREAD_F5($8) + vldd $f6, TASK_THREAD_F6($8) + vldd $f7, TASK_THREAD_F7($8) + vldd $f8, TASK_THREAD_F8($8) + vldd $f9, TASK_THREAD_F9($8) + + /* if $0 > 0, handle hcall */ + bgt $0, $ret_to + + stl $26, VCPU_RET_RA(sp) + stl $0, VCPU_RET_R0(sp) + + /* Hmcode will setup in */ + /* restore $16 $17 $18, do interrupt trick */ + ldi sp, -(HOST_INT_SIZE + PT_REGS_SIZE)(sp) + ldl $16, HOST_INT_R16(sp) + ldl $17, HOST_INT_R17(sp) + ldl $18, 
HOST_INT_R18(sp) + ldi sp, (HOST_INT_SIZE + PT_REGS_SIZE)(sp) + + ldi $19, -PT_REGS_SIZE(sp) + call $26, do_entInt + ldl $26, VCPU_RET_RA(sp) + ldl $0, VCPU_RET_R0(sp) +$ret_to: + /* ret($0) indicate hcall number */ + ldi sp, VCPU_RET_SIZE(sp) /* pop stack */ + ret diff --git a/arch/sw_64/kvm/handle_exit.c b/arch/sw_64/kvm/handle_exit.c new file mode 100644 index 0000000000000000000000000000000000000000..69b97860db88885af651aeb1c45896752832c72a --- /dev/null +++ b/arch/sw_64/kvm/handle_exit.c @@ -0,0 +1,85 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ +#include +#include +#include +#include +#include +#include +#include + +int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + int exception_index, struct hcall_args *hargs) +{ + gfn_t gfn __maybe_unused; + + switch (exception_index) { + case SW64_KVM_EXIT_IO: + vcpu->stat.io_exits++; + return io_mem_abort(vcpu, run, hargs); + case SW64_KVM_MIGRATION_SET_DIRTY_HM: + case SW64_KVM_MIGRATION_SET_DIRTY: + vcpu->stat.migration_set_dirty++; + gfn = hargs->arg2 >> 24; + mutex_lock(&vcpu->kvm->slots_lock); + kvm_vcpu_mark_page_dirty(vcpu, gfn); + mutex_unlock(&vcpu->kvm->slots_lock); + return 1; + case SW64_KVM_EXIT_HALT: + vcpu->stat.halt_exits++; + vcpu->arch.halted = 1; + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_SHUTDOWN: + vcpu->stat.shutdown_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN; + return 0; + case SW64_KVM_EXIT_RESTART: + vcpu->stat.restart_exits++; + vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; + vcpu->run->system_event.type = KVM_SYSTEM_EVENT_RESET; + return 0; + case SW64_KVM_EXIT_STOP: + vcpu->stat.stop_exits++; + vcpu->arch.halted = 1; + memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending)); + kvm_vcpu_block(vcpu); + return 1; + case SW64_KVM_EXIT_TIMER: + vcpu->stat.timer_exits++; + set_timer(vcpu, hargs->arg0); + return 1; + case SW64_KVM_EXIT_IPI: + vcpu->stat.ipi_exits++; + vcpu_send_ipi(vcpu, hargs->arg0, hargs->arg1); + return 1; + case SW64_KVM_EXIT_DEBUG: + vcpu->stat.debug_exits++; + vcpu->run->exit_reason = KVM_EXIT_DEBUG; + vcpu->run->debug.arch.epc = vcpu->arch.regs.pc; + return 0; +#ifdef CONFIG_KVM_MEMHOTPLUG + case SW64_KVM_EXIT_MEMHOTPLUG: + vcpu->stat.memhotplug_exits++; + vcpu_mem_hotplug(vcpu, hargs->arg0); + return 1; +#endif +#ifdef CONFIG_SUBARCH_C4 + case SW64_KVM_EXIT_APT_FAULT: + return kvm_handle_guest_abort(vcpu, run); +#endif + case SW64_KVM_EXIT_FATAL_ERROR: + vcpu->stat.fatal_error_exits++; + pr_err("Guest fatal error: Reason=[%lx], EXC_PC=[%lx], DVA=[%lx]", hargs->arg0, hargs->arg1, hargs->arg2); + vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; + vcpu->run->hw.hardware_exit_reason = hargs->arg0; + return 0; + } + + return 1; +} diff --git a/arch/sw_64/kvm/irq.h b/arch/sw_64/kvm/irq.h new file mode 100644 index 0000000000000000000000000000000000000000..9268ab6af4920818566e2d39e056be795429d20d --- /dev/null +++ b/arch/sw_64/kvm/irq.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * irq.h: in kernel interrupt controller related definitions + */ + +#ifndef _SW64_KVM_IRQ_H +#define _SW64_KVM_IRQ_H +static inline int irqchip_in_kernel(struct kvm *kvm) +{ + return 1; +} +#endif /* _SW64_KVM_IRQ_H */ diff --git a/arch/sw_64/kvm/kvm_cma.c b/arch/sw_64/kvm/kvm_cma.c new file mode 100644 index 0000000000000000000000000000000000000000..de04eb5d20d746ee436ec40b449dc7385722443f --- /dev/null +++ 
b/arch/sw_64/kvm/kvm_cma.c @@ -0,0 +1,269 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Contiguous Memory Allocator for KVM + * + * This program is modified on the basis of CMA, to achieve cross-node + * memory reservation, as well as reserved memory information statistics. + */ + +#define pr_fmt(fmt) "kvm_cma: " fmt + +#include +#include +#include +#include +#include +#include + +#include "../../../mm/cma.h" +#include "../../../mm/internal.h" + +struct cma kvm_cma_areas[MAX_CMA_AREAS]; +unsigned int kvm_cma_area_count; + +static void __init init_kvm_cma_reserved_pageblock(struct page *page) +{ + unsigned int i = pageblock_nr_pages; + struct page *p = page; + + do { + __ClearPageReserved(p); + set_page_count(p, 0); + } while (++p, --i); + + set_pageblock_migratetype(page, MIGRATE_ISOLATE); + + if (pageblock_order >= MAX_ORDER) { + i = pageblock_nr_pages; + p = page; + do { + set_page_refcounted(p); + __free_pages(p, MAX_ORDER - 1); + p += MAX_ORDER_NR_PAGES; + } while (i -= MAX_ORDER_NR_PAGES); + } else { + set_page_refcounted(page); + __free_pages(page, pageblock_order); + } + + adjust_managed_page_count(page, pageblock_nr_pages); +} + +static int __init kvm_cma_activate_area(struct cma *cma) +{ + int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long); + unsigned long base_pfn = cma->base_pfn, pfn = base_pfn; + unsigned int i = cma->count >> pageblock_order; + + cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL); + + if (!cma->bitmap) { + cma->count = 0; + return -ENOMEM; + } + + WARN_ON_ONCE(!pfn_valid(pfn)); + + do { + unsigned int j; + + base_pfn = pfn; + + for (j = pageblock_nr_pages; j; --j, pfn++) + WARN_ON_ONCE(!pfn_valid(pfn)); + + init_kvm_cma_reserved_pageblock(pfn_to_page(base_pfn)); + } while (--i); + + spin_lock_init(&cma->lock); + + return 0; +} + +static int __init kvm_cma_init_reserved_areas(void) +{ + int i; + + for (i = 0; i < kvm_cma_area_count; i++) { + int ret = kvm_cma_activate_area(&kvm_cma_areas[i]); + + if (ret) + return ret; + } + + return 0; +} +core_initcall(kvm_cma_init_reserved_areas); + +/** + * kvm_cma_init_reserved_mem() - create custom contiguous area + * from reserved memory + * @base: Base address of the reserved area + * @size: Size of the reserved area (in bytes), + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. If this parameter is NULL, the name of + * the area will be set to "cmaN", where N is a running counter of + * used areas. + * @res_cma: Pointer to store the created cma region. + * + * This function creates custom contiguous area from already reserved memory. + */ +int __init kvm_cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, + unsigned int order_per_bit, const char *name, + struct cma **res_cma) +{ + struct cma *cma; + phys_addr_t alignment; + + /* Sanity checks */ + if (kvm_cma_area_count == ARRAY_SIZE(kvm_cma_areas)) { + pr_err("Not enough slots for CMA reserved regions!\n"); + return -ENOSPC; + } + + if (!size || !memblock_is_region_reserved(base, size)) + return -EINVAL; + + /* ensure minimal alignment required by mm core */ + alignment = PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order); + + /* alignment should be aligned with order_per_bit */ + if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size) + return -EINVAL; + + /* + * Each reserved area must be initialised later, when more kernel + * subsystems (like slab allocator) are available. 
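 + * That initialisation is done by kvm_cma_activate_area(), driven from + * the core_initcall registered above.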
+ */ + cma = &kvm_cma_areas[kvm_cma_area_count]; + + if (name) + snprintf(cma->name, CMA_MAX_NAME, "%s", name); + else + snprintf(cma->name, CMA_MAX_NAME, "cma%d", kvm_cma_area_count); + + cma->base_pfn = PFN_DOWN(base); + cma->count = size >> PAGE_SHIFT; + cma->order_per_bit = order_per_bit; + *res_cma = cma; + kvm_cma_area_count++; + totalcma_pages += (size / PAGE_SIZE); + + return 0; +} + +/** + * kvm_cma_declare_contiguous() - reserve contiguous area for VM + * @base: Base address of the reserved area (optional), + * @size: Size of the reserved area (in bytes), + * @limit: End address of the reserved memory (optional, 0 for any). + * @alignment: Alignment for the CMA area, should be power of 2 or zero + * @order_per_bit: Order of pages represented by one bit on bitmap. + * @name: The name of the area. See function cma_init_reserved_mem() + * @res_cma: Pointer to store the created cma region. + * + * This function reserves memory from the early allocator. It should be + * called by arch specific code once the early allocator (memblock or bootmem) + * has been activated and all other subsystems have already allocated/reserved + * memory. This function allows creating custom reserved areas. + */ +int __init kvm_cma_declare_contiguous(phys_addr_t base, + phys_addr_t size, phys_addr_t limit, + phys_addr_t alignment, unsigned int order_per_bit, + const char *name, struct cma **res_cma) +{ + phys_addr_t memblock_end = memblock_end_of_DRAM(); + phys_addr_t highmem_start; + int ret = 0; + + /* + * We can't use __pa(high_memory) directly, since high_memory + * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly) + * complain. Find the boundary by adding one to the last valid + * address. + */ + highmem_start = __pa(high_memory - 1) + 1; + + if (!size) + return -EINVAL; + + if (alignment && !is_power_of_2(alignment)) + return -EINVAL; + + /* + * Sanitise input arguments. + * Pages at both ends of the CMA area could be merged into adjacent + * unmovable migratetype pages by the page allocator's buddy algorithm; + * in that case you couldn't get contiguous memory, which is not what + * we want. + */ + alignment = max(alignment, (phys_addr_t)PAGE_SIZE << + max_t(unsigned long, MAX_ORDER - 1, pageblock_order)); + if (base & (alignment - 1)) { + ret = -EINVAL; + pr_err("Region at %pa must be aligned to %pa bytes\n", + &base, &alignment); + goto err; + } + base = ALIGN(base, alignment); + size = ALIGN(size, alignment); + limit &= ~(alignment - 1); + + if (!base) { + ret = -EINVAL; + pr_err("Base address of region must be specified!\n"); + goto err; + } + + /* size should be aligned with order_per_bit */ + if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) + return -EINVAL; + + /* + * The requested region must not cross the low/high memory boundary. + */ + if (base < highmem_start && base + size > highmem_start) { + ret = -EINVAL; + pr_err("Region at %pa defined on low/high memory boundary (%pa)\n", + &base, &highmem_start); + goto err; + } + + /* + * If the limit is unspecified or above the memblock end, its effective + * value will be the memblock end. Set it explicitly to simplify further + * checks.
+ */ + if (limit == 0 || limit > memblock_end) + limit = memblock_end; + + if (base + size > limit) { + ret = -EINVAL; + pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n", + &size, &base, &limit); + goto err; + } + + /* Reserve memory */ + if (memblock_is_region_reserved(base, size) || + memblock_reserve(base, size) < 0) { + ret = -EBUSY; + goto err; + } + ret = kvm_cma_init_reserved_mem(base, size, order_per_bit, + name, res_cma); + if (ret) + goto free_mem; + + pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M, + &base); + return 0; + +free_mem: + memblock_free((void *)base, size); +err: + pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M); + return ret; +} diff --git a/arch/sw_64/kvm/kvm_core3.c b/arch/sw_64/kvm/kvm_core3.c new file mode 100644 index 0000000000000000000000000000000000000000..f7e9150d40e0e4a88ac14bb08869302bc97b8e6e --- /dev/null +++ b/arch/sw_64/kvm/kvm_core3.c @@ -0,0 +1,419 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2018 - os kernal + * Author: fire3 yangzh + * linhn + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "trace.h" +#include "vmem.c" + +__read_mostly bool bind_vcpu_enabled; + +#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_NUMA) +static int __init bind_vcpu_init(void) +{ + if (!sw64_debugfs_dir) + return -ENODEV; + debugfs_create_bool("bind_vcpu", 0644, + sw64_debugfs_dir, &bind_vcpu_enabled); + return 0; +} + +static void bind_vcpu_exit(void) +{ + bind_vcpu_enabled = false; +} +#else +static int __init bind_vcpu_init(void) +{ + return 0; +} + +static void bind_vcpu_exit(void) { } + +#endif + +static unsigned long longtime_offset; + +#ifdef CONFIG_KVM_MEMHOTPLUG +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base; + + base = virt_to_phys(vcpu->kvm->arch.seg_pgd); + return base | ((vpn & VPN_MASK) << 44); +} +#else +static unsigned long get_vpcr(struct kvm_vcpu *vcpu, u64 vpn) +{ + unsigned long base, size; + + base = vcpu->kvm->arch.host_phys_addr; + size = vcpu->kvm->arch.size; + return (base >> 23) | ((size >> 23) << 16) | ((vpn & VPN_MASK) << 44); +} +#endif + +void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu) +{ + if (vcpu->arch.vcb.vpcr == 0) { + vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0); +#ifndef CONFIG_KVM_MEMHOTPLUG + if (unlikely(bind_vcpu_enabled)) { + int nid; + unsigned long end; + + end = vcpu->kvm->arch.host_phys_addr + vcpu->kvm->arch.size; + nid = pfn_to_nid(PHYS_PFN(vcpu->kvm->arch.host_phys_addr)); + if (pfn_to_nid(PHYS_PFN(end)) == nid) + set_cpus_allowed_ptr(vcpu->arch.tsk, cpumask_of_node(nid)); + } +#endif + vcpu->arch.vcb.upcr = 0x7; + } +} + +void kvm_flush_tlb_all(void) +{ + tbia(); +} + +void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn) +{ + vcpu->arch.vcb.vpcr = ((vcpu->arch.vcb.vpcr) & (~(VPN_MASK << 44))) | (vpn << 44); + vcpu->arch.vcb.dtb_vpcr = ((vcpu->arch.vcb.dtb_vpcr) & (~(VPN_MASK << VPN_SHIFT))) | (vpn << VPN_SHIFT); +} + +int kvm_sw64_init_vm(struct kvm *kvm) +{ +#ifdef CONFIG_KVM_MEMHOTPLUG + unsigned long *seg_pgd; + + if (kvm->arch.seg_pgd != NULL) { + kvm_err("kvm_arch already initialized?\n"); + return -EINVAL; + } + + seg_pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO); + if (!seg_pgd) + return -ENOMEM; + + kvm->arch.seg_pgd = seg_pgd; + #endif + return 0; +} + +void kvm_sw64_destroy_vm(struct kvm *kvm) +{ + #ifdef CONFIG_KVM_MEMHOTPLUG + void *seg_pgd = NULL; + + if 
+		seg_pgd = READ_ONCE(kvm->arch.seg_pgd);
+		kvm->arch.seg_pgd = NULL;
+	}
+
+	if (seg_pgd)
+		free_pages_exact(seg_pgd, PAGE_SIZE);
+#endif
+	kvm_destroy_vcpus(kvm);
+}
+
+#ifdef CONFIG_KVM_MEMHOTPLUG
+static void setup_segment_table(struct kvm *kvm,
+		struct kvm_memory_slot *memslot, unsigned long addr, size_t size)
+{
+	unsigned long *seg_pgd = kvm->arch.seg_pgd;
+	unsigned long num_of_entry;
+	unsigned long base_hpa = addr;
+	unsigned long i;
+
+	num_of_entry = round_up(size, 1 << 30) >> 30;
+
+	for (i = 0; i < num_of_entry; i++) {
+		*seg_pgd = base_hpa + (i << 30);
+		seg_pgd++;
+	}
+}
+#endif
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				const struct kvm_memory_slot *old,
+				struct kvm_memory_slot *new,
+				enum kvm_mr_change change)
+{
+	unsigned long addr;
+	struct file *vm_file;
+	struct vm_area_struct *vma;
+	struct vmem_info *info;
+	struct kvm_userspace_memory_region new_mem;
+	struct kvm_userspace_memory_region *mem = &new_mem;
+	unsigned long ret;
+	size_t size;
+
+	mem->flags = new->flags;
+	mem->guest_phys_addr = ((new->base_gfn) << PAGE_SHIFT);
+	mem->memory_size = ((new->npages) << PAGE_SHIFT);
+	mem->userspace_addr = new->userspace_addr;
+
+	if (change == KVM_MR_FLAGS_ONLY || change == KVM_MR_DELETE)
+		return 0;
+
+	if (test_bit(IO_MARK_BIT, (unsigned long *)(&(mem->guest_phys_addr))))
+		return 0;
+
+	if (test_bit(IO_MARK_BIT + 1, (unsigned long *)(&(mem->guest_phys_addr))))
+		return 0;
+
+#ifndef CONFIG_KVM_MEMHOTPLUG
+	if (mem->guest_phys_addr) {
+		pr_info("%s, No KVM MEMHOTPLUG support!\n", __func__);
+		return 0;
+	}
+#endif
+	if (!sw64_kvm_pool)
+		return -ENOMEM;
+
+	pr_info("%s: %#llx %#llx, user addr: %#llx\n", __func__,
+			mem->guest_phys_addr, mem->memory_size, mem->userspace_addr);
+
+	vma = find_vma(current->mm, mem->userspace_addr);
+	if (!vma)
+		return -ENOMEM;
+	vm_file = vma->vm_file;
+
+	if (!vm_file) {
+		info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL);
+		if (!info)
+			return -ENOMEM;
+
+		size = round_up(mem->memory_size, 8<<20);
+		addr = gen_pool_alloc(sw64_kvm_pool, size);
+		if (!addr)
+			return -ENOMEM;
+		vm_munmap(mem->userspace_addr, mem->memory_size);
+		ret = vm_mmap(vm_file, mem->userspace_addr, mem->memory_size,
+				PROT_READ | PROT_WRITE,
+				MAP_SHARED | MAP_FIXED, 0);
+		if ((long)ret < 0)
+			return ret;
+
+		vma = find_vma(current->mm, mem->userspace_addr);
+		if (!vma)
+			return -ENOMEM;
+
+		info->start = addr;
+		info->size = size;
+		vma->vm_private_data = (void *) info;
+
+		vma->vm_ops = &vmem_vm_ops;
+		vma->vm_ops->open(vma);
+
+		ret = vmem_vm_insert_page(vma);
+		if ((int)ret < 0)
+			return ret;
+	} else {
+		info = vm_file->private_data;
+		addr = info->start;
+	}
+
+	pr_info("guest phys addr = %#lx, size = %#lx\n",
+			addr, vma->vm_end - vma->vm_start);
+
+	kvm->arch.host_phys_addr = (u64)addr;
+	kvm->arch.size = round_up(mem->memory_size, 8<<20);
+
+	memset(__va(addr), 0, 0x2000000);
+
+	return 0;
+}
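Guest RAM is carved from sw64_kvm_pool in 8 MiB granules (the round_up(..., 8<<20) above). A self-contained sketch of that rounding, with the round_up() macro expanded by hand; the helper name is made up for the example::

	#define KVM_MEM_GRANULE	(8UL << 20)	/* 8 MiB */

	/* e.g. a 100 MiB guest consumes 104 MiB of pool space */
	static unsigned long guest_pool_size(unsigned long memory_size)
	{
		return (memory_size + KVM_MEM_GRANULE - 1) & ~(KVM_MEM_GRANULE - 1);
	}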
+
+/*
+ * kvm_mark_migration() writes the migration mark into every vcpucb of the
+ * kvm. The mark tells the system to do migration while it is set, and all
+ * vcpu TLBs are flushed at the beginning of the migration.
+ */
+void kvm_mark_migration(struct kvm *kvm, int mark)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long cpu;
+
+	kvm_for_each_vcpu(cpu, vcpu, kvm)
+		vcpu->arch.vcb.migration_mark = mark << 2;
+
+	kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change)
+{
+	/*
+	 * At this point memslot has been committed and there is an
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
+	 * memory slot is write protected.
+	 */
+
+	/* If it's the first time dirty logging, flush all vcpu tlbs. */
+	if ((change == KVM_MR_FLAGS_ONLY) && (new->flags & KVM_MEM_LOG_DIRTY_PAGES))
+		kvm_mark_migration(kvm, 1);
+}
+
+int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	unsigned long addr = vcpu->kvm->arch.host_phys_addr;
+
+	hrtimer_cancel(&vcpu->arch.hrt);
+	vcpu->arch.vcb.soft_cid = vcpu->vcpu_id;
+	vcpu->arch.vcb.vcpu_irq_disabled = 1;
+	vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */
+	vcpu->arch.power_off = 0;
+	memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending));
+
+	if (vcpu->vcpu_id == 0)
+		memset(__va(addr), 0, 0x2000000);
+
+	return 0;
+}
+
+long kvm_sw64_get_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	if (vcpu->arch.vcb.migration_mark) {
+		unsigned long result = sw64_io_read(0, LONG_TIME) +
+			vcpu->arch.vcb.guest_longtime_offset;
+		vcpu->arch.vcb.guest_longtime = result;
+		vcpu->arch.vcb.guest_irqs_pending = vcpu->arch.irqs_pending[0];
+	}
+
+	if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+long kvm_sw64_set_vcb(struct file *filp, unsigned long arg)
+{
+	unsigned long result;
+	struct kvm_vcpu *vcpu = filp->private_data;
+	struct vcpucb *kvm_vcb;
+
+	kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb));
+	if (IS_ERR(kvm_vcb))
+		return PTR_ERR(kvm_vcb);
+	memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb));
+	kfree(kvm_vcb);
+
+	if (vcpu->arch.vcb.migration_mark) {
+		/* updated vpcr needed by destination vm */
+		vcpu->arch.vcb.vpcr = get_vpcr(vcpu, 0);
+		/* synchronize the longtime of source and destination */
+		if (vcpu->arch.vcb.soft_cid == 0) {
+			result = sw64_io_read(0, LONG_TIME);
+			vcpu->arch.vcb.guest_longtime_offset = vcpu->arch.vcb.guest_longtime - result;
+			longtime_offset = vcpu->arch.vcb.guest_longtime_offset;
+		} else
+			vcpu->arch.vcb.guest_longtime_offset = longtime_offset;
+
+		set_timer(vcpu, 200000000);
+		vcpu->arch.vcb.migration_mark = 0;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_KVM_MEMHOTPLUG
+void vcpu_mem_hotplug(struct kvm_vcpu *vcpu, unsigned long start_addr)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_memory_slot *slot;
+	unsigned long start_pfn = start_addr >> PAGE_SHIFT;
+
+	kvm_for_each_memslot(slot, kvm_memslots(kvm)) {
+		if (start_pfn == slot->base_gfn) {
+			unsigned long *seg_pgd;
+			unsigned long num_of_entry = slot->npages >> 17;
+			unsigned long base_hpa = slot->arch.host_phys_addr;
+			unsigned long i;
+
+			seg_pgd = kvm->arch.seg_pgd + (start_pfn >> 17);
+			for (i = 0; i < num_of_entry; i++) {
+				*seg_pgd = base_hpa + (i << 30);
+				seg_pgd++;
+			}
+		}
+	}
+}
+#endif
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+}
+
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot, gfn_t gfn_offset,
+		unsigned long mask)
+{
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+		struct kvm_memory_slot *slot)
+{
+}
+
+void kvm_arch_flush_shadow_all(struct kvm *kvm)
+{
+}
+
+void update_aptp(unsigned long pgd)
+{
+}
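For context, a VMM would drive the save/restore pair above roughly as follows. This is a hypothetical userspace sketch: the vcpu fd handling is assumed, and struct vcpucb is taken from the exported headers; only the ioctl calls themselves are from the patch::

	#include <sys/ioctl.h>

	/* save on the source, restore on the destination */
	int save_vcb(int vcpu_fd, struct vcpucb *vcb)
	{
		/* with migration_mark set, the kernel folds in guest_longtime
		 * and pending IRQs before copying the vcb out */
		return ioctl(vcpu_fd, KVM_SW64_GET_VCB, vcb);
	}

	int restore_vcb(int vcpu_fd, struct vcpucb *vcb)
	{
		/* recomputes vpcr and the longtime offset, re-arms the timer */
		return ioctl(vcpu_fd, KVM_SW64_SET_VCB, vcb);
	}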
+
+static int __init kvm_core3_init(void)
+{
+	int i, ret;
+
+	bind_vcpu_init();
+
+	ret = vmem_init();
+	if (unlikely(ret))
+		goto out;
+
+	for (i = 0; i < NR_CPUS; i++)
+		last_vpn(i) = VPN_FIRST_VERSION;
+
+	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+	if (likely(!ret))
+		return 0;
+
+	vmem_exit();
+out:
+	bind_vcpu_exit();
+	return ret;
+}
+
+static void __exit kvm_core3_exit(void)
+{
+	kvm_exit();
+	vmem_exit();
+	bind_vcpu_exit();
+}
+
+module_init(kvm_core3_init);
+module_exit(kvm_core3_exit);
diff --git a/arch/sw_64/kvm/kvm_core4.c b/arch/sw_64/kvm/kvm_core4.c
new file mode 100644
index 0000000000000000000000000000000000000000..08d28a365a3b0d13e64c8e703371b5c52456dcba
--- /dev/null
+++ b/arch/sw_64/kvm/kvm_core4.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ *	   linhn
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include "trace.h"
+
+static unsigned long shtclock_offset;
+
+void update_aptp(unsigned long pgd)
+{
+	imemb();
+	write_csr_imb(pgd, CSR_APTP);
+}
+
+void kvm_sw64_update_vpn(struct kvm_vcpu *vcpu, unsigned long vpn)
+{
+	vcpu->arch.vcb.vpcr = vpn << 44;
+	vcpu->arch.vcb.dtb_vpcr = vpn;
+}
+
+void kvm_flush_tlb_all(void)
+{
+	tbivpn(-1, 0, 0);
+}
+
+int kvm_sw64_init_vm(struct kvm *kvm)
+{
+	return kvm_alloc_addtional_stage_pgd(kvm);
+}
+
+void kvm_sw64_destroy_vm(struct kvm *kvm)
+{
+	kvm_destroy_vcpus(kvm);
+}
+
+int kvm_sw64_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.has_run_once)
+		apt_unmap_vm(vcpu->kvm);
+
+	hrtimer_cancel(&vcpu->arch.hrt);
+	vcpu->arch.vcb.soft_cid = vcpu->vcpu_id;
+	vcpu->arch.vcb.vcpu_irq_disabled = 1;
+	vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */
+	vcpu->arch.power_off = 0;
+	memset(&vcpu->arch.irqs_pending, 0, sizeof(vcpu->arch.irqs_pending));
+
+	return 0;
+}
+
+long kvm_sw64_get_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+
+	if (vcpu->arch.migration_mark)
+		vcpu->arch.shtclock = read_csr(CSR_SHTCLOCK) +
+			vcpu->arch.vcb.shtclock_offset;
+	if (copy_to_user((void __user *)arg, &(vcpu->arch.vcb), sizeof(struct vcpucb)))
+		return -EINVAL;
+
+	return 0;
+}
+
+long kvm_sw64_set_vcb(struct file *filp, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	struct vcpucb *kvm_vcb;
+
+	kvm_vcb = memdup_user((void __user *)arg, sizeof(*kvm_vcb));
+	if (IS_ERR(kvm_vcb))
+		return PTR_ERR(kvm_vcb);
+	memcpy(&(vcpu->arch.vcb), kvm_vcb, sizeof(struct vcpucb));
+	kfree(kvm_vcb);
+
+	if (vcpu->arch.migration_mark) {
+		/* synchronize the longtime of source and destination */
+		if (vcpu->arch.vcb.soft_cid == 0)
+			shtclock_offset = vcpu->arch.shtclock - read_csr(CSR_SHTCLOCK);
+		vcpu->arch.vcb.shtclock_offset = shtclock_offset;
+		set_timer(vcpu, 200000000);
+		vcpu->arch.migration_mark = 0;
+	}
+	return 0;
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *memslot,
+				const struct kvm_userspace_memory_region *mem,
+				enum kvm_mr_change change)
+{
+	return 0;
+}
+
+void vcpu_set_numa_affinity(struct kvm_vcpu *vcpu)
+{
+}
+
+static int __init kvm_core4_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < NR_CPUS; i++)
+		last_vpn(i) = VPN_FIRST_VERSION;
+
+	ret = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE);
+
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static void __exit kvm_core4_exit(void)
+{
+	kvm_exit();
+}
+
+module_init(kvm_core4_init);
+module_exit(kvm_core4_exit);
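Both cores synchronize guest time across migration with a single additive offset; the arithmetic in miniature, with made-up sample values::

	/* destination side (see kvm_sw64_set_vcb above):
	 *   offset = guest_clock_saved_on_source - host_clock_now
	 * every later guest clock read is host_clock + offset */
	static unsigned long clock_offset(unsigned long guest_clock,
					  unsigned long host_clock)
	{
		return guest_clock - host_clock;
	}
	/* guest at 10000000 ticks, destination host at 7500000
	 * -> offset 2500000; guest time stays monotonic across the move */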
diff --git a/arch/sw_64/kvm/kvm_timer.c b/arch/sw_64/kvm/kvm_timer.c
new file mode 100644
index 0000000000000000000000000000000000000000..895be63cd8d132b316b02388764744d863c36131
--- /dev/null
+++ b/arch/sw_64/kvm/kvm_timer.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ */
+#include
+#include
+#include
+#include
+
+/*
+ * The Guest Clock.
+ *
+ * There are two sources of virtual interrupts: the VMM sending interrupts
+ * for virtual devices, and the Guest timer interrupt handled here.
+ *
+ * The Guest uses a hypercall to tell us how long until the next timer
+ * interrupt (in ticks). We use the high-resolution timer infrastructure
+ * to set a callback at that time.
+ *
+ * 0 means "turn off the clock".
+ */
+
+void set_timer(struct kvm_vcpu *vcpu, unsigned long delta)
+{
+	ktime_t expires;
+
+	if (unlikely(delta == 0)) {
+		/* Clock event device is shutting down. */
+		hrtimer_cancel(&vcpu->arch.hrt);
+		return;
+	}
+
+	/* Convert clock event device ticks to nanoseconds */
+	delta = delta * NSEC_PER_SEC;
+	do_div(delta, vcpu->arch.vtimer_freq);
+
+	/*
+	 * We use wallclock time here, so the Guest might not be running for
+	 * all the time between now and the timer interrupt it asked for. This
+	 * is almost always the right thing to do.
+	 */
+	expires = ktime_add_ns(ktime_get_real(), delta);
+	vcpu->arch.timer_next_event = expires;
+	hrtimer_start(&vcpu->arch.hrt, expires, HRTIMER_MODE_ABS);
+}
+
+/* And this is the routine when we want to set an interrupt for the Guest. */
+void set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	/*
+	 * Next time the Guest runs, the core code will see if it can deliver
+	 * this interrupt.
+	 */
+	set_bit(irq, (vcpu->arch.irqs_pending));
+
+	/*
+	 * Make sure it sees it; it might be asleep (eg. halted), or running
+	 * the Guest right now, in which case kick_process() will knock it out.
+	 */
+	kvm_vcpu_kick(vcpu);
+}
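A worked example of the tick-to-nanosecond conversion in set_timer(), assuming a hypothetical 2 GHz vtimer_freq::

	/* ns = ticks * NSEC_PER_SEC / vtimer_freq; do_div() divides in
	 * place and returns the remainder */
	static u64 ticks_to_ns(u64 ticks, u64 vtimer_freq)
	{
		u64 ns = ticks * NSEC_PER_SEC;

		do_div(ns, vtimer_freq);
		return ns;
	}
	/* 2,000,000 ticks at 2,000,000,000 Hz -> 1,000,000 ns = 1 ms */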
+
+enum hrtimer_restart clockdev_fn(struct hrtimer *timer)
+{
+	struct kvm_vcpu *vcpu;
+	ktime_t now, delta;
+
+	vcpu = container_of(timer, struct kvm_vcpu, arch.hrt);
+
+	now = ktime_get_real();
+
+	if (now < vcpu->arch.timer_next_event) {
+		delta = vcpu->arch.timer_next_event - now;
+		hrtimer_forward_now(timer, delta);
+		return HRTIMER_RESTART;
+	}
+
+	set_interrupt(vcpu, SW64_KVM_IRQ_TIMER);
+	return HRTIMER_NORESTART;
+}
diff --git a/arch/sw_64/kvm/mmio.c b/arch/sw_64/kvm/mmio.c
new file mode 100644
index 0000000000000000000000000000000000000000..21ad89722f9ae12f2246f88625966a718058c250
--- /dev/null
+++ b/arch/sw_64/kvm/mmio.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 - os kernel
+ * Author: fire3 yangzh
+ *	   linhn
+ */
+#include
+#include
+#include
+#include
+
+static unsigned long mmio_read_buf(char *buf, unsigned int len)
+{
+	unsigned long data = 0;
+	union {
+		u16 hword;
+		u32 word;
+		u64 dword;
+	} tmp;
+
+	switch (len) {
+	case 1:
+		data = *(u8 *)buf;
+		break;
+	case 2:
+		memcpy(&tmp.hword, buf, len);
+		data = tmp.hword;
+		break;
+	case 4:
+		memcpy(&tmp.word, buf, len);
+		data = tmp.word;
+		break;
+	case 8:
+		memcpy(&tmp.dword, buf, len);
+		data = tmp.dword;
+		break;
+	}
+
+	return data;
+}
+
+int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long data;
+	unsigned int len;
+
+	if (!run->mmio.is_write) {
+		len = run->mmio.len;
+		if (len > sizeof(unsigned long))
+			return -EINVAL;
+
+		data = mmio_read_buf(run->mmio.data, len);
+		vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
+	}
+
+	vcpu->stat.mmio_exits++;
+	vcpu->arch.regs.pc += 4;
+
+	return 0;
+}
+
+int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		struct hcall_args *hargs)
+{
+	int ret;
+
+#ifdef CONFIG_SUBARCH_C3B
+	run->mmio.phys_addr = hargs->arg1 & 0xfffffffffffffUL;
+	sw64_decode(vcpu, hargs->arg2, run);
+#elif defined(CONFIG_SUBARCH_C4)
+	run->mmio.phys_addr = read_csr(CSR_DVA) & 0xfffffffffffffUL;
+	sw64_decode(vcpu, 0, run);
+#endif
+	if (run->mmio.is_write)
+		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
+				run->mmio.len, run->mmio.data);
+	else
+		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
+				run->mmio.len, run->mmio.data);
+
+	if (!ret) {
+		/* We handled the access successfully in the kernel. */
+		kvm_handle_mmio_return(vcpu, run);
+		return 1;
+	}
+
+	run->exit_reason = KVM_EXIT_MMIO;
+	return 0;
+}
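On the userspace side, the KVM_EXIT_MMIO exits produced by io_mem_abort() are consumed in the usual KVM run loop. A hypothetical sketch; device_read() and device_write() stand in for the VMM's device model and are not real APIs::

	/* complete the access, then re-enter the guest; on the next KVM_RUN,
	 * kvm_handle_mmio_return() loads the result and advances the PC */
	static void handle_exit_mmio(struct kvm_run *run)
	{
		if (run->mmio.is_write)
			device_write(run->mmio.phys_addr, run->mmio.data,
				     run->mmio.len);
		else
			device_read(run->mmio.phys_addr, run->mmio.data,
				    run->mmio.len);
	}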
diff --git a/arch/sw_64/kvm/mmu.c b/arch/sw_64/kvm/mmu.c
new file mode 100644
index 0000000000000000000000000000000000000000..b0b492a4fbff22dd58d64872d6b6abb15365aede
--- /dev/null
+++ b/arch/sw_64/kvm/mmu.c
@@ -0,0 +1,1561 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 - os kernel
+ * Author: lff
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#define KVM_APT_FLAG_LOGGING_ACTIVE	(1UL << 1)
+
+static bool memslot_is_logging(struct kvm_memory_slot *memslot)
+{
+	return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
+}
+
+/*
+ * Return values of kvm_handle_mmio_page_fault and mmu.page_fault:
+ * RET_AF_RETRY: let CPU fault again on the address.
+ * RET_AF_EMULATE: mmio page fault, emulate the instruction directly.
+ *
+ * For kvm_handle_mmio_page_fault only:
+ * RET_AF_INVALID: the spte is invalid, let the real page fault path update it.
+ */
+enum {
+	RET_AF_RETRY = 0,
+	RET_AF_EMULATE = 1,
+	RET_AF_INVALID = 2,
+};
+
+/**
+ * apt_dissolve_pmd() - clear and flush huge PMD entry
+ * @kvm:	pointer to kvm structure.
+ * @addr:	IPA
+ * @pmd:	pmd pointer for IPA
+ *
+ * Function clears a PMD entry, flushes TLBs.
+ */
+static void apt_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
+{
+	int i;
+
+	if (!pmd_trans_huge(*pmd))
+		return;
+
+	if (pmd_trans_cont(*pmd)) {
+		for (i = 0; i < CONT_PMDS; i++, pmd++)
+			pmd_clear(pmd);
+	} else
+		pmd_clear(pmd);
+
+	kvm_flush_remote_tlbs(kvm);
+	put_page(virt_to_page(pmd));
+}
+
+/**
+ * apt_dissolve_pud() - clear and flush huge PUD entry
+ * @kvm:	pointer to kvm structure.
+ * @addr:	IPA
+ * @pudp:	pud pointer for IPA
+ *
+ * Function clears a PUD entry, flushes TLBs.
+ */
+static void apt_dissolve_pud(struct kvm *kvm, phys_addr_t addr, pud_t *pudp)
+{
+	if (!pud_huge(*pudp))
+		return;
+
+	pud_clear(pudp);
+	kvm_flush_remote_tlbs(kvm);
+	put_page(virt_to_page(pudp));
+}
+
+static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
+				int min, int max)
+{
+	void *page;
+
+	BUG_ON(max > KVM_NR_MEM_OBJS);
+	if (cache->nobjs >= min)
+		return 0;
+	while (cache->nobjs < max) {
+		page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
+		if (!page)
+			return -ENOMEM;
+		cache->objects[cache->nobjs++] = page;
+	}
+	return 0;
+}
+
+static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
+{
+	while (mc->nobjs)
+		free_page((unsigned long)mc->objects[--mc->nobjs]);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
+{
+	void *p;
+
+	BUG_ON(!mc || !mc->nobjs);
+	p = mc->objects[--mc->nobjs];
+	return p;
+}
+
+static void unmap_apt_ptes(struct kvm *kvm, pmd_t *pmd,
+			phys_addr_t addr, phys_addr_t end)
+{
+	pte_t *pte, *start_pte;
+	struct page *ptr_page;
+
+	start_pte = pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			/* Do we need WRITE_ONCE(pte, 0)? */
+			set_pte(pte, __pte(0));
+			put_page(virt_to_page(pte));
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+
+	ptr_page = virt_to_page(start_pte);
+	if (page_count(ptr_page) == 1) {
+		pte_t *pte_table = pte_offset_kernel(pmd, 0);
+
+		pmd_clear(pmd);
+		free_page((unsigned long)pte_table);
+		put_page(virt_to_page(pmd));
+	}
+}
+
+static void unmap_apt_pmds(struct kvm *kvm, pud_t *pud,
+			phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next;
+	pmd_t *pmd, *start_pmd;
+	struct page *ptr_page;
+	int i;
+
+	start_pmd = pmd = pmd_offset(pud, addr);
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			if (pmd_trans_huge(*pmd)) {
+				if (pmd_trans_cont(*pmd)) {
+					for (i = 0; i < CONT_PMDS; i++, pmd++)
+						pmd_clear(pmd);
+				} else
+					pmd_clear(pmd);
+				/* Flush stale entries for the cleared block mapping */
+				kvm_flush_remote_tlbs(kvm);
+				put_page(virt_to_page(pmd));
+			} else {
+				unmap_apt_ptes(kvm, pmd, addr, next);
+			}
+		}
+	} while (pmd++, addr = next, addr != end);
+
+	ptr_page = virt_to_page(start_pmd);
+	if (page_count(ptr_page) == 1) {
+		pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0UL);
+
+		pud_clear(pud);
+		free_page((unsigned long)pmd_table);
+		put_page(virt_to_page(pud));
+	}
+}
+
+static void unmap_apt_puds(struct kvm *kvm, p4d_t *p4d,
+			phys_addr_t addr, phys_addr_t end)
+{
+	phys_addr_t next;
+	pud_t *pud, *start_pud;
+	struct page *ptr_page;
+
+	start_pud = pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			if (pud_huge(*pud)) {
+				pud_clear(pud);
+				/* Flush stale entries for the cleared block mapping */
+				kvm_flush_remote_tlbs(kvm);
+				put_page(virt_to_page(pud));
+			} else {
+				unmap_apt_pmds(kvm, pud, addr, next);
+			}
+		}
+	} while (pud++, addr = next, addr != end);
+
+	ptr_page = virt_to_page(start_pud);
+	if (page_count(ptr_page) == 1) {
+		pud_t *pud_table __maybe_unused = pud_offset(p4d, 0UL);
+
+		p4d_clear(p4d);
+		kvm_flush_remote_tlbs(kvm);
+		free_page((unsigned long)pud_table);
+		put_page(virt_to_page(p4d));
+	}
+}
+
+/**
+ * unmap_apt_range -- Clear additional page table entries to unmap a range
+ * @kvm:   The VM pointer
+ * @start: The intermediate physical base address of the range to unmap
+ * @size:  The size of the area to unmap
+ *
+ * Clear a range of apt mappings, lowering the various ref-counts. Must
+ * be called while holding mmu_lock (unless for freeing the apt pgd before
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+static void unmap_apt_range(struct kvm *kvm, phys_addr_t start, u64 size)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	phys_addr_t addr = start, end = start + size;
+	phys_addr_t next;
+
+	assert_spin_locked(&kvm->mmu_lock);
+	WARN_ON(size & ~PAGE_MASK);
+
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	p4d = p4d_offset(pgd, addr);
+	do {
+		/*
+		 * Make sure the page table is still active, as another thread
+		 * could have possibly freed the page table, while we released
+		 * the lock.
+		 */
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
+		next = p4d_addr_end(addr, end);
+		if (!p4d_none(*p4d))
+			unmap_apt_puds(kvm, p4d, addr, next);
+		/*
+		 * If the range is too large, release the kvm->mmu_lock
+		 * to prevent starvation and lockup detector warnings.
+		 */
+		if (next != end)
+			cond_resched_lock(&kvm->mmu_lock);
+	} while (pgd++, addr = next, addr != end);
+}
+
+static void apt_unmap_memslot(struct kvm *kvm,
+			struct kvm_memory_slot *memslot)
+{
+	hva_t hva = memslot->userspace_addr;
+	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = PAGE_SIZE * memslot->npages;
+	hva_t reg_end = hva + size;
+
+	/*
+	 * A memory region could potentially cover multiple VMAs, and any holes
+	 * between them, so iterate over all of them to find out if we should
+	 * unmap any of them.
+	 *
+	 *     +--------------------------------------------+
+	 * +---------------+----------------+   +----------------+
+	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
+	 * +---------------+----------------+   +----------------+
+	 *     |               memory region                |
+	 *     +--------------------------------------------+
+	 */
+	do {
+		struct vm_area_struct *vma = find_vma(current->mm, hva);
+		hva_t vm_start, vm_end;
+
+		if (!vma || vma->vm_start >= reg_end)
+			break;
+
+		/*
+		 * Take the intersection of this VMA with the memory region
+		 */
+		vm_start = max(hva, vma->vm_start);
+		vm_end = min(reg_end, vma->vm_end);
+
+		if (!(vma->vm_flags & VM_PFNMAP)) {
+			gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
+
+			unmap_apt_range(kvm, gpa, vm_end - vm_start);
+		}
+		hva = vm_end;
+	} while (hva < reg_end);
+}
+
+/**
+ * apt_unmap_vm - Unmap Additional Stage RAM mappings
+ * @kvm: The struct kvm pointer
+ *
+ * Go through the memregions and unmap any regular RAM
+ * backing memory already mapped to the VM.
+ */
+void apt_unmap_vm(struct kvm *kvm)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	down_read(&current->mm->mmap_lock);
+	spin_lock(&kvm->mmu_lock);
+
+	slots = kvm_memslots(kvm);
+	kvm_for_each_memslot(memslot, slots)
+		apt_unmap_memslot(kvm, memslot);
+	spin_unlock(&kvm->mmu_lock);
+	up_read(&current->mm->mmap_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+}
+
+static pud_t *apt_get_pud(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
+			phys_addr_t addr)
+{
+	p4d_t *p4d;
+	pud_t *pud;
+
+	pgd += pgd_index(addr);
+	if (pgd_none(*pgd)) {
+		/* Not used on SW64 yet */
+		VM_BUG_ON(pgd);
+		return NULL;
+	}
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		if (!cache)
+			return NULL;
+		pud = mmu_memory_cache_alloc(cache);
+		p4d_populate(NULL, p4d, pud);
+		get_page(virt_to_page(p4d));
+	}
+	return pud_offset(p4d, addr);
+}
+
+static pmd_t *apt_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			phys_addr_t addr, unsigned long sz)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pud = apt_get_pud(kvm->arch.pgd, cache, addr);
+	if (!pud || pud_huge(*pud))
+		return NULL;
+
+	if (pud_none(*pud)) {
+		if (!cache)
+			return NULL;
+		pmd = mmu_memory_cache_alloc(cache);
+		pud_populate(NULL, pud, pmd);
+		get_page(virt_to_page(pud));
+	}
+	if (sz == CONT_PMD_SIZE)
+		addr &= CONT_PMD_MASK;
+
+	return pmd_offset(pud, addr);
+}
+
+static bool kvm_is_write_fault(unsigned long access_type)
+{
+	if (access_type == AF_WRITE_ACCESS_TYPE)
+		return true;
+
+	return false;
+}
+
+static bool kvm_is_exec_fault(unsigned long access_type)
+{
+	if (access_type == AF_EXEC_ACCESS_TYPE)
+		return true;
+
+	return false;
+}
+
+/**
+ * apt_wp_ptes - write protect PMD range
+ * @pmd:	pointer to pmd entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void apt_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
+{
+	pte_t *pte;
+
+	pte = pte_offset_kernel(pmd, addr);
+	do {
+		if (!pte_none(*pte)) {
+			if (!kvm_aptpte_readonly(pte))
+				kvm_set_aptpte_readonly(pte);
+		}
+	} while (pte++, addr += PAGE_SIZE, addr != end);
+}
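The helpers above descend the generic five-level layout one level at a time. Schematically, resolving a guest physical address down to its PTE slot looks like this; a sketch only, ignoring the missing-entry and huge-entry cases that the real walkers must handle::

	static pte_t *apt_walk_sketch(struct kvm *kvm, phys_addr_t gpa)
	{
		pgd_t *pgd = kvm->arch.pgd + pgd_index(gpa);
		p4d_t *p4d = p4d_offset(pgd, gpa);
		pud_t *pud = pud_offset(p4d, gpa);	/* may be a huge PUD */
		pmd_t *pmd = pmd_offset(pud, gpa);	/* may be a huge PMD */

		return pte_offset_kernel(pmd, gpa);
	}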
+
+/**
+ * apt_wp_pmds - write protect PUD range
+ * @pud:	pointer to pud entry
+ * @addr:	range start address
+ * @end:	range end address
+ */
+static void apt_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
+{
+	pmd_t *pmd;
+	phys_addr_t next;
+
+	pmd = pmd_offset(pud, addr);
+
+	do {
+		next = pmd_addr_end(addr, end);
+		if (!pmd_none(*pmd)) {
+			if (pmd_trans_huge(*pmd)) {
+				if (!kvm_aptpmd_readonly(pmd))
+					kvm_set_aptpmd_readonly(pmd);
+			} else {
+				apt_wp_ptes(pmd, addr, next);
+			}
+		}
+	} while (pmd++, addr = next, addr != end);
+}
+
+/**
+ * apt_wp_puds - write protect P4D range
+ * @p4d:	pointer to p4d entry
+ * @addr:	range start address
+ * @end:	range end address
+ *
+ * Process PUD entries; a huge PUD is write protected in place.
+ */
+static void apt_wp_puds(p4d_t *p4d, phys_addr_t addr, phys_addr_t end)
+{
+	pud_t *pud;
+	phys_addr_t next;
+
+	pud = pud_offset(p4d, addr);
+	do {
+		next = pud_addr_end(addr, end);
+		if (!pud_none(*pud)) {
+			if (pud_huge(*pud)) {
+				if (!kvm_aptpud_readonly(pud))
+					kvm_set_aptpud_readonly(pud);
+			} else {
+				/* TODO: PUD not supported, revisit later if supported */
+//				BUG_ON(pud_trans_huge(*pud));
+				apt_wp_pmds(pud, addr, next);
+			}
+		}
+	} while (pud++, addr = next, addr != end);
+}
+
+/**
+ * apt_wp_range() - write protect apt memory region range
+ * @kvm:	The KVM pointer
+ * @addr:	Start address of range
+ * @end:	End address of range
+ */
+static void apt_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
+{
+	pgd_t *pgd;
+	p4d_t *p4d;
+	phys_addr_t next;
+
+	pgd = kvm->arch.pgd + pgd_index(addr);
+	p4d = p4d_offset(pgd, addr);
+
+	do {
+		/*
+		 * Release kvm_mmu_lock periodically if the memory region is
+		 * large. Otherwise, we may see kernel panics with
+		 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
+		 * CONFIG_LOCKDEP. Additionally, holding the lock too long
+		 * will also starve other vCPUs. We have to also make sure
+		 * that the page tables are not freed while we released
+		 * the lock.
+		 */
+		cond_resched_lock(&kvm->mmu_lock);
+		if (!READ_ONCE(kvm->arch.pgd))
+			break;
+		next = p4d_addr_end(addr, end);
+		if (p4d_present(*p4d))
+			apt_wp_puds(p4d, addr, next);
+	} while (p4d++, addr = next, addr != end);
+}
+
+/**
+ * kvm_mmu_wp_memory_region() - write protect apt entries for memory slot
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot to write protect
+ *
+ * Called to start logging dirty pages after memory region
+ * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
+ * all present PMD and PTEs are write protected in the memory region.
+ * Afterwards read of dirty page log can be called.
+ *
+ * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
+ * serializing operations for VM memory regions.
+ */
+void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
+{
+	struct kvm_memslots *slots = kvm_memslots(kvm);
+	struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
+	phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
+	phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+	apt_wp_range(kvm, start, end);
+	spin_unlock(&kvm->mmu_lock);
+	/* Other vcpus must be told to flush their TLBs, via the request mechanism */
+	kvm_flush_remote_tlbs(kvm);
+}
+
+void kvm_mark_migration(struct kvm *kvm, int mark)
+{
+	struct kvm_vcpu *vcpu;
+	unsigned long cpu;
+
+	kvm_for_each_vcpu(cpu, vcpu, kvm)
+		vcpu->arch.migration_mark = mark;
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+				struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *new,
+				enum kvm_mr_change change)
+{
+	/*
+	 * At this point memslot has been committed and there is an
+	 * allocated dirty_bitmap[], dirty pages will be tracked while the
+	 * memory slot is write protected.
+	 */
+	if (change == KVM_MR_FLAGS_ONLY && (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
+				new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
+		kvm_mark_migration(kvm, 1);
+		kvm_mmu_wp_memory_region(kvm, new->id);
+	}
+	/* If dirty logging has been stopped, do nothing for now. */
+	if ((change != KVM_MR_DELETE)
+			&& (old->flags & KVM_MEM_LOG_DIRTY_PAGES)
+			&& (!(new->flags & KVM_MEM_LOG_DIRTY_PAGES))) {
+		kvm_mark_migration(kvm, 0);
+		return;
+	}
+}
+
+void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
+				struct kvm_memory_slot *slot)
+{
+	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
+	phys_addr_t size = slot->npages << PAGE_SHIFT;
+
+	spin_lock(&kvm->mmu_lock);
+//	flush_apt_tlbs(kvm);
+	unmap_apt_range(kvm, gpa, size);
+	spin_unlock(&kvm->mmu_lock);
+}
+
+/**
+ * kvm_alloc_addtional_stage_pgd - allocate level-1 table for additional stage translation.
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Allocates only the additional stage HW PGD level table(s) (can support full
+ * 48-bit input addresses). Clears the allocated pages.
+ *
+ * Note we don't need locking here as this is only called when the VM is
+ * created, which can only be done once.
+ */
+int kvm_alloc_addtional_stage_pgd(struct kvm *kvm)
+{
+	pgd_t *pgd;
+
+	if (kvm->arch.pgd != NULL) {
+		kvm_err("kvm_arch already initialized?\n");
+		return -EINVAL;
+	}
+
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(PAGE_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
+		return -ENOMEM;
+
+	kvm->arch.pgd = pgd;
+	return 0;
+}
+
+/**
+ * kvm_free_apt_pgd - free all apt tables
+ * @kvm:	The KVM struct pointer for the VM.
+ *
+ * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
+ * underlying level-2 and level-3 tables before freeing the actual level-1 table
+ * and setting the struct pointer to NULL.
+ */ +void kvm_free_apt_pgd(struct kvm *kvm) +{ + void *pgd = NULL; + + spin_lock(&kvm->mmu_lock); + if (kvm->arch.pgd) { + unmap_apt_range(kvm, 0, KVM_PHYS_SIZE); + pgd = READ_ONCE(kvm->arch.pgd); + kvm->arch.pgd = NULL; + } + spin_unlock(&kvm->mmu_lock); + + /* Free the HW pgd, one page at a time */ + if (pgd) + free_pages_exact(pgd, PAGE_SIZE); +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ + kvm_free_apt_pgd(kvm); +} + +static void kvm_send_hwpoison_signal(unsigned long address, + struct vm_area_struct *vma) +{ + kernel_siginfo_t info; + + clear_siginfo(&info); + info.si_signo = SIGBUS; + info.si_errno = 0; + info.si_code = BUS_MCEERR_AR; + info.si_addr = (void __user *)address; + + if (is_vm_hugetlb_page(vma)) + info.si_addr_lsb = huge_page_shift(hstate_vma(vma)); + else + info.si_addr_lsb = PAGE_SHIFT; + + send_sig_info(SIGBUS, &info, current); +} + +static bool fault_supports_apt_huge_mapping(struct kvm_memory_slot *memslot, + unsigned long hva, + unsigned long map_size) +{ + gpa_t gpa_start; + hva_t uaddr_start, uaddr_end; + size_t size; + + /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ + if (map_size == PAGE_SIZE) + return true; + + size = memslot->npages * PAGE_SIZE; + + gpa_start = memslot->base_gfn << PAGE_SHIFT; + + uaddr_start = memslot->userspace_addr; + uaddr_end = uaddr_start + size; + + /* + * Pages belonging to memslots that don't have the same alignment + * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 + * PMD/PUD entries, because we'll end up mapping the wrong pages. + * + * Consider a layout like the following: + * + * memslot->userspace_addr: + * +-----+--------------------+--------------------+---+ + * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| + * +-----+--------------------+--------------------+---+ + * + * memslot->base_gfn << PAGE_SHIFT: + * +---+--------------------+--------------------+-----+ + * |abc|def Stage-2 block | Stage-2 block |tvxyz| + * +---+--------------------+--------------------+-----+ + * + * If we create those stage-2 blocks, we'll end up with this incorrect + * mapping: + * d -> f + * e -> g + * f -> h + */ + if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) + return false; + + /* + * Next, let's make sure we're not trying to map anything not covered + * by the memslot. This means we have to prohibit block size mappings + * for the beginning and end of a non-block aligned and non-block sized + * memory slot (illustrated by the head and tail parts of the + * userspace view above containing pages 'abcde' and 'xyz', + * respectively). + * + * Note that it doesn't matter if we do the check using the + * userspace_addr or the base_gfn, as both are equally aligned (per + * the check above) and equally sized. + */ + return (hva & ~(map_size - 1)) >= uaddr_start && + (hva & ~(map_size - 1)) + map_size <= uaddr_end; +} + +/* + * apt_get_leaf_entry - walk the stage2 VM page tables and return + * true if a valid and present leaf-entry is found. A pointer to the + * leaf-entry is returned in the appropriate level variable - pudpp, + * pmdpp, ptepp. 
+ */ +static bool apt_get_leaf_entry(struct kvm *kvm, phys_addr_t addr, + pud_t **pudpp, pmd_t **pmdpp, pte_t **ptepp) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + + *pudpp = NULL; + *pmdpp = NULL; + *ptepp = NULL; + + pudp = apt_get_pud(kvm->arch.pgd, NULL, addr); + if (!pudp || pud_none(*pudp) || !pud_present(*pudp)) + return false; + + if (pud_huge(*pudp)) { + *pudpp = pudp; + return true; + } + + pmdp = pmd_offset(pudp, addr); + if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp)) + return false; + + if (pmd_trans_huge(*pmdp)) { + *pmdpp = pmdp; + return true; + } + + ptep = pte_offset_kernel(pmdp, addr); + if (!ptep || pte_none(*ptep) || !pte_present(*ptep)) + return false; + + *ptepp = ptep; + return true; +} + +static bool apt_is_exec(struct kvm *kvm, phys_addr_t addr) +{ + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep; + bool found; + + found = apt_get_leaf_entry(kvm, addr, &pudp, &pmdp, &ptep); + if (!found) + return false; + + if (pudp) + return kvm_pud_exec(pudp); + else if (pmdp) + return kvm_pmd_exec(pmdp); + else + return kvm_pte_exec(ptep); +} + +static int apt_set_pte_fast(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + int inv_level = ((read_csr(CSR_AS_INFO)) >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK; + unsigned long inv_hpa = read_csr(CSR_AS_INFO) & AF_ENTRY_ADDR_MASK; + + VM_BUG_ON(logging_active && !cache); + + if (inv_level == 1) { + pud = (pud_t *)(inv_hpa | PAGE_OFFSET); + goto find_pud; + } else if (inv_level == 2) { + pmd = (pmd_t *)(inv_hpa | PAGE_OFFSET); + goto find_pmd; + } else if (inv_level == 3) { + pte = (pte_t *)(inv_hpa | PAGE_OFFSET); + goto find_pte; + } + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + +find_pud: + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + +find_pmd: + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + +find_pte: + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? 
*/ + set_pte(pte, *new_pte); + return 0; +} + +static int apt_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, + phys_addr_t addr, const pte_t *new_pte, + unsigned long flags) +{ + pud_t *pud; + pmd_t *pmd; + pte_t *pte, old_pte; + bool logging_active = flags & KVM_APT_FLAG_LOGGING_ACTIVE; + + VM_BUG_ON(logging_active && !cache); + + /* Create addtional page table mapping - Levels 0 and 1 */ + pud = apt_get_pud(kvm->arch.pgd, cache, addr); + if (!pud) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PUD, then continue + * on to allocate page. + */ + if (logging_active) + apt_dissolve_pud(kvm, addr, pud); + + if (pud_none(*pud)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pmd = mmu_memory_cache_alloc(cache); + pud_populate(NULL, pud, pmd); + get_page(virt_to_page(pud)); + } + + pmd = pmd_offset(pud, addr); + if (!pmd) { + /* + * Ignore calls from kvm_set_spte_hva for unallocated + * address ranges. + */ + return 0; + } + + /* + * While dirty page logging - dissolve huge PMD, then continue on to + * allocate page. + */ + if (logging_active) + apt_dissolve_pmd(kvm, addr, pmd); + + /* Create stage-2 page mappings - Level 2 */ + if (pmd_none(*pmd)) { + if (!cache) + return 0; /* ignore calls from kvm_set_spte_hva */ + pte = mmu_memory_cache_alloc(cache); + pmd_populate_kernel(NULL, pmd, pte); + get_page(virt_to_page(pmd)); + } + + pte = pte_offset_kernel(pmd, addr); + + /* Create 2nd stage page table mapping - Level 3 */ + old_pte = *pte; + + /* new pte should be readonly? */ +// *new_pte = pte_wrprotect(*new_pte); + + if (pte_present(old_pte)) { + /* Skip page table update if there is no change */ + if (pte_val(old_pte) == pte_val(*new_pte)) + return 0; + + /* Do we need WRITE_ONCE(pte, 0)? */ + set_pte(pte, __pte(0)); + kvm_flush_remote_tlbs(kvm); + } else { + get_page(virt_to_page(pte)); + } + + /* Do we need WRITE_ONCE(pte, new_pte)? */ + set_pte(pte, *new_pte); + return 0; +} + + + +static int apt_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache + *cache, phys_addr_t addr, const pmd_t *new_pmd, unsigned long sz) +{ + pmd_t *pmd, old_pmd, *ori_pmd; + int i; +retry: + pmd = apt_get_pmd(kvm, cache, addr, sz); + VM_BUG_ON(!pmd); + ori_pmd = pmd; + old_pmd = *pmd; + if (pmd_present(old_pmd)) { + /* + * If we already have PTE level mapping for this block, + * we must unmap it to avoid inconsistent TLB state and + * leaking the table page. We could end up in this situation + * if the memory slot was marked for dirty logging and was + * reverted, leaving PTE level mappings for the pages accessed + * during the period. So, unmap the PTE level mapping for this + * block and retry, as we could have released the upper level + * table in the process. + * + * Normal THP split/merge follows mmu_notifier callbacks and do + * get handled accordingly. + */ + if (!pmd_trans_huge(old_pmd)) { + unmap_apt_range(kvm, addr & PMD_MASK, PMD_SIZE); + goto retry; + } + /* + * Multiple vcpus faulting on the same PMD entry, can + * lead to them sequentially updating the PMD with the + * same value. Following the break-before-make + * (pmd_clear() followed by tlb_flush()) process can + * hinder forward progress due to refaults generated + * on missing translations. + * + * Skip updating the page table if the entry is + * unchanged. + */ + if (pmd_val(old_pmd) == pmd_val(*new_pmd)) + return 0; + + /* + * Mapping in huge pages should only happen through a + * fault. 
If a page is merged into a transparent huge
+		 * page, the individual subpages of that huge page
+		 * should be unmapped through MMU notifiers before we
+		 * get here.
+		 *
+		 * Merging of CompoundPages is not supported; they
+		 * should be split first, unmapped, merged,
+		 * and mapped back in on-demand.
+		 */
+		VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
+
+		if (sz == CONT_PMD_SIZE) {
+			for (i = 0; i < CONT_PMDS; i++, pmd++)
+				pmd_clear(pmd);
+		} else
+			pmd_clear(pmd);
+		kvm_flush_remote_tlbs(kvm);
+	} else {
+		get_page(virt_to_page(pmd));
+	}
+
+	/* Do we need WRITE_ONCE(pmd, new_pmd)? */
+	if (sz == CONT_PMD_SIZE) {
+		for (i = 0; i < CONT_PMDS; i++, ori_pmd++)
+			set_pmd(ori_pmd, *new_pmd);
+	} else
+		set_pmd(pmd, *new_pmd);
+	return 0;
+}
+
+static int apt_set_pud_huge(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
+			phys_addr_t addr, const pud_t *new_pudp)
+{
+	pud_t *pudp, old_pud;
+
+retry:
+	pudp = apt_get_pud(kvm->arch.pgd, cache, addr);
+	VM_BUG_ON(!pudp);
+
+	old_pud = *pudp;
+
+	/*
+	 * A large number of vcpus faulting on the same stage 2 entry,
+	 * can lead to a refault due to the stage2_pud_clear()/tlb_flush().
+	 * Skip updating the page tables if there is no change.
+	 */
+	if (pud_val(old_pud) == pud_val(*new_pudp))
+		return 0;
+
+	if (pud_present(old_pud)) {
+		/*
+		 * If we already have table level mapping for this block, unmap
+		 * the range for this block and retry.
+		 */
+		if (!pud_huge(old_pud)) {
+			unmap_apt_range(kvm, addr & PUD_MASK, PUD_SIZE);
+			goto retry;
+		}
+
+//		WARN_ON_ONCE(kvm_pud_pfn(old_pud) != kvm_pud_pfn(*new_pudp));
+		pud_clear(pudp);
+		kvm_flush_remote_tlbs(kvm);
+	} else {
+		get_page(virt_to_page(pudp));
+	}
+
+	set_pud(pudp, *new_pudp);
+	return 0;
+}
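Both huge-mapping installers above follow the break-before-make discipline when replacing a live entry; the invariant in miniature, as a sketch without the full retry logic::

	/* clear, flush, then install: skipping the flush could leave two
	 * TLB entries alive for the same guest physical address */
	static void replace_block_mapping(struct kvm *kvm, pmd_t *pmd, pmd_t new)
	{
		pmd_clear(pmd);			/* break */
		kvm_flush_remote_tlbs(kvm);
		set_pmd(pmd, new);		/* make */
	}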
+
+static unsigned long
+transparent_hugepage_adjust(struct kvm_memory_slot *memslot,
+			unsigned long hva, kvm_pfn_t *pfnp,
+			phys_addr_t *gpap)
+{
+	kvm_pfn_t pfn = *pfnp;
+	struct page *page = pfn_to_page(pfn);
+
+	/*
+	 * Make sure the adjustment is done only for THP pages. Also make
+	 * sure that the HVA and IPA are sufficiently aligned and that the
+	 * block map is contained within the memslot.
+	 */
+	if (!PageHuge(page) && PageTransCompoundMap(page) &&
+	    fault_supports_apt_huge_mapping(memslot, hva, PMD_SIZE)) {
+		/*
+		 * The address we faulted on is backed by a transparent huge
+		 * page. However, because we map the compound huge page and
+		 * not the individual tail page, we need to transfer the
+		 * refcount to the head page. We have to be careful that the
+		 * THP doesn't start to split while we are adjusting the
+		 * refcounts.
+		 *
+		 * We are sure this doesn't happen, because mmu_notifier_retry
+		 * was successful and we are holding the mmu_lock, so if this
+		 * THP is trying to split, it will be blocked in the mmu
+		 * notifier before touching any of the pages, specifically
+		 * before being able to call __split_huge_page_refcount().
+		 *
+		 * We can therefore safely transfer the refcount from PG_tail
+		 * to PG_head and switch the pfn from a tail page to the head
+		 * page accordingly.
+		 */
+		*gpap &= PMD_MASK;
+		kvm_release_pfn_clean(pfn);
+		pfn &= ~(PTRS_PER_PMD - 1);
+		kvm_get_pfn(pfn);
+		*pfnp = pfn;
+		return PMD_SIZE;
+	}
+
+	return PAGE_SIZE;
+}
+
+static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_gpa,
+			struct kvm_memory_slot *memslot, unsigned long hva,
+			unsigned long fault_status)
+{
+	int ret;
+	bool write_fault, exec_fault, writable, force_pte = false;
+	unsigned long mmu_seq;
+	gfn_t gfn = fault_gpa >> PAGE_SHIFT;
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	struct vm_area_struct *vma;
+	kvm_pfn_t pfn;
+	pgprot_t mem_type = PAGE_READONLY;
+	bool logging_active = memslot_is_logging(memslot);
+	unsigned long vma_pagesize, flags = 0;
+	unsigned long as_info, access_type;
+	unsigned int vma_shift;
+
+	as_info = read_csr(CSR_AS_INFO);
+	access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK;
+	write_fault = kvm_is_write_fault(access_type);
+	exec_fault = kvm_is_exec_fault(access_type);
+	VM_BUG_ON(write_fault && exec_fault);
+
+	if (fault_status == AF_STATUS_FOR) {
+		kvm_err("Unexpected APT read permission error\n");
+		return -EFAULT;
+	}
+
+	/* Let's check if we will get back a huge page backed by hugetlbfs */
+	down_read(&current->mm->mmap_lock);
+	vma = find_vma_intersection(current->mm, hva, hva + 1);
+	if (unlikely(!vma)) {
+		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
+		up_read(&current->mm->mmap_lock);
+		return -EFAULT;
+	}
+
+	if (is_vm_hugetlb_page(vma))
+		vma_shift = huge_page_shift(hstate_vma(vma));
+	else
+		vma_shift = PAGE_SHIFT;
+
+	vma_pagesize = 1ULL << vma_shift;
+	if (logging_active || (vma->vm_flags & VM_PFNMAP) ||
+	    !fault_supports_apt_huge_mapping(memslot, hva, vma_pagesize)) {
+		force_pte = true;
+		vma_pagesize = PAGE_SIZE;
+	}
+
+	if (vma_pagesize == PMD_SIZE || vma_pagesize == CONT_PMD_SIZE || vma_pagesize == PUD_SIZE)
+		gfn = (fault_gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+	up_read(&current->mm->mmap_lock);
+	/* We need minimum second+third level pages */
+	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
+				KVM_NR_MEM_OBJS);
+	if (ret)
+		return ret;
+
+	mmu_seq = vcpu->kvm->mmu_notifier_seq;
+	/*
+	 * Ensure the read of mmu_notifier_seq happens before we call
+	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
+	 * the page we just got a reference to gets unmapped before we have a
+	 * chance to grab the mmu_lock, which ensures that if the page gets
+	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
+	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
+	 * in kvm_mmu_notifier_invalidate_.
+	 */
+	smp_rmb();
+
+	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
+	if (pfn == KVM_PFN_ERR_HWPOISON) {
+		kvm_send_hwpoison_signal(hva, vma);
+		return 0;
+	}
+	if (is_error_noslot_pfn(pfn))
+		return -EFAULT;
+
+	if (logging_active) {
+		/*
+		 * Faults on pages in a memslot with logging enabled
+		 * should not be mapped with huge pages (it introduces churn
+		 * and performance degradation), so force a pte mapping.
+		 */
+		flags |= KVM_APT_FLAG_LOGGING_ACTIVE;
+
+		/*
+		 * Only actually map the page as writable if this was a write
+		 * fault.
+		 */
+		if (!write_fault)
+			writable = false;
+	}
+
+	spin_lock(&kvm->mmu_lock);
+	if (mmu_notifier_retry(kvm, mmu_seq))
+		goto out_unlock;
+
+	/*
+	 * If we are not forced to use page mapping, check if we are
+	 * backed by a THP and thus use block mapping if possible.
+ */ + if (vma_pagesize == PAGE_SIZE && !force_pte) { + vma_pagesize = transparent_hugepage_adjust(memslot, hva, + &pfn, &fault_gpa); + } + + if (vma_pagesize == PUD_SIZE) { + pud_t new_pud = pfn_pud(pfn, mem_type); + + new_pud = pud_mkhuge(new_pud); + + if (writable) { + new_pud = kvm_pud_mkwrite(new_pud); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pud = kvm_pud_mkexec(new_pud); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pud = kvm_pud_mkexec(new_pud); + } + + ret = apt_set_pud_huge(kvm, memcache, fault_gpa, &new_pud); + } else if (vma_pagesize == CONT_PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + new_pmd = pmd_mkcont(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else if (vma_pagesize == PMD_SIZE) { + pmd_t new_pmd = pfn_pmd(pfn, mem_type); + + new_pmd = pmd_mkhuge(new_pmd); + + if (writable) { + new_pmd = kvm_pmd_mkwrite(new_pmd); + kvm_set_pfn_dirty(pfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pmd = kvm_pmd_mkexec(new_pmd); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pmd = kvm_pmd_mkexec(new_pmd); + } + + ret = apt_set_pmd_huge(kvm, memcache, fault_gpa, &new_pmd, vma_pagesize); + } else { + pte_t new_pte = pfn_pte(pfn, mem_type); + + if (writable) { + new_pte = kvm_pte_mkwrite(new_pte); + kvm_set_pfn_dirty(pfn); + mark_page_dirty(kvm, gfn); + } + + if (exec_fault && fault_status == AF_STATUS_INV) { + new_pte = kvm_pte_mkexec(new_pte); + } else if (fault_status == AF_STATUS_FOE) { + /* Preserve execute if FOE was already cleared */ + if (apt_is_exec(kvm, fault_gpa)) + new_pte = kvm_pte_mkexec(new_pte); + } + + ret = apt_set_pte_fast(kvm, memcache, fault_gpa, &new_pte, flags); + if (!ret) + goto out_unlock; + } + +out_unlock: + spin_unlock(&kvm->mmu_lock); + kvm_set_pfn_accessed(pfn); + kvm_release_pfn_clean(pfn); + return ret; +} + +/** + * kvm_handle_guest_abort - handles all 2nd stage aborts + * @vcpu: the VCPU pointer + * @run: the kvm_run structure + * + * Any abort that gets to the host is almost guaranteed to be caused by a + * missing second stage translation table entry, which can mean that either the + * guest simply needs more memory and we must allocate an appropriate page or it + * can mean that the guest tried to access I/O memory, which is emulated by user + * space. The distinction is based on the IPA causing the fault and whether this + * memory region has been registered as standard RAM by user space. 
+ */
+#ifdef CONFIG_SUBARCH_C4
+int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	unsigned long as_info;		/* the value of CSR: AS_INFO */
+	unsigned int access_type, inv_level;
+	unsigned int fault_status;
+	unsigned long fault_entry_addr;
+	phys_addr_t fault_gpa;
+	struct kvm_memory_slot *memslot;
+	unsigned long hva;
+	bool write_fault, writable;
+	gfn_t gfn;
+
+	int ret, idx;
+
+	as_info = read_csr(CSR_AS_INFO);
+	access_type = (as_info >> AF_ACCESS_TYPE_SHIFT) & AF_ACCESS_TYPE_MASK;
+	inv_level = (as_info >> AF_INV_LEVEL_SHIFT) & AF_INV_LEVEL_MASK;
+	fault_status = (as_info >> AF_FAULT_STATUS_SHIFT) & AF_FAULT_STATUS_MASK;
+	fault_entry_addr = (as_info & AF_ENTRY_ADDR_MASK) >> 3;
+
+	fault_gpa = read_csr(CSR_EXC_GPA);
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
+	gfn = fault_gpa >> PAGE_SHIFT;
+	memslot = gfn_to_memslot(vcpu->kvm, gfn);
+	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
+
+	write_fault = kvm_is_write_fault(access_type);
+
+	/*
+	 * The memory slot for IO is not registered as a memory region
+	 * with kvm; if hva == KVM_HVA_ERR_BAD, the gpa belongs to MMIO
+	 * and needs emulation.
+	 */
+	if (hva == KVM_HVA_ERR_BAD) {
+		ret = io_mem_abort(vcpu, run, NULL);
+		goto out_unlock;
+	}
+	/* Userspace should not be able to register out-of-bounds IPAs */
+	VM_BUG_ON(fault_gpa >= KVM_PHYS_SIZE);
+
+	ret = user_mem_abort(vcpu, fault_gpa, memslot, hva, fault_status);
+	if (ret == 0)
+		ret = 1;
+out_unlock:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return ret;
+}
+#endif
+
+static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start, unsigned long end,
+			int (*handler)(struct kvm *kvm, gpa_t gpa, u64 size, void *data),
+			void *data)
+{
+	struct kvm_memslots *slots;
+	struct kvm_memory_slot *memslot;
+	int ret = 0;
+
+	slots = kvm_memslots(kvm);
+
+	/* we only care about the pages that the guest sees */
+	kvm_for_each_memslot(memslot, slots) {
+		unsigned long hva_start, hva_end;
+		gfn_t gpa;
+
+		hva_start = max(start, memslot->userspace_addr);
+		hva_end = min(end, memslot->userspace_addr +
+					(memslot->npages << PAGE_SHIFT));
+		if (hva_start >= hva_end)
+			continue;
+
+		gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
+		ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
+	}
+
+	return ret;
+}
+
+static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+	unmap_apt_range(kvm, gpa, size);
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end, bool blockable)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	return 1;
+}
+
+static int apt_ptep_test_and_clear_young(pte_t *pte)
+{
+	if (pte_young(*pte)) {
+		*pte = pte_mkold(*pte);
+		return 1;
+	}
+	return 0;
+}
+
+static int apt_pmdp_test_and_clear_young(pmd_t *pmd)
+{
+	return apt_ptep_test_and_clear_young((pte_t *)pmd);
+}
+
+static int apt_pudp_test_and_clear_young(pud_t *pud)
+{
+	return apt_ptep_test_and_clear_young((pte_t *)pud);
+}
+
+static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+	if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
+		return 0;
+
+	if (pud)
+		return apt_pudp_test_and_clear_young(pud);
+	else if (pmd)
+		return apt_pmdp_test_and_clear_young(pmd);
+	else
+		return apt_ptep_test_and_clear_young(pte);
+}
+
+static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	WARN_ON(size != PAGE_SIZE && size != PMD_SIZE && size != PUD_SIZE);
+	if (!apt_get_leaf_entry(kvm, gpa, &pud, &pmd, &pte))
+		return 0;
+
+	if (pud)
+		return apt_pudp_test_and_clear_young(pud);
+	else if (pmd)
+		return apt_pmdp_test_and_clear_young(pmd);
+	else
+		return apt_ptep_test_and_clear_young(pte);
+}
+
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
+}
+
+int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+	return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE,
+				kvm_test_age_hva_handler, NULL);
+}
+
+static int kvm_set_apte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	WARN_ON(size != PAGE_SIZE);
+
+	apt_set_pte(kvm, NULL, gpa, pte, 0);
+	return 0;
+}
+
+int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	unsigned long end = hva + PAGE_SIZE;
+	pte_t apt_pte;
+
+	if (!kvm->arch.pgd)
+		return 0;
+
+	apt_pte = pte_wrprotect(pte);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_apte_handler, &apt_pte);
+	return 0;
+}
+
+/**
+ * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
+ * @kvm:	The KVM pointer
+ * @slot:	The memory slot associated with mask
+ * @gfn_offset:	The gfn offset in memory slot
+ * @mask:	The mask of dirty pages at offset 'gfn_offset' in this memory
+ *		slot to be write protected
+ *
+ * Walks bits set in mask write protects the associated pte's. Caller must
+ * acquire kvm_mmu_lock.
+ */
+static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
+	phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
+	phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
+
+	apt_wp_range(kvm, start, end);
+}
+
+/*
+ * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
+ * dirty pages.
+ *
+ * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
+ * enable dirty logging for them.
+ */
+void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
+		struct kvm_memory_slot *slot,
+		gfn_t gfn_offset, unsigned long mask)
+{
+	kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
+}
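The mask-to-range conversion in kvm_mmu_write_protect_pt_masked() can be checked by hand. For mask 0x38 (bits 3..5 set) at gfn_offset 0::

	/* __ffs(0x38) = 3, __fls(0x38) = 5
	 * start = (base_gfn + 3) << PAGE_SHIFT
	 * end   = (base_gfn + 5 + 1) << PAGE_SHIFT
	 * so exactly pages base_gfn+3 .. base_gfn+5 get write protected */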
diff --git a/arch/sw_64/kvm/perf.c b/arch/sw_64/kvm/perf.c
new file mode 100644
index 0000000000000000000000000000000000000000..730dd1feeccf83e4de96ea93608b7a7752ef5ebc
--- /dev/null
+++ b/arch/sw_64/kvm/perf.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Performance events support for KVM.
+ */
+
+#include
+#include
+
+#include
+
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
+{
+	return (vcpu->arch.regs.ps & 8) != 0;
+}
+
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.regs.pc;
+}
+
+static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+	return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
+}
diff --git a/arch/sw_64/kvm/sw64.c b/arch/sw_64/kvm/sw64.c
new file mode 100644
index 0000000000000000000000000000000000000000..f6bfb2452938c0e573c222e23b7ea04a2a4e2ab6
--- /dev/null
+++ b/arch/sw_64/kvm/sw64.c
@@ -0,0 +1,592 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+#include "irq.h"
+
+bool set_msi_flag;
+
+static unsigned long get_new_vpn_context(struct kvm_vcpu *vcpu, long cpu)
+{
+	unsigned long vpn = last_vpn(cpu);
+	unsigned long next = vpn + 1;
+
+	if ((vpn & VPN_MASK) >= VPN_MASK) {
+		kvm_flush_tlb_all();
+		next = (vpn & ~VPN_MASK) + VPN_FIRST_VERSION + 1; /* bypass 0 */
+	}
+	last_vpn(cpu) = next;
+	return next;
+}
+
+int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
+{
+	set_bit(number, (vcpu->arch.irqs_pending));
+	kvm_vcpu_kick(vcpu);
+	return 0;
+}
+
+int kvm_arch_check_processor_compat(void *opaque)
+{
+	return 0;
+}
+
+int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm, int irq_source_id,
+		int level, bool line_status)
+{
+	unsigned int vcid;
+	unsigned int vcpu_idx;
+	struct kvm_vcpu *vcpu = NULL;
+	int irq = e->msi.data & 0xff;
+
+	vcid = (e->msi.address_lo & VT_MSIX_ADDR_DEST_ID_MASK) >> VT_MSIX_ADDR_DEST_ID_SHIFT;
+	vcpu_idx = vcid & 0x1f;
+	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+
+	if (!vcpu)
+		return -EINVAL;
+
+	return vcpu_interrupt_line(vcpu, irq, true);
+}
+
+void sw64_kvm_switch_vpn(struct kvm_vcpu *vcpu)
+{
+	unsigned long vpn;
+	unsigned long vpnc;
+	long cpu = smp_processor_id();
+
+	vpn = last_vpn(cpu);
+	vpnc = vcpu->arch.vpnc[cpu];
+
+	if ((vpnc ^ vpn) & ~VPN_MASK) {
+		/* vpnc and cpu vpn not in the same version, get new vpnc and vpn */
+		vpnc = get_new_vpn_context(vcpu, cpu);
+		vcpu->arch.vpnc[cpu] = vpnc;
+	}
+
+	vpn = vpnc & VPN_MASK;
+
+	/* Always update vpn */
+	/* Just setup vcb, hardware CSR will be changed later in HMcode */
+	kvm_sw64_update_vpn(vcpu, vpn);
+
+	/*
+	 * If the vcpu migrated to a new physical cpu, the new physical cpu
+	 * may keep old tlb entries for this vcpu's vpn, and the upn in those
+	 * old tlb entries may not be in the same version as the current
+	 * vcpu's upn.
+	 * For now, we don't know the vcpu's upn version or the current version.
+	 * If we kept track of the vcpu's upn version, the TLB-flush could be
+	 * less frequent. To be safe and correct, flush all tlb entries of the
+	 * current vpn for now.
+ */ + + if (vcpu->arch.pcpu_id != cpu) { + tbivpn(0, 0, vpn); + vcpu->arch.pcpu_id = cpu; + vcpu->cpu = cpu; + } +} + +void check_vcpu_requests(struct kvm_vcpu *vcpu) +{ + unsigned long vpn; + long cpu = smp_processor_id(); + + if (kvm_request_pending(vcpu)) { + if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { + vpn = vcpu->arch.vpnc[cpu] & VPN_MASK; + tbivpn(0, 0, vpn); + } + } +} + + +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return ((!bitmap_empty(vcpu->arch.irqs_pending, SWVM_IRQS) || !vcpu->arch.halted) + && !vcpu->arch.power_off); +} + +int kvm_arch_hardware_enable(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +bool kvm_arch_has_vcpu_debugfs(void) +{ + return false; +} + +int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) +{ + int r = 0; + + switch (ext) { + case KVM_CAP_IRQCHIP: + case KVM_CAP_IOEVENTFD: + case KVM_CAP_SYNC_MMU: + r = 1; + break; + case KVM_CAP_NR_VCPUS: + case KVM_CAP_MAX_VCPUS: + r = KVM_MAX_VCPUS; + break; + default: + r = 0; + } + + return r; +} + +void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) +{ +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(SW64_KVM_IRQ_TIMER, vcpu->arch.irqs_pending); +} + +int kvm_arch_hardware_setup(void *opaque) +{ + return 0; +} + +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ + if (type) + return -EINVAL; + + return kvm_sw64_init_vm(kvm); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + return kvm_sw64_destroy_vm(kvm); +} + +long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, + unsigned long npages) +{ + return 0; +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + kvm_mmu_free_memory_caches(vcpu); + hrtimer_cancel(&vcpu->arch.hrt); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) +{ + /* Set up the timer for Guest */ + pr_info("vcpu: [%d], regs addr = %#lx, vcpucb = %#lx\n", vcpu->vcpu_id, + (unsigned long)&vcpu->arch.regs, (unsigned long)&vcpu->arch.vcb); + vcpu->arch.vtimer_freq = cpuid(GET_CPU_FREQ, 0) * 1000UL * 1000UL; + hrtimer_init(&vcpu->arch.hrt, CLOCK_REALTIME, HRTIMER_MODE_ABS); + vcpu->arch.hrt.function = clockdev_fn; + vcpu->arch.tsk = current; + + vcpu->arch.vcb.soft_cid = vcpu->vcpu_id; + vcpu->arch.vcb.vcpu_irq_disabled = 1; + vcpu->arch.pcpu_id = -1; /* force flush tlb for the first time */ + + return 0; +} + +int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) +{ + return 0; +} + +int kvm_set_routing_entry(struct kvm *kvm, + struct kvm_kernel_irq_routing_entry *e, + const struct kvm_irq_routing_entry *ue) +{ + int r = -EINVAL; + + switch (ue->type) { + case KVM_IRQ_ROUTING_MSI: + e->set = kvm_set_msi; + e->msi.address_lo = ue->u.msi.address_lo; + e->msi.address_hi = ue->u.msi.address_hi; + e->msi.data = ue->u.msi.data; + e->msi.flags = ue->flags; + e->msi.devid = ue->u.msi.devid; + set_msi_flag = true; + break; + default: + goto out; + } + r = 0; +out: + return r; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, + struct kvm_translation *tr) +{ + return -EINVAL; /* not implemented yet */ +} + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) 
+{ + return 0; +} + + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + vcpu->cpu = cpu; +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + /* + * The arch-generic KVM code expects the cpu field of a vcpu to be -1 + * if the vcpu is no longer assigned to a cpu. This is used for the + * optimized make_all_cpus_request path. + */ + vcpu->cpu = -1; +} + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -ENOIOCTLCMD; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(&(vcpu->arch.regs), regs, sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + memcpy(regs, &(vcpu->arch.regs), sizeof(struct kvm_regs)); + return 0; +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return 0; +} + + +/* + * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on + * proper exit to userspace. + */ +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + struct vcpucb *vcb = &(vcpu->arch.vcb); + struct hcall_args hargs; + int irq, ret; + bool more; + sigset_t sigsaved; + + /* Set guest vcb */ + /* vpn will update later when vcpu is running */ + vcpu_set_numa_affinity(vcpu); +#ifdef CONFIG_PERF_EVENTS + vcpu_load(vcpu); +#endif + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (run->exit_reason == KVM_EXIT_MMIO) + kvm_handle_mmio_return(vcpu, run); + + run->exit_reason = KVM_EXIT_UNKNOWN; + ret = 1; + while (ret > 0) { + /* Check conditions before entering the guest */ + cond_resched(); + + preempt_disable(); + local_irq_disable(); + + if (signal_pending(current)) { + ret = -EINTR; + run->exit_reason = KVM_EXIT_INTR; + } + + if (ret <= 0) { + local_irq_enable(); + preempt_enable(); + continue; + } + + memset(&hargs, 0, sizeof(hargs)); + + clear_vcpu_irq(vcpu); + + if (vcpu->arch.restart == 1) { + /* handle reset vCPU */ + vcpu->arch.regs.pc = GUEST_RESET_PC; + vcpu->arch.restart = 0; + } + + irq = interrupt_pending(vcpu, &more); + if (irq < SWVM_IRQS) + try_deliver_interrupt(vcpu, irq, more); + + vcpu->arch.halted = 0; + + sw64_kvm_switch_vpn(vcpu); + check_vcpu_requests(vcpu); + guest_enter_irqoff(); + + /* update aptp before the guest runs */ + update_aptp((unsigned long)vcpu->kvm->arch.pgd); + + /* Enter the guest */ + trace_kvm_sw64_entry(vcpu->vcpu_id, vcpu->arch.regs.pc); + vcpu->mode = IN_GUEST_MODE; + + ret = __sw64_vcpu_run(__pa(vcb), &(vcpu->arch.regs), &hargs); + + /* Back from guest */ + vcpu->mode = OUTSIDE_GUEST_MODE; + + local_irq_enable(); + guest_exit_irqoff(); + + trace_kvm_sw64_exit(ret, vcpu->arch.regs.pc); + + preempt_enable(); + + /* ret = 0 indicate interrupt in guest mode, ret > 0 indicate hcall */ + ret = handle_exit(vcpu, run, ret, &hargs); + } + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + +#ifdef CONFIG_PERF_EVENTS + vcpu_put(vcpu); +#endif + + return ret; +} + +long kvm_arch_vcpu_ioctl(struct file *filp, + unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + int r; + + switch (ioctl) { + case KVM_SW64_VCPU_INIT: + r = kvm_sw64_vcpu_reset(vcpu); + break; + case KVM_SW64_GET_VCB: + r = kvm_sw64_get_vcb(filp, arg); + break; + case KVM_SW64_SET_VCB: + r = 
kvm_sw64_set_vcb(filp, arg);
+		break;
+	default:
+		r = -EINVAL;
+	}
+
+	return r;
+}
+
+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+	struct kvm *kvm __maybe_unused = filp->private_data;
+	long r;
+
+	switch (ioctl) {
+	case KVM_CREATE_IRQCHIP: {
+		struct kvm_irq_routing_entry routing;
+
+		memset(&routing, 0, sizeof(routing));
+		r = kvm_set_irq_routing(kvm, &routing, 0, 0);
+		break;
+	}
+	default:
+		r = -ENOIOCTLCMD;
+	}
+	return r;
+}
+
+int kvm_arch_init(void *opaque)
+{
+	return 0;
+}
+
+void kvm_arch_exit(void)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
+				  struct kvm_sregs *sregs)
+{
+	return -ENOIOCTLCMD;
+}
+
+void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
+{
+}
+
+int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
+{
+	return -ENOIOCTLCMD;
+}
+
+vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
+					struct kvm_memory_slot *memslot)
+{
+	/* Let the implementation handle TLB/GVA invalidation */
+	kvm_arch_flush_shadow_memslot(kvm, memslot);
+}
+
+int kvm_dev_ioctl_check_extension(long ext)
+{
+	int r;
+
+	switch (ext) {
+	case KVM_CAP_IOEVENTFD:
+		r = 1;
+		break;
+	case KVM_CAP_NR_VCPUS:
+	case KVM_CAP_MAX_VCPUS:
+		r = KVM_MAX_VCPUS;
+		break;
+	default:
+		r = 0;
+	}
+
+	return r;
+}
+
+void vcpu_send_ipi(struct kvm_vcpu *vcpu, int target_vcpuid, int type)
+{
+	struct kvm_vcpu *target_vcpu = kvm_get_vcpu(vcpu->kvm, target_vcpuid);
+
+	/* Check the target before dereferencing it */
+	if (!target_vcpu)
+		return;
+	if (type == II_RESET)
+		target_vcpu->arch.restart = 1;
+	vcpu_interrupt_line(target_vcpu, 1, 1);
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
+			  bool line_status)
+{
+	u32 irq = irq_level->irq;
+	unsigned int irq_num;
+	struct kvm_vcpu *vcpu = NULL;
+	bool level = irq_level->level;
+
+	irq_num = irq;
+	/* The target core for INTx is core 0 */
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (!vcpu)
+		return -EINVAL;
+
+	return vcpu_interrupt_line(vcpu, irq_num, level);
+}
+
+const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+	KVM_GENERIC_VM_STATS()
+};
+
+const struct kvm_stats_header kvm_vm_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vm_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vm_stats_desc),
+};
+
+const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+	KVM_GENERIC_VCPU_STATS(),
+};
+
+const struct kvm_stats_header kvm_vcpu_stats_header = {
+	.name_size = KVM_STATS_NAME_SIZE,
+	.num_desc = ARRAY_SIZE(kvm_vcpu_stats_desc),
+	.id_offset = sizeof(struct kvm_stats_header),
+	.desc_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE,
+	.data_offset = sizeof(struct kvm_stats_header) + KVM_STATS_NAME_SIZE +
+		       sizeof(kvm_vcpu_stats_desc),
+};
+
+bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
+{
+	return irqchip_in_kernel(kvm);
+}
diff --git a/arch/sw_64/kvm/trace.h b/arch/sw_64/kvm/trace.h
new file mode 100644
index 0000000000000000000000000000000000000000..2611df3d3fa57658881319c3384979aad2ea302c
--- /dev/null
+++ b/arch/sw_64/kvm/trace.h
@@ -0,0
+1,62 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_SW64_KVM_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) +#define _SW64_KVM_TRACE_H + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm + +/* + * Tracepoint for guest mode entry. + */ +TRACE_EVENT(kvm_sw64_entry, + TP_PROTO(unsigned int vcpu_id, unsigned int vcpu_pc), + TP_ARGS(vcpu_id, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, vcpu_id) + __field(unsigned int, vcpu_pc) + ), + + TP_fast_assign( + __entry->vcpu_id = vcpu_id; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("VCPU %u: PC: 0x%08x", __entry->vcpu_id, __entry->vcpu_pc) +); + +/* + * Tracepoint for guest mode exit. + */ + +TRACE_EVENT(kvm_sw64_exit, + TP_PROTO(unsigned int exit_reason, unsigned long vcpu_pc), + TP_ARGS(exit_reason, vcpu_pc), + + TP_STRUCT__entry( + __field(unsigned int, exit_reason) + __field(unsigned long, vcpu_pc) + ), + + TP_fast_assign( + __entry->exit_reason = exit_reason; + __entry->vcpu_pc = vcpu_pc; + ), + + TP_printk("exit_reason: 0x%04x (%11s), PC: 0x%08lx", + __entry->exit_reason, + __print_symbolic(__entry->exit_reason, kvm_sw64_exception_type), + __entry->vcpu_pc) +); + +#endif /* _SW64_KVM_TRACE_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include diff --git a/arch/sw_64/kvm/vmem.c b/arch/sw_64/kvm/vmem.c new file mode 100644 index 0000000000000000000000000000000000000000..688449b65fa5a4df26bcb97594683129971fb27c --- /dev/null +++ b/arch/sw_64/kvm/vmem.c @@ -0,0 +1,183 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include + +static bool addr_in_pool(struct gen_pool *pool, + unsigned long start, size_t size) +{ + bool found = false; + unsigned long end = start + size - 1; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + +static int vmem_vm_insert_page(struct vm_area_struct *vma) +{ + unsigned long addr, uaddr; + struct page *vmem_page; + struct vmem_info *info; + size_t size; + int ret; + + info = vma->vm_private_data; + addr = info->start; + size = info->size; + uaddr = vma->vm_start; + + vm_flags_init(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); + vmem_page = pfn_to_page(addr >> PAGE_SHIFT); + do { + ret = vm_insert_page(vma, uaddr, vmem_page); + if (ret < 0) { + pr_info("vm_insert_page failed: %d\n", ret); + return ret; + } + vmem_page++; + uaddr += PAGE_SIZE; + size -= PAGE_SIZE; + } while (size > 0); + + return 0; +} + +static void vmem_vm_open(struct vm_area_struct *vma) +{ + struct vmem_info *info = vma->vm_private_data; + + atomic_inc(&info->refcnt); +} + +static void vmem_vm_close(struct vm_area_struct *vma) +{ + unsigned long addr; + size_t size; + struct vmem_info *info; + + info = vma->vm_private_data; + addr = info->start; + size = round_up(info->size, 8 << 20); + + if (atomic_dec_and_test(&info->refcnt)) { + if (sw64_kvm_pool && addr_in_pool(sw64_kvm_pool, addr, size)) { + pr_info("gen pool free addr: %#lx, size: %#lx\n", + addr, size); + gen_pool_free(sw64_kvm_pool, addr, size); + } + kfree(info); + } +} + +const struct vm_operations_struct vmem_vm_ops = { + .open = vmem_vm_open, + .close = vmem_vm_close, +}; +EXPORT_SYMBOL_GPL(vmem_vm_ops); + +static int vmem_open(struct 
inode *inode, struct file *flip)
+{
+	flip->private_data = NULL;
+	return 0;
+}
+
+static loff_t vmem_llseek(struct file *filp, loff_t offset, int whence)
+{
+	loff_t newpos = 256UL << 30;
+
+	return newpos;
+}
+
+static int vmem_release(struct inode *inode, struct file *flip)
+{
+	return 0;
+}
+
+static int vmem_mmap(struct file *flip, struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	struct vmem_info *info;
+	size_t size = vma->vm_end - vma->vm_start;
+	int ret;
+
+	if (!(vma->vm_flags & VM_SHARED)) {
+		pr_err("%s: mapping must be shared\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!sw64_kvm_pool)
+		return -ENOMEM;
+
+	if (flip->private_data == NULL) {
+		addr = gen_pool_alloc(sw64_kvm_pool, round_up(size, 8 << 20));
+		if (!addr)
+			return -ENOMEM;
+
+		info = kzalloc(sizeof(struct vmem_info), GFP_KERNEL);
+		pr_info("guest phys addr=%#lx, size=%#lx\n", addr, size);
+		info->start = addr;
+		info->size = size;
+		flip->private_data = (void *)info;
+	} else {
+		info = flip->private_data;
+		addr = info->start;
+	}
+
+	vma->vm_private_data = (void *)info;
+	vma->vm_ops = &vmem_vm_ops;
+	vma->vm_ops->open(vma);
+
+	/* TODO: handle size bigger than vm_mem_size */
+	pr_info("sw64_vmem: vm_start=%#lx, size=%#lx\n", vma->vm_start, size);
+
+	ret = vmem_vm_insert_page(vma);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static const struct file_operations vmem_fops = {
+	.owner = THIS_MODULE,
+	.open = vmem_open,
+	.llseek = vmem_llseek,
+	.release = vmem_release,
+	.mmap = vmem_mmap,
+};
+
+static struct miscdevice vmem_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "sw64_vmem",
+	.fops = &vmem_fops,
+};
+
+int __init vmem_init(void)
+{
+	int err;
+
+	err = misc_register(&vmem_dev);
+	if (err != 0) {
+		pr_err("Could not register sw64_vmem device\n");
+		return err;
+	}
+	return 0;
+}
+
+void vmem_exit(void)
+{
+	misc_deregister(&vmem_dev);
+}
diff --git a/arch/sw_64/lib/Kconfig b/arch/sw_64/lib/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..e22751a457ceb1053fddd5411f90cf14d02af20a
--- /dev/null
+++ b/arch/sw_64/lib/Kconfig
@@ -0,0 +1,47 @@
+# SPDX-License-Identifier: GPL-2.0
+menu "Library optimization options"
+
+config DEEP_CLEAR_PAGE
+	bool "Clear Page with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the clear
+	  page routine. Say N if you want to use the generic version.
+
+config DEEP_CLEAR_USER
+	bool "Clear User with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the clear
+	  user routine. Say N if you want to use the generic version.
+
+config DEEP_COPY_PAGE
+	bool "Copy Page with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the copy
+	  page routine. Say N if you want to use the generic version.
+
+config DEEP_COPY_USER
+	bool "Copy User with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the copy
+	  user routine. Say N if you want to use the generic version.
+
+
+config DEEP_MEMCPY
+	bool "Memory Copy with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the memory
+	  copy routine. Say N if you want to use the generic version.
+
+config DEEP_MEMSET
+	bool "Memory Set with SIMD optimization"
+	default y
+	help
+	  This option enables the use of the SIMD version of the memory
+	  set routine. Say N if you want to use the generic version.
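+
+# Note (a sketch of the build wiring, see arch/sw_64/lib/Makefile below):
+# each option above swaps in the SIMD implementation at build time via
+# the pattern
+#
+#   lib-memset-y                     := memset.o
+#   lib-memset-$(CONFIG_DEEP_MEMSET) := deep-memset.o
+#
+# so exactly one implementation of each routine is linked into the image.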
+ +endmenu diff --git a/arch/sw_64/lib/Makefile b/arch/sw_64/lib/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e6455bb5113911fa7c2fb599449aad5ef1391fea --- /dev/null +++ b/arch/sw_64/lib/Makefile @@ -0,0 +1,53 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for sw-specific library files.. +# + +asflags-y := $(KBUILD_CFLAGS) +ccflags-y := -Werror + +lib-y = __divlu.o __remlu.o __divwu.o __remwu.o \ + udelay.o \ + memmove.o \ + checksum.o \ + csum_partial_copy.o \ + fpreg.o \ + strcpy.o \ + strncpy.o \ + fls.o \ + csum_ipv6_magic.o + +lib-clear_page-y := clear_page.o +lib-clear_page-$(CONFIG_DEEP_CLEAR_PAGE) := deep-clear_page.o + +lib-clear_user-y := clear_user.o +lib-clear_user-$(CONFIG_DEEP_CLEAR_USER) := deep-clear_user.o + +lib-copy_page-y := copy_page.o +lib-copy_page-$(CONFIG_DEEP_COPY_PAGE) := deep-copy_page.o + +lib-copy_user-y := copy_user.o +lib-copy_user-$(CONFIG_DEEP_COPY_USER) := deep-copy_user.o + +lib-memcpy-y := memcpy.o +lib-memcpy-$(CONFIG_DEEP_MEMCPY) := deep-memcpy.o + +lib-memset-y := memset.o +lib-memset-$(CONFIG_DEEP_MEMSET) := deep-memset.o + +lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o + +lib-y += $(lib-clear_page-y) $(lib-clear_user-y) $(lib-copy_page-y) $(lib-copy_user-y) $(lib-memcpy-y) $(lib-memset-y) + +obj-y = iomap.o +obj-y += iomap_copy.o + +# The division routines are built from single source, with different defines. +AFLAGS___divlu.o = -DDIV +AFLAGS___remlu.o = -DREM +AFLAGS___divwu.o = -DDIV -DINTSIZE +AFLAGS___remwu.o = -DREM -DINTSIZE + +$(addprefix $(obj)/,__divlu.o __remlu.o __divwu.o __remwu.o): \ + $(src)/divide.S FORCE + $(call if_changed_rule,as_o_S) diff --git a/arch/sw_64/lib/checksum.c b/arch/sw_64/lib/checksum.c new file mode 100644 index 0000000000000000000000000000000000000000..d1314caa15bf44591ec1f6121017a77da16c6e2e --- /dev/null +++ b/arch/sw_64/lib/checksum.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file contains network checksum routines that are better done + * in an architecture-specific manner due to speed.. + * Comments in other versions indicate that the algorithms are from RFC1071 + */ +#include +#include +#include +#include + +/* + * computes the checksum of the TCP/UDP pseudo-header + * returns a 16-bit checksum, already complemented. + */ +__sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + return (__force __sum16)~from64to16( + (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8)); +} +EXPORT_SYMBOL(csum_tcpudp_magic); + +__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, + __u32 len, __u8 proto, __wsum sum) +{ + unsigned long result; + + result = (__force u64)saddr + (__force u64)daddr + + (__force u64)sum + ((len + proto) << 8); + + /* + * Fold down to 32-bits so we don't lose in the typedef-less + * network stack. + * + * 64 to 33 + */ + result = (result & 0xffffffff) + (result >> 32); + /* 33 to 32 */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_tcpudp_nofold); + +/* + * Do a 64-bit checksum on an arbitrary memory area.. 
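+ *
+ * The accumulation below is an end-around-carry (one's-complement)
+ * sum kept in 64 bits: each addition re-adds its own carry-out via
+ *
+ *	checksum += word;
+ *	checksum += (checksum < word);    1 iff the addition wrapped
+ *
+ * and from64to16() finally folds the 64-bit total down to the 16-bit
+ * Internet checksum.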
+ */ +static inline unsigned long do_csum(const unsigned char *buff, int len) +{ + const unsigned long *dst = (unsigned long *)buff; + unsigned long doff = 7 & (unsigned long) dst; + unsigned long checksum = 0; + unsigned long word, patch; + unsigned long partial_dest, second_dest; + + len -= 8; + + if (!doff) { + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + + checksum += word; + checksum += (checksum < word); + } else { + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + } + + return from64to16(checksum); +} + +/* + * This is a version of ip_compute_csum() optimized for IP headers, + * which always checksum on 4 octet boundaries. + */ +__sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + return (__force __sum16)~do_csum(iph, ihl*4); +} +EXPORT_SYMBOL(ip_fast_csum); + +/* + * computes the checksum of a memory block at buff, length len, + * and adds in "sum" (32-bit) + * + * returns a 32-bit number suitable for feeding into itself + * or csum_tcpudp_magic + * + * this function must be called with even lengths, except + * for the last fragment, which may be odd + * + * it's best to have buff aligned on a 32-bit boundary + */ +__wsum csum_partial(const void *buff, int len, __wsum sum) +{ + unsigned long result = do_csum(buff, len); + + /* add in old sum, and carry.. */ + result += (__force u32)sum; + /* 32+c bits -> 32 bits */ + result = (result & 0xffffffff) + (result >> 32); + return (__force __wsum)result; +} +EXPORT_SYMBOL(csum_partial); + +/* + * this routine is used for miscellaneous IP-like checksums, mainly + * in icmp.c + */ +__sum16 ip_compute_csum(const void *buff, int len) +{ + return (__force __sum16)~from64to16(do_csum(buff, len)); +} +EXPORT_SYMBOL(ip_compute_csum); diff --git a/arch/sw_64/lib/clear_page.S b/arch/sw_64/lib/clear_page.S new file mode 100644 index 0000000000000000000000000000000000000000..e1cc7cddfd2f70e403afdb8bb8a9ca9e69e36d18 --- /dev/null +++ b/arch/sw_64/lib/clear_page.S @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. 
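+ *
+ * The loop below clears 128 bytes per iteration with sixteen
+ * non-cacheable 8-byte stores (stl_nc); 64 iterations cover the 8 KiB
+ * page, and the trailing memb makes the write-combined stores visible
+ * before the page is handed out. Roughly (helper name illustrative):
+ *
+ *	for (i = 0; i < 64; i++, p += 128)
+ *		zero_128_bytes_nontemporal(p);
+ *	memory_barrier();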
+ */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + + stl_nc $31, 0x0($16) + stl_nc $31, 0x8($16) + stl_nc $31, 0x10($16) + stl_nc $31, 0x18($16) + + stl_nc $31, 0x20($16) + stl_nc $31, 0x28($16) + stl_nc $31, 0x30($16) + stl_nc $31, 0x38($16) + + stl_nc $31, 0x40($16) + stl_nc $31, 0x48($16) + stl_nc $31, 0x50($16) + stl_nc $31, 0x58($16) + + stl_nc $31, 0x60($16) + stl_nc $31, 0x68($16) + subl $0, 1, $0 + + stl_nc $31, 0x70($16) + stl_nc $31, 0x78($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/clear_user.S b/arch/sw_64/lib/clear_user.S new file mode 100644 index 0000000000000000000000000000000000000000..5ac77fc8ca0d70adc59b232126c614b64e688a31 --- /dev/null +++ b/arch/sw_64/lib/clear_user.S @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EX(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $exception-99b($31); \ + .previous + + .set noat + .set noreorder + .align 4 + + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 + .prologue 0 +__clear_user: + and $17, $17, $0 + and $16, 7, $4 + beq $0, $zerolength + addl $0, $4, $1 + and $1, 7, $2 + srl $1, 3, $1 + beq $4, $loop + + subl $4, 8, $4 + addl $0, $4, $0 + beq $1, $oneword + +$head: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $head + subl $1, 1, $1 + br $loop + unop + +$oneword: + EX(stb $31, 0($16)) + addl $16, 1, $16 + addl $4, 1, $4 + bne $4, $oneword + clr $0 + +$zerolength: +$exception: + ret $31, ($26), 1 + +$loop: + and $1, 3, $4 + beq $4, 1f + +0: EX(stl $31, 0($16)) + subl $0, 8, $0 + subl $4, 1, $4 + addl $16, 8, $16 + bne $4, 0b + unop + +1: bic $1, 3, $1 + beq $1, $tail + +2: EX(stl $31, 0($16)) + subl $0, 8, $0 + EX(stl $31, 8($16)) + subl $0, 8, $0 + EX(stl $31, 16($16)) + subl $0, 8, $0 + EX(stl $31, 24($16)) + subl $0, 8, $0 + subl $1, 4, $1 + addl $16, 32, $16 + bne $1, 2b + +$tail: + bne $2, 1f + ret $31, ($26), 1 + +1: + EX(stb $31, 0($16)) + addl $16, 1, $16 + subl $2, 1, $2 + bne $2, 1b + clr $0 + ret $31, ($26), 1 + + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/copy_page.S b/arch/sw_64/lib/copy_page.S new file mode 100644 index 0000000000000000000000000000000000000000..898472c36c80bcd6975b7e300665b665b3ffd6b7 --- /dev/null +++ b/arch/sw_64/lib/copy_page.S @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
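+ *
+ * Each iteration below moves 128 bytes: four groups of four ldl loads
+ * paired with non-cacheable stl_nc stores, plus an ldwe into $f31 that
+ * acts as a software prefetch of the source three 128-byte blocks
+ * ahead. Roughly (helper names illustrative):
+ *
+ *	for (i = 0; i < 64; i++, src += 128, dst += 128) {
+ *		prefetch(src + 3 * 128);
+ *		copy_128_bytes_nontemporal(dst, src);
+ *	}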
+ */ +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + ldl $0, 0($17) + ldl $1, 8($17) + ldl $2, 16($17) + ldl $3, 24($17) + + stl_nc $0, 0($16) + stl_nc $1, 8($16) + stl_nc $2, 16($16) + stl_nc $3, 24($16) + + ldl $4, 32($17) + ldl $5, 40($17) + ldl $6, 48($17) + ldl $7, 56($17) + + stl_nc $4, 32($16) + stl_nc $5, 40($16) + stl_nc $6, 48($16) + stl_nc $7, 56($16) + + ldl $0, 64($17) + ldl $1, 72($17) + ldl $2, 80($17) + ldl $3, 88($17) + + stl_nc $0, 64($16) + stl_nc $1, 72($16) + stl_nc $2, 80($16) + stl_nc $3, 88($16) + + ldl $4, 96($17) + ldl $5, 104($17) + ldl $6, 112($17) + ldl $7, 120($17) + + stl_nc $4, 96($16) + stl_nc $5, 104($16) + stl_nc $6, 112($16) + stl_nc $7, 120($16) + + ldwe $f31, 3 * 0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/copy_user.S b/arch/sw_64/lib/copy_user.S new file mode 100644 index 0000000000000000000000000000000000000000..2c3dd0b5656ced04b9ccfd7eeaea593d56c45ed7 --- /dev/null +++ b/arch/sw_64/lib/copy_user.S @@ -0,0 +1,106 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copy to/from user space, handling exceptions as we go.. This + * isn't exactly pretty. + * + * This is essentially the same as "memcpy()", but with a few twists. + * Notably, we have to make sure that $0 is always up-to-date and + * contains the right "bytes left to copy" value (and that it is updated + * only _after_ a successful copy). There is also some rather minor + * exception setup stuff.. + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define EXI(x,y...) \ + 99: x,##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitin-99b($31); \ + .previous + +#define EXO(x,y...) 
\ + 99: x, ##y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $exitout-99b($31); \ + .previous + + .set noat + .align 4 + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + and $18, $18, $0 + and $16, 7, $3 + beq $0, $35 + beq $3, $36 + subl $3, 8, $3 + .align 4 +$37: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $3, 1, $3 + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + beq $0, $41 + bne $3, $37 +$36: + and $17, 7, $1 + bic $0, 7, $4 + beq $1, $43 + beq $4, $48 + EXI(ldl_u $3, 0($17)) + .align 4 +$50: + EXI(ldl_u $2, 8($17)) + subl $4, 8, $4 + extll $3, $17, $3 + exthl $2, $17, $1 + bis $3, $1, $1 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bis $2, $2, $3 + bne $4, $50 +$48: + beq $0, $41 + .align 4 +$57: + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + subl $0, 1, $0 + addl $16, 1, $16 + addl $17, 1, $17 + bne $0, $57 + br $31, $41 + .align 4 +$43: + beq $4, $65 + .align 4 +$66: + EXI(ldl $1, 0($17)) + subl $4, 8, $4 + EXO(stl $1,0($16)) + addl $17, 8, $17 + subl $0, 8, $0 + addl $16, 8, $16 + bne $4, $66 +$65: + beq $0, $41 + EXI(ldbu $1, 0($17)) + EXO(stb $1, 0($16)) + addl $17, 1, $17 + addl $16, 1, $16 + subl $0, 1, $0 + br $31, $65 +$41: +$35: +$exitin: +$exitout: + ret $31, ($26), 1 + + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/csum_ipv6_magic.S b/arch/sw_64/lib/csum_ipv6_magic.S new file mode 100644 index 0000000000000000000000000000000000000000..755e1c13cb25ee04ecab64651080ef6d4e905781 --- /dev/null +++ b/arch/sw_64/lib/csum_ipv6_magic.S @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Richard Henderson + * + * unsigned short csum_ipv6_magic(struct in6_addr *saddr, + * struct in6_addr *daddr, __u32 len, + * unsigned short proto, unsigned int csum); + * + * Misalignment handling (which costs 16 instructions / 8 cycles) + * added by Ivan Kokshaysky + */ +#include + .globl csum_ipv6_magic + .align 4 + .ent csum_ipv6_magic + .frame $30, 0, $26, 0 +csum_ipv6_magic: + .prologue 0 + + ldl_u $0, 0($16) + zapnot $20, 15, $20 + exthl $18, 1, $4 + ldl_u $21, 7($16) + + extlb $18, 1, $5 + ldl_u $1, 8($16) + extlb $18, 2, $6 + ldl_u $22, 15($16) + + extlb $18, 3, $18 + ldl_u $2, 0($17) + sra $4, 32, $4 + ldl_u $23, 7($17) + + extll $0, $16, $0 + ldl_u $3, 8($17) + exthl $21, $16, $21 + ldl_u $24, 15($17) + + sll $5, 16, $5 + or $0, $21, $0 + extll $1, $16, $1 + addl $20, $0, $20 + + exthl $22, $16, $22 + cmpult $20, $0, $0 + sll $6, 8, $6 + or $1, $22, $1 + + extll $2, $17, $2 + or $4, $18, $18 + exthl $23, $17, $23 + or $5, $6, $5 + + extll $3, $17, $3 + or $2, $23, $2 + exthl $24, $17, $24 + or $18, $5, $18 + + exthh $19, 7, $7 + or $3, $24, $3 + extlb $19, 1, $19 + addl $20, $1, $20 + + or $19, $7, $19 + cmpult $20, $1, $1 + sll $19, 48, $19 + + sra $19, 32, $19 + addl $20, $2, $20 + cmpult $20, $2, $2 + addl $20, $3, $20 + + cmpult $20, $3, $3 + addl $20, $18, $20 + cmpult $20, $18, $18 + addl $20, $19, $20 + + cmpult $20, $19, $19 + addl $0, $1, $0 + addl $2, $3, $2 + addl $18, $19, $18 + + addl $0, $2, $0 + addl $20, $18, $20 + addl $0, $20, $0 + unop + + extlh $0, 2, $2 + zapnot $0, 3, $3 + extlh $0, 4, $1 + addl $2, $3, $3 + + extlh $0, 6, $0 + addl $3, $1, $3 + addl $0, $3, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + unop + + extlh $0, 2, $1 + zapnot $0, 3, $0 + addl $0, $1, $0 + not $0, $0 + + zapnot $0, 3, $0 + ret + + .end csum_ipv6_magic + EXPORT_SYMBOL(csum_ipv6_magic) diff --git 
a/arch/sw_64/lib/csum_partial_copy.c b/arch/sw_64/lib/csum_partial_copy.c new file mode 100644 index 0000000000000000000000000000000000000000..1a8c18757e095f289d4bed109d37cd4b6c0f2dbb --- /dev/null +++ b/arch/sw_64/lib/csum_partial_copy.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * csum_partial_copy - do IP checksumming and copy + * + * (C) Copyright 1996 Linus Torvalds + * + * Don't look at this too closely - you'll go mad. The things + * we do for performance.. + */ + +#include +#include +#include +#include + + +#define ldl_u(x, y) \ + __asm__ __volatile__("ldl_u %0, %1":"=r" (x):"m" (*(const unsigned long *)(y))) + +#define stl_u(x, y) \ + __asm__ __volatile__("stl_u %1, %0":"=m" (*(unsigned long *)(y)):"r" (x)) + +static inline void stll_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; doff < 8; i++, doff++) + *((char *)dst + i) = *((char *)&data + i); +} + +static inline void sthl_u(unsigned long data, unsigned long *dst) +{ + int i = 0; + unsigned long doff = (unsigned long)dst & 7; + + for (; i < doff; i++) + *((char *)dst + 8 - doff + i) = *((char *)&data + 8 - doff + i); +} + +#define __get_word(insn, x, ptr) \ +({ \ + long __guu_err; \ + __asm__ __volatile__( \ + "1: "#insn" %0,%2\n" \ + "2:\n" \ + ".section __ex_table,\"a\"\n" \ + " .long 1b - .\n" \ + " ldi %0,2b-1b(%1)\n" \ + ".previous" \ + : "=r"(x), "=r"(__guu_err) \ + : "m"(__m(ptr)), "1"(0)); \ + __guu_err; \ +}) + +static inline unsigned long +csum_partial_cfu_dest_aligned(const unsigned long __user *src, + unsigned long *dst, long len) +{ + unsigned long word; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + while (len > 0) { + word = *dst; + checksum += word; + checksum += (checksum < word); + dst++; + len -= 8; + } + len += 8; + word = *dst; + + if (len != 8) + maskll(word, len, word); + checksum += word; + checksum += (checksum < word); + + return checksum; +} + +static inline unsigned long +csum_partial_cfu_dest_unaligned(const unsigned long __user *src, + unsigned long *dst, unsigned long doff, long len) +{ + unsigned long word, patch; + unsigned long partial_dest, second_dest; + unsigned long checksum = ~0U; + int err = 0; + + err = __copy_from_user(dst, src, len+8); + + dst = (unsigned long *)((unsigned long)dst & (~7UL)); + word = *dst; + inshl(word, 8 - doff, partial_dest); + dst++; + + while (len >= 0) { + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + checksum += patch; + checksum += (checksum < patch); + inshl(word, 8 - doff, partial_dest); + dst++; + len -= 8; + } + + len += 8; + word = *dst; + insll(word, 8 - doff, second_dest); + patch = partial_dest | second_dest; + maskll(patch, len, patch); + checksum += patch; + checksum += (checksum < patch); + + return checksum; +} + +static __wsum __csum_and_copy(const void __user *src, void *dst, int len) +{ + unsigned long checksum; + unsigned long doff = 7 & (unsigned long) dst; + + if (!doff) { + checksum = csum_partial_cfu_dest_aligned( + (const unsigned long __user *) src, + (unsigned long *) dst, len-8); + } else { + checksum = csum_partial_cfu_dest_unaligned( + (const unsigned long __user *) src, + (unsigned long *) dst, doff, len-8); + } + return (__force __wsum)from64to16(checksum); +} + +__wsum +csum_and_copy_from_user(const void __user *src, void *dst, int len) +{ + if (!access_ok(src, len)) + return 0; + return __csum_and_copy(src, dst, len); +} 
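+/*
+ * Usage sketch (buffer names illustrative): a caller that checksums
+ * while pulling a datagram out of user space would do
+ *
+ *	__wsum csum = csum_and_copy_from_user(ubuf, kbuf, len);
+ *
+ * Note that 0 is also the "access_ok() rejected the range" result, as
+ * in other ports of this helper.
+ */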
+EXPORT_SYMBOL(csum_and_copy_from_user); + +__wsum +csum_partial_copy_nocheck(const void *src, void *dst, int len) +{ + return __csum_and_copy((__force const void __user *)src, + dst, len); +} +EXPORT_SYMBOL(csum_partial_copy_nocheck); diff --git a/arch/sw_64/lib/deep-clear_page.S b/arch/sw_64/lib/deep-clear_page.S new file mode 100644 index 0000000000000000000000000000000000000000..52a3db33fc1736c4497bd3c4e670f86e2f32aa0d --- /dev/null +++ b/arch/sw_64/lib/deep-clear_page.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Zero an entire page. + */ +#include + .text + .align 4 + .global clear_page + .ent clear_page +clear_page: + .prologue 0 + + ldi $0,64 + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + +/* + stl_nc $31,0x0($16) + stl_nc $31,0x8($16) + stl_nc $31,0x10($16) + stl_nc $31,0x18($16) + + stl_nc $31,0x20($16) + stl_nc $31,0x28($16) + stl_nc $31,0x30($16) + stl_nc $31,0x38($16) + + stl_nc $31,0x40($16) + stl_nc $31,0x48($16) + stl_nc $31,0x50($16) + stl_nc $31,0x58($16) + + stl_nc $31,0x60($16) + stl_nc $31,0x68($16) + stl_nc $31,0x70($16) + stl_nc $31,0x78($16) +*/ + + vstd_nc $f31, 0x0($16) + vstd_nc $f31, 0x20($16) + subl $0, 1, $0 + vstd_nc $f31, 0x40($16) + + vstd_nc $f31, 0x60($16) + addl $16, 128, $16 + bne $0, 1b + + memb + ret + + .end clear_page + EXPORT_SYMBOL(clear_page) diff --git a/arch/sw_64/lib/deep-clear_user.S b/arch/sw_64/lib/deep-clear_user.S new file mode 100644 index 0000000000000000000000000000000000000000..c81418ed99a26b7bfc197863bb0ecc911b43a62c --- /dev/null +++ b/arch/sw_64/lib/deep-clear_user.S @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Contributed by Mao Minkai + * + * Zero user space, handling exceptions as we go. + * + * We have to make sure that $0 is always up-to-date and contains the + * right "bytes left to zero" value (and that it is updated only _after_ + * a successful copy). There is also some rather minor exception setup + * stuff. + * + */ +#include +/* Allow an exception for an insn; exit if we get one. */ +#define FIXUP_LDST(x,y...) \ + 99: x,##y; \ + .section __ex_table,"a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $18: bytes left to copy + * + */ + .globl __clear_user + .ent __clear_user +__clear_user: + .prologue 0 + bis $31, $31, $7 + mov $17, $18 + bis $31, $31, $17 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/sw_64/lib/deep-copy_page.S b/arch/sw_64/lib/deep-copy_page.S new file mode 100644 index 0000000000000000000000000000000000000000..a9b9d97f318af18a02bc7307d8d7d1b0cc1f0088 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_page.S @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * arch/sw/lib/copy_page.S + * + * Copy an entire page. 
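+ *
+ * This SIMD variant moves 32 bytes at a time through $f16, so the
+ * prologue spills the caller's $f16 to a 32-byte-aligned stack slot
+ * and the epilogue restores it (plus CSR_WR_FREGS on C4 parts). The
+ * pattern, roughly:
+ *
+ *	sp  -= 0x60;                  room for one vector + alignment
+ *	slot = (sp + 0x40) & ~0x1f;   32-byte aligned spill slot
+ *	save $f16 to slot;
+ *	... 128-byte vldd/vstd_nc copy loop ...
+ *	restore $f16 from slot;
+ *	sp  += 0x60;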
+ */ +#include +#include + + .text + .align 4 + .global copy_page + .ent copy_page +copy_page: + .prologue 0 + + ldi $18, 64 + subl $sp, 0x60, $sp + ldi $4, 0x40($sp) + stl $4, 0($sp) + bic $4, 0x1f, $4 + vstd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrr $5, CSR_WR_FREGS +#endif + +/* Optimize by GUOY from SOC 2013-06-04 */ +1: + vldd $f16, 0($17) + vstd_nc $f16, 0($16) + + vldd $f16, 32($17) + vstd_nc $f16, 32($16) + + vldd $f16, 64($17) + vstd_nc $f16, 64($16) + + vldd $f16, 96($17) + vstd_nc $f16, 96($16) + + ldwe $f31, 5*0x80($17) + subl $18, 1, $18 + addl $17, 128, $17 + + addl $16, 128, $16 + bne $18, 1b + + memb + ldl $4, 0($sp) + ldi $4, 0x40($sp) + bic $4, 0x1f, $4 + vldd $f16, 0($4) +#ifdef CONFIG_SUBARCH_C4 + csrw $5, CSR_WR_FREGS +#endif + addl $sp, 0x60, $sp + ret + + .end copy_page + EXPORT_SYMBOL(copy_page) diff --git a/arch/sw_64/lib/deep-copy_template.S b/arch/sw_64/lib/deep-copy_template.S new file mode 100644 index 0000000000000000000000000000000000000000..7705eb3f36d4edab9e09c49bafbe3129086b6252 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template.S @@ -0,0 +1,301 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $4: 8-byte misalignment of src when dest is 8-byte aligned + * $5: 32-byte misalignment of src when dest is 32-byte aligned + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + +#define SAVE_SIMD_U_REGS \ + ldi $sp, -0xc0($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + vstd $f4, 0x40($23); \ + vstd $f5, 0x60($23); \ + vstd $f3, 0x80($23); \ + ldi $7, 2 + +#define RESTORE_SIMD_U_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + vldd $f4, 0x40($23); \ + vldd $f5, 0x60($23); \ + vldd $f3, 0x80($23); \ + ldi $sp, 0xc0($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + +$byte_loop_head: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + and $17, 7, $4 + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + bne $4, $quad_u_loop_head + +$quad_loop_head: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + and $17, 31, $5 + bne $5, $prep_simd_u_loop + +$prep_simd_loop: + SAVE_SIMD_REGS + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 4 +$simd_loop_nc: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc 
store instructions + br $31, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + bne $4, $prep_quad_u_loop_tail + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + bne $4, $move_one_quad_u + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out + +/* misaligned src and dst */ +$quad_u_loop_head: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 7($17) ) + extll $2, $4, $2 + exthl $3, $4, $3 + bis $2, $3, $2 + FIXUP_LDST( stl $2, 0($16) ) + addl $16, 8, $16 + addl $17, 8, $17 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_u_loop_head + +$prep_simd_u_loop: + SAVE_SIMD_U_REGS + andnot $17, 31, $3 + ldi $2, 256($31) + sll $5, 3, $1 + subl $2, $1, $2 + sll $1, 29, $1 + sll $2, 29, $2 + ifmovd $1, $f1 + ifmovd $2, $f2 + FIXUP_LDST( vldd $f4, 0($3) ) + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_u_loop + + .align 4 +$simd_u_loop_nc: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f3, $f4, $f31, $f3 + FIXUP_LDST( vstd_nc $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f5 + FIXUP_LDST( vstd_nc $f5, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop_nc + memb # required for _nc store instructions + br $31, $simd_u_loop_end + + .align 4 +$simd_u_loop: + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + FIXUP_LDST( vldd $f4, 64($3) ) + srlow $f5, $f1, $f5 + sllow $f4, $f2, $f3 + vlogfc $f5, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 32($16) ) + subl $18, 64, $18 + addl $3, 64, $3 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_u_loop + +$simd_u_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd_u + FIXUP_LDST( vldd $f5, 32($3) ) + srlow $f4, $f1, $f4 + sllow $f5, $f2, $f3 + vlogfc $f4, $f3, $f31, $f3 + FIXUP_LDST( vstd $f3, 0($16) ) + subl $18, 32, $18 + addl $3, 32, $3 + addl $16, 32, $16 + +$no_more_simd_u: + RESTORE_SIMD_U_REGS + bis $3, $5, $17 + br $31, $simd_end + +$prep_quad_u_loop_tail: + FIXUP_LDST( ldl_u $2, 0($17) ) + .align 4 +$quad_u_loop_tail: + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + FIXUP_LDST( ldl_u $2, 16($17) ) + extll $3, $4, $24 + exthl $2, $4, $25 + bis $24, $25, $24 + FIXUP_LDST( stl 
$24, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_u_loop_tail + br $31, $quad_loop_end + +$move_one_quad_u: + FIXUP_LDST( ldl_u $2, 0($17) ) + FIXUP_LDST( ldl_u $3, 8($17) ) + extll $2, $4, $22 + exthl $3, $4, $23 + bis $22, $23, $22 + FIXUP_LDST( stl $22, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + br $31, $byte_loop_tail diff --git a/arch/sw_64/lib/deep-copy_template_c4.S b/arch/sw_64/lib/deep-copy_template_c4.S new file mode 100644 index 0000000000000000000000000000000000000000..e0740874dfa32722b903793a9eaa797ba5007f1d --- /dev/null +++ b/arch/sw_64/lib/deep-copy_template_c4.S @@ -0,0 +1,108 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd and simd_u loop + * $16: latest dest, clobbered + * $17: latest src, clobbered + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x60($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + vstd $f2, 0x20($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vldd $f1, 0($23); \ + vldd $f2, 0x20($23); \ + ldi $sp, 0x60($sp); \ + bis $31, $31, $7 + + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 4 +$simd_loop: + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vldd $f2, 32($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f2, 32($16) ) + subl $18, 64, $18 + addl $17, 64, $17 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vldd $f1, 0($17) ) + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $17, 32, $17 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 4 +$quad_loop_tail: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( ldl $3, 8($17) ) + FIXUP_LDST( stl $2, 0($16) ) + FIXUP_LDST( stl $3, 8($16) ) + subl $18, 16, $18 + addl $17, 16, $17 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( ldl $2, 0($17) ) + FIXUP_LDST( stl $2, 0($16) ) + subl $18, 8, $18 + addl $17, 8, $17 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( ldbu $2, 0($17) ) + FIXUP_LDST( stb $2, 0($16) ) + subl $18, 1, $18 + addl $17, 1, $17 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-copy_user.S b/arch/sw_64/lib/deep-copy_user.S new file mode 100644 index 0000000000000000000000000000000000000000..b79f8f3f0f4ac1e85dd5d2f130e88f90c1e062c0 --- /dev/null +++ b/arch/sw_64/lib/deep-copy_user.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +/* Allow an exception for an insn; exit if we get one. 
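+ *
+ * Each FIXUP_LDST() emits an __ex_table entry next to the access: the
+ * first word records the faulting instruction's PC-relative address,
+ * and the "ldi $31, $out-99b($31)" pseudo-entry encodes the fixup
+ * target, so a fault lands on $out with $18 still holding the bytes
+ * not yet copied. Typical use in the template:
+ *
+ *	FIXUP_LDST( ldl $2, 0($17) )    a fault here branches to $out
+ *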
*/ +#define FIXUP_LDST(x, y) \ + 99: x, y; \ + .section __ex_table, "a"; \ + .long 99b - .; \ + ldi $31, $out-99b($31); \ + .previous + +/* + * $7: SIMD status for C3B + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $7: SIMD status for C4 + * 0: not in simd loop + * 1: in simd and simd_u loop + * $18: bytes left to copy + * + */ + .globl __copy_user + .ent __copy_user +__copy_user: + .prologue 0 + .set noreorder + bis $31, $31, $7 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + bis $31, $18, $0 + beq $7, $return + subl $7, 1, $7 + beq $7, $restore_simd + +#if defined(CONFIG_SUBARCH_C3B) +$restore_simd_u: + RESTORE_SIMD_U_REGS + br $31, $return +#endif + +$restore_simd: + RESTORE_SIMD_REGS + +$return: + ret + .end __copy_user + EXPORT_SYMBOL(__copy_user) diff --git a/arch/sw_64/lib/deep-memcpy.S b/arch/sw_64/lib/deep-memcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..78a6bd85cf016a49948c973ef3a8652a869bb8db --- /dev/null +++ b/arch/sw_64/lib/deep-memcpy.S @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + mov $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-copy_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-copy_template_c4.S" +#endif +$out: + ret + .end memcpy + EXPORT_SYMBOL(memcpy) +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/deep-memset.S b/arch/sw_64/lib/deep-memset.S new file mode 100644 index 0000000000000000000000000000000000000000..c6b5355beec64d3d5557ae7b26c80cbb7c8b114a --- /dev/null +++ b/arch/sw_64/lib/deep-memset.S @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Optimized memset() for SW64 with SIMD instructions + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Fill SIZE bytes pointed to by SRC with CHAR. 
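+ *
+ * The fill byte is first replicated across all eight byte lanes of
+ * $17; in C terms:
+ *
+ *	c &= 0xff;
+ *	c |= c << 8;
+ *	c |= c << 16;
+ *	c |= c << 32;
+ *
+ * and the SIMD path then broadcasts that quadword to a full 32-byte
+ * vector with ifmovd + vcpyf.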
+ * + * Input: + * $16: SRC, clobbered + * $17: CHAR, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: SRC + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr), tmp data + * $2: tmp data + * $3: tmp data + * $4: tmp data + * $5: compare result + * $f10: 32 bytes data (manually saved) + * + */ + +#include +#include + +#define FIXUP_LDST(x, y) \ + x, y + + .set noat + .set noreorder + .text + .align 4 + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + .ent ___memset +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + +#ifdef CONFIG_SUBARCH_C4 + csrr $6, CSR_WR_FREGS +#endif +/* expand 1 byte data to 8 bytes */ + and $17, 0xff, $17 + sll $17, 8, $4 + bis $17, $4, $17 + sll $17, 16, $4 + bis $17, $4, $17 + sll $17, 32, $4 + bis $17, $4, $17 + +__constant_c_memset: + bis $31, $31, $7 + bis $31, $16, $0 +#if defined(CONFIG_SUBARCH_C3B) +#include "deep-set_template.S" +#elif defined(CONFIG_SUBARCH_C4) +#include "deep-set_template_c4.S" +#endif +$out: +#ifdef CONFIG_SUBARCH_C4 + csrw $6, CSR_WR_FREGS +#endif + ret + + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + bis $1, $2, $1 + inslh $17, 6, $4 + bis $1, $3, $1 + bis $1, $4, $17 + br $31, __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/deep-set_template.S b/arch/sw_64/lib/deep-set_template.S new file mode 100644 index 0000000000000000000000000000000000000000..f9073d638468dbb77d991ddbbc276f2f57c865ff --- /dev/null +++ b/arch/sw_64/lib/deep-set_template.S @@ -0,0 +1,133 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memcpy and copy_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * 2: in simd_u loop + * $16: latest dest, clobbered + * $17: 8-byte data to set + * $18: bytes left to copy + * + */ + +#define NC_STORE_THRESHOLD 2048 + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x40($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + vldd $f1, 0($23); \ + ldi $sp, 0x40($sp); \ + bis $31, $31, $7 + + ble $18, $out + and $16, 7, $1 + beq $1, $dest_aligned_8 + + .align 3 +$byte_loop_head: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + ble $18, $out + and $16, 7, $1 + bne $1, $byte_loop_head + +$dest_aligned_8: + cmplt $18, 16, $1 + bne $1, $quad_loop_end + and $16, 31, $1 + beq $1, $dest_aligned_32 + cmplt $18, 64, $1 + bne $1, $simd_end + + .align 3 +$quad_loop_head: + FIXUP_LDST( stl $17, 0($16) ) + addl $16, 8, $16 + subl $18, 8, $18 + and $16, 31, $1 + beq $1, $dest_aligned_32 + br $31, $quad_loop_head + +$dest_aligned_32: + cmplt $18, 64, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + ifmovd $17, $f1 + vcpyf $f1, $f1 + ldi $1, NC_STORE_THRESHOLD($31) + cmple $18, $1, $1 + bne $1, $simd_loop + + .align 3 +$simd_loop_nc: + FIXUP_LDST( vstd_nc $f1, 0($16) ) + FIXUP_LDST( vstd_nc $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop_nc + memb # required for _nc store instructions + br $31, $simd_loop_end + + .align 3 +$simd_loop: + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, 
$no_more_simd + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 3 +$quad_loop_tail: + FIXUP_LDST( stl $17, 0($16) ) + FIXUP_LDST( stl $17, 8($16) ) + subl $18, 16, $18 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( stl $17, 0($16) ) + subl $18, 8, $18 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/deep-set_template_c4.S b/arch/sw_64/lib/deep-set_template_c4.S new file mode 100644 index 0000000000000000000000000000000000000000..2b1bcab8fec96d9495f85477ca4ab87abfbf8308 --- /dev/null +++ b/arch/sw_64/lib/deep-set_template_c4.S @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * template for memset and clear_user with SIMD + * + * $7: SIMD status + * 0: not in simd loop + * 1: in simd loop + * $16: latest dest, clobbered + * $17: 8-byte data to set + * $18: bytes left to copy + * + */ + +#define SAVE_SIMD_REGS \ + ldi $sp, -0x40($sp); \ + addl $sp, 0x1f, $23; \ + bic $23, 0x1f, $23; \ + vstd $f1, 0($23); \ + ldi $7, 1 + +#define RESTORE_SIMD_REGS \ + vldd $f1, 0($23); \ + ldi $sp, 0x40($sp); \ + bis $31, $31, $7 + + ble $18, $out + + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + cmplt $18, 16, $1 + bne $1, $quad_loop_end + cmplt $18, 32, $1 + bne $1, $simd_end + +$prep_simd_loop: + SAVE_SIMD_REGS + ifmovd $17, $f1 + vcpyf $f1, $f1 + cmplt $18, 64, $1 + bne $1, $simd_loop_end + + .align 3 +$simd_loop: + FIXUP_LDST( vstd $f1, 0($16) ) + FIXUP_LDST( vstd $f1, 32($16) ) + subl $18, 64, $18 + addl $16, 64, $16 + cmplt $18, 64, $1 + beq $1, $simd_loop + +$simd_loop_end: + cmplt $18, 32, $1 + bne $1, $no_more_simd + FIXUP_LDST( vstd $f1, 0($16) ) + subl $18, 32, $18 + addl $16, 32, $16 + +$no_more_simd: + RESTORE_SIMD_REGS + +$simd_end: + ble $18, $out + cmplt $18, 16, $1 + bne $1, $quad_loop_end + + .align 3 +$quad_loop_tail: + FIXUP_LDST( stl $17, 0($16) ) + FIXUP_LDST( stl $17, 8($16) ) + subl $18, 16, $18 + addl $16, 16, $16 + cmplt $18, 16, $1 + beq $1, $quad_loop_tail + +$quad_loop_end: + ble $18, $out + cmplt $18, 8, $1 + bne $1, $byte_loop_tail + +$move_one_quad: + FIXUP_LDST( stl $17, 0($16) ) + subl $18, 8, $18 + addl $16, 8, $16 + ble $18, $out + + .align 3 +$byte_loop_tail: + FIXUP_LDST( stb $17, 0($16) ) + subl $18, 1, $18 + addl $16, 1, $16 + bgt $18, $byte_loop_tail + br $31, $out diff --git a/arch/sw_64/lib/divide.S b/arch/sw_64/lib/divide.S new file mode 100644 index 0000000000000000000000000000000000000000..ceef343a6084f79774bfb122e51c34533a56839f --- /dev/null +++ b/arch/sw_64/lib/divide.S @@ -0,0 +1,190 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * (C) 1995 Linus Torvalds + * + * The sw64 chip doesn't provide hardware division, so we have to do it + * by hand. The compiler expects the functions + * + * __divlu: 64-bit unsigned long divide + * __remlu: 64-bit unsigned long remainder + * __divls/__remqs: signed 64-bit + * __divwu/__remlu: unsigned 32-bit + * __divws/__remls: signed 32-bit + * + * These are not normal C functions: instead of the normal + * calling sequence, these expect their arguments in registers + * $24 and $25, and return the result in $27. 
Register $28 may + * be clobbered (assembly temporary), anything else must be saved. + * + * In short: painful. + * + * This is a rather simple bit-at-a-time algorithm: it's very good + * at dividing random 64-bit numbers, but the more usual case where + * the divisor is small is handled better by the DEC algorithm + * using lookup tables. This uses much less memory, though, and is + * nicer on the cache.. Besides, I don't know the copyright status + * of the DEC code. + */ + +/* + * My temporaries: + * $0 - current bit + * $1 - shifted divisor + * $2 - modulus/quotient + * + * $23 - return address + * $24 - dividend + * $25 - divisor + * + * $27 - quotient/modulus + * $28 - compare status + */ +#include + +#define halt .long 0 + +/* + * Select function type and registers + */ +#define mask $0 +#define divisor $1 +#define compare $28 +#define tmp1 $3 +#define tmp2 $4 + +#ifdef DIV +#define DIV_ONLY(x,y...) x, ##y +#define MOD_ONLY(x,y...) +#define func(x) __div##x +#define modulus $2 +#define quotient $27 +#define GETSIGN(x) xor $24, $25, x +#define STACK 48 +#else +#define DIV_ONLY(x,y...) +#define MOD_ONLY(x,y...) x, ##y +#define func(x) __rem##x +#define modulus $27 +#define quotient $2 +#define GETSIGN(x) bis $24, $24, x +#define STACK 32 +#endif + +/* + * For 32-bit operations, we need to extend to 64-bit + */ +#ifdef INTSIZE +#define ufunction func(wu) +#define sfunction func(w) +#define LONGIFY(x) zapnot x, 15, x +#define SLONGIFY(x) addw x, 0, x +#else +#define ufunction func(lu) +#define sfunction func(l) +#define LONGIFY(x) +#define SLONGIFY(x) +#endif + +.set noat +.align 3 +.globl ufunction +.ent ufunction +ufunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + +7: stl $1, 0($30) + bis $25, $25, divisor + stl $2, 8($30) + bis $24, $24, modulus + stl $0, 16($30) + bis $31, $31, quotient + LONGIFY(divisor) + stl tmp1, 24($30) + LONGIFY(modulus) + bis $31, 1, mask + DIV_ONLY(stl tmp2, 32($30)) + beq divisor, 9f # div by zero + +#ifdef INTSIZE + /* + * shift divisor left, using 3-bit shifts for + * 32-bit divides as we can't overflow. Three-bit + * shifts will result in looping three times less + * here, but can result in two loops more later. + * Thus using a large shift isn't worth it (and + * s8add pairs better than a sll..) + */ +1: cmpult divisor, modulus, compare + s8addl divisor, $31, divisor + s8addl mask, $31, mask + bne compare, 1b +#else +1: cmpult divisor, modulus, compare + blt divisor, 2f + addl divisor, divisor, divisor + addl mask, mask, mask + bne compare, 1b +#endif + + /* ok, start to go right again.. */ +2: DIV_ONLY(addl quotient, mask, tmp2) + srl mask, 1, mask + cmpule divisor, modulus, compare + subl modulus, divisor, tmp1 + DIV_ONLY(selne compare, tmp2, quotient, quotient) + srl divisor, 1, divisor + selne compare, tmp1, modulus, modulus + bne mask, 2b + +9: ldl $1, 0($30) + ldl $2, 8($30) + ldl $0, 16($30) + ldl tmp1, 24($30) + DIV_ONLY(ldl tmp2, 32($30)) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end ufunction + EXPORT_SYMBOL(ufunction) +/* + * Uhh.. Ugly signed division. I'd rather not have it at all, but + * it's needed in some circumstances. There are different ways to + * handle this, really. This does: + * -a / b = a / -b = -(a / b) + * -a % b = -(a % b) + * a % -b = a % b + * which is probably not the best solution, but at least should + * have the property that (x/y)*y + (x%y) = x. 
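+ *
+ * For example, with these rules -7 / 2 = -(7 / 2) = -3 and
+ * -7 % 2 = -(7 % 2) = -1, and indeed (-3) * 2 + (-1) = -7, so the
+ * property holds.
+ *
+ * sfunction below relies on this: it takes the absolute values of
+ * the operands, calls the unsigned routine, and flips the sign of
+ * the result where required.  As a rough, illustrative C sketch
+ * (not part of this file), the unsigned bit-at-a-time core is:
+ *
+ *	unsigned long quotient = 0, mask = 1;
+ *	while (!(divisor >> 63) && divisor < modulus) {
+ *		divisor <<= 1;		// line the divisor up
+ *		mask <<= 1;
+ *	}
+ *	while (mask) {			// walk the bits back down
+ *		if (divisor <= modulus) {
+ *			modulus -= divisor;
+ *			quotient |= mask;
+ *		}
+ *		divisor >>= 1;
+ *		mask >>= 1;
+ *	}
+ *	// quotient holds x/y; modulus is left holding x%y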
+ */ +.align 3 +.globl sfunction +.ent sfunction +sfunction: + subl $30, STACK, $30 + .frame $30, STACK, $23 + .prologue 0 + bis $24, $25, $28 + SLONGIFY($28) + bge $28, 7b + stl $24, 0($30) + subl $31, $24, $28 + stl $25, 8($30) + sellt $24, $28, $24, $24 # abs($24) + stl $23, 16($30) + subl $31, $25, $28 + stl tmp1, 24($30) + sellt $25, $28, $25, $25 # abs($25) + bsr $23, ufunction + ldl $24, 0($30) + ldl $25, 8($30) + GETSIGN($28) + subl $31, $27, tmp1 + SLONGIFY($28) + ldl $23, 16($30) + sellt $28, tmp1, $27, $27 + ldl tmp1, 24($30) + addl $30, STACK, $30 + ret $31, ($23), 1 + .end sfunction + EXPORT_SYMBOL(sfunction) diff --git a/arch/sw_64/lib/fls.c b/arch/sw_64/lib/fls.c new file mode 100644 index 0000000000000000000000000000000000000000..aa4231f7e472dc1fdca58ea2d63631b64cc2fc4f --- /dev/null +++ b/arch/sw_64/lib/fls.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +#include + +/* This is fls(x)-1, except zero is held to zero. This allows most + * efficient input into extbl, plus it allows easy handling of fls(0)=0. + */ + +const unsigned char __flsm1_tab[256] = { + 0, + 0, + 1, 1, + 2, 2, 2, 2, + 3, 3, 3, 3, 3, 3, 3, 3, + 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, + + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, + + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, + 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, +}; +EXPORT_SYMBOL(__flsm1_tab); diff --git a/arch/sw_64/lib/fpreg.c b/arch/sw_64/lib/fpreg.c new file mode 100644 index 0000000000000000000000000000000000000000..1788703109086d803c4ac0f13277e4eba02a7095 --- /dev/null +++ b/arch/sw_64/lib/fpreg.c @@ -0,0 +1,992 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) Copyright 1998 Linus Torvalds + */ + +#include +#include + +#define STT(reg, val) \ + asm volatile("fimovd $f"#reg", %0" : "=r"(val)) +#define STS(reg, val) \ + asm volatile("fimovs $f"#reg", %0" : "=r"(val)) +#define LDT(reg, val) \ + asm volatile("ifmovd %0, $f"#reg : : "r"(val)) +#define LDS(reg, val) \ + asm volatile("ifmovs %0, $f"#reg : : "r"(val)) +#define VLDD(reg, val) \ + asm volatile("vldd $f"#reg", %0" : : "m"(val) : "memory") +#define VSTD(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") +#define VLDS(reg, val) \ + asm volatile("vlds $f"#reg", %0" : : "m"(val) : "memory") +#define LDWE(reg, val) \ + asm volatile("ldwe $f"#reg", %0" : : "m"(val) : "memory") +#define VSTS(reg, val) \ + asm volatile("vsts $f"#reg", %0" : "=m"(val) : : "memory") +#define STDH(reg, val) \ + asm volatile("vstd $f"#reg", %0" : "=m"(val) : : "memory") + +void +sw64_write_simd_fp_reg_s(unsigned long reg, unsigned long f0, unsigned long f1) +{ + + unsigned long tmpa[4] __aligned(16); + + tmpa[0] = f0; + tmpa[1] = f1; + + switch (reg) { + case 0: + VLDS(0, *tmpa); + break; + case 1: + VLDS(1, *tmpa); + break; + case 2: + VLDS(2, *tmpa); + break; + case 3: + VLDS(3, *tmpa); + break; + case 4: + VLDS(4, *tmpa); + break; + case 5: + VLDS(5, *tmpa); + break; + case 6: + VLDS(6, *tmpa); + break; + case 7: 
+ VLDS(7, *tmpa); + break; + case 8: + VLDS(8, *tmpa); + break; + case 9: + VLDS(9, *tmpa); + break; + case 10: + VLDS(10, *tmpa); + break; + case 11: + VLDS(11, *tmpa); + break; + case 12: + VLDS(12, *tmpa); + break; + case 13: + VLDS(13, *tmpa); + break; + case 14: + VLDS(14, *tmpa); + break; + case 15: + VLDS(15, *tmpa); + break; + case 16: + VLDS(16, *tmpa); + break; + case 17: + VLDS(17, *tmpa); + break; + case 18: + VLDS(18, *tmpa); + break; + case 19: + VLDS(19, *tmpa); + break; + case 20: + VLDS(20, *tmpa); + break; + case 21: + VLDS(21, *tmpa); + break; + case 22: + VLDS(22, *tmpa); + break; + case 23: + VLDS(23, *tmpa); + break; + case 24: + VLDS(24, *tmpa); + break; + case 25: + VLDS(25, *tmpa); + break; + case 26: + VLDS(26, *tmpa); + break; + case 27: + VLDS(27, *tmpa); + break; + case 28: + VLDS(28, *tmpa); + break; + case 29: + VLDS(29, *tmpa); + break; + case 30: + VLDS(30, *tmpa); + break; + case 31: + break; + } + +} + + +void sw64_write_simd_fp_reg_d(unsigned long reg, unsigned long f0, + unsigned long f1, unsigned long f2, unsigned long f3) +{ + unsigned long tmpa[4] __aligned(32); + + tmpa[0] = f0; + tmpa[1] = f1; + tmpa[2] = f2; + tmpa[3] = f3; + + switch (reg) { + case 0: + VLDD(0, *tmpa); + break; + case 1: + VLDD(1, *tmpa); + break; + case 2: + VLDD(2, *tmpa); + break; + case 3: + VLDD(3, *tmpa); + break; + case 4: + VLDD(4, *tmpa); + break; + case 5: + VLDD(5, *tmpa); + break; + case 6: + VLDD(6, *tmpa); + break; + case 7: + VLDD(7, *tmpa); + break; + case 8: + VLDD(8, *tmpa); + break; + case 9: + VLDD(9, *tmpa); + break; + case 10: + VLDD(10, *tmpa); + break; + case 11: + VLDD(11, *tmpa); + break; + case 12: + VLDD(12, *tmpa); + break; + case 13: + VLDD(13, *tmpa); + break; + case 14: + VLDD(14, *tmpa); + break; + case 15: + VLDD(15, *tmpa); + break; + case 16: + VLDD(16, *tmpa); + break; + case 17: + VLDD(17, *tmpa); + break; + case 18: + VLDD(18, *tmpa); + break; + case 19: + VLDD(19, *tmpa); + break; + case 20: + VLDD(20, *tmpa); + break; + case 21: + VLDD(21, *tmpa); + break; + case 22: + VLDD(22, *tmpa); + break; + case 23: + VLDD(23, *tmpa); + break; + case 24: + VLDD(24, *tmpa); + break; + case 25: + VLDD(25, *tmpa); + break; + case 26: + VLDD(26, *tmpa); + break; + case 27: + VLDD(27, *tmpa); + break; + case 28: + VLDD(28, *tmpa); + break; + case 29: + VLDD(29, *tmpa); + break; + case 30: + VLDD(30, *tmpa); + break; + case 31: + break; + } + + +} + + +void sw64_write_simd_fp_reg_ldwe(unsigned long reg, int a) +{ + switch (reg) { + case 0: + LDWE(0, a); + break; + case 1: + LDWE(1, a); + break; + case 2: + LDWE(2, a); + break; + case 3: + LDWE(3, a); + break; + case 4: + LDWE(4, a); + break; + case 5: + LDWE(5, a); + break; + case 6: + LDWE(6, a); + break; + case 7: + LDWE(7, a); + break; + case 8: + LDWE(8, a); + break; + case 9: + LDWE(9, a); + break; + case 10: + LDWE(10, a); + break; + case 11: + LDWE(11, a); + break; + case 12: + LDWE(12, a); + break; + case 13: + LDWE(13, a); + break; + case 14: + LDWE(14, a); + break; + case 15: + LDWE(15, a); + break; + case 16: + LDWE(16, a); + break; + case 17: + LDWE(17, a); + break; + case 18: + LDWE(18, a); + break; + case 19: + LDWE(19, a); + break; + case 20: + LDWE(20, a); + break; + case 21: + LDWE(21, a); + break; + case 22: + LDWE(22, a); + break; + case 23: + LDWE(23, a); + break; + case 24: + LDWE(24, a); + break; + case 25: + LDWE(25, a); + break; + case 26: + LDWE(26, a); + break; + case 27: + LDWE(27, a); + break; + case 28: + LDWE(28, a); + break; + case 29: + LDWE(29, a); + break; + case 30: + 
LDWE(30, a); + break; + case 31: + break; + } +} + + +void sw64_read_simd_fp_m_s(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[2] __aligned(16); + + switch (reg) { + case 0: + VSTS(0, *tmpa); + break; + case 1: + VSTS(1, *tmpa); + break; + case 2: + VSTS(2, *tmpa); + break; + case 3: + VSTS(3, *tmpa); + break; + case 4: + VSTS(4, *tmpa); + break; + case 5: + VSTS(5, *tmpa); + break; + case 6: + VSTS(6, *tmpa); + break; + case 7: + VSTS(7, *tmpa); + break; + case 8: + VSTS(8, *tmpa); + break; + case 9: + VSTS(9, *tmpa); + break; + case 10: + VSTS(10, *tmpa); + break; + case 11: + VSTS(11, *tmpa); + break; + case 12: + VSTS(12, *tmpa); + break; + case 13: + VSTS(13, *tmpa); + break; + case 14: + VSTS(14, *tmpa); + break; + case 15: + VSTS(15, *tmpa); + break; + case 16: + VSTS(16, *tmpa); + break; + case 17: + VSTS(17, *tmpa); + break; + case 18: + VSTS(18, *tmpa); + break; + case 19: + VSTS(19, *tmpa); + break; + case 20: + VSTS(20, *tmpa); + break; + case 21: + VSTS(21, *tmpa); + break; + case 22: + VSTS(22, *tmpa); + break; + case 23: + VSTS(23, *tmpa); + break; + case 24: + VSTS(24, *tmpa); + break; + case 25: + VSTS(25, *tmpa); + break; + case 26: + VSTS(26, *tmpa); + break; + case 27: + VSTS(27, *tmpa); + break; + case 28: + VSTS(28, *tmpa); + break; + case 29: + VSTS(29, *tmpa); + break; + case 30: + VSTS(30, *tmpa); + break; + case 31: + VSTS(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; +} + +void sw64_read_simd_fp_m_d(unsigned long reg, unsigned long *fp_value) +{ + volatile unsigned long tmpa[4] __aligned(32); + + switch (reg) { + case 0: + VSTD(0, *tmpa); + break; + case 1: + VSTD(1, *tmpa); + break; + case 2: + VSTD(2, *tmpa); + break; + case 3: + VSTD(3, *tmpa); + break; + case 4: + VSTD(4, *tmpa); + break; + case 5: + VSTD(5, *tmpa); + break; + case 6: + VSTD(6, *tmpa); + break; + case 7: + VSTD(7, *tmpa); + break; + case 8: + VSTD(8, *tmpa); + break; + case 9: + VSTD(9, *tmpa); + break; + case 10: + VSTD(10, *tmpa); + break; + case 11: + VSTD(11, *tmpa); + break; + case 12: + VSTD(12, *tmpa); + break; + case 13: + VSTD(13, *tmpa); + break; + case 14: + VSTD(14, *tmpa); + break; + case 15: + VSTD(15, *tmpa); + break; + case 16: + VSTD(16, *tmpa); + break; + case 17: + VSTD(17, *tmpa); + break; + case 18: + VSTD(18, *tmpa); + break; + case 19: + VSTD(19, *tmpa); + break; + case 20: + VSTD(20, *tmpa); + break; + case 21: + VSTD(21, *tmpa); + break; + case 22: + VSTD(22, *tmpa); + break; + case 23: + VSTD(23, *tmpa); + break; + case 24: + VSTD(24, *tmpa); + break; + case 25: + VSTD(25, *tmpa); + break; + case 26: + VSTD(26, *tmpa); + break; + case 27: + VSTD(27, *tmpa); + break; + case 28: + VSTD(28, *tmpa); + break; + case 29: + VSTD(29, *tmpa); + break; + case 30: + VSTD(30, *tmpa); + break; + case 31: + VSTD(31, *tmpa); + break; + } + + *fp_value = tmpa[0]; + *(fp_value+1) = tmpa[1]; + *(fp_value+2) = tmpa[2]; + *(fp_value+3) = tmpa[3]; +} + +unsigned long sw64_read_fp_reg(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STT(0, val); + break; + case 1: + STT(1, val); + break; + case 2: + STT(2, val); + break; + case 3: + STT(3, val); + break; + case 4: + STT(4, val); + break; + case 5: + STT(5, val); + break; + case 6: + STT(6, val); + break; + case 7: + STT(7, val); + break; + case 8: + STT(8, val); + break; + case 9: + STT(9, val); + break; + case 10: + STT(10, val); + break; + case 11: + STT(11, val); + break; + case 12: + STT(12, val); + break; + case 13: + STT(13, val); + break; + case 
14: + STT(14, val); + break; + case 15: + STT(15, val); + break; + case 16: + STT(16, val); + break; + case 17: + STT(17, val); + break; + case 18: + STT(18, val); + break; + case 19: + STT(19, val); + break; + case 20: + STT(20, val); + break; + case 21: + STT(21, val); + break; + case 22: + STT(22, val); + break; + case 23: + STT(23, val); + break; + case 24: + STT(24, val); + break; + case 25: + STT(25, val); + break; + case 26: + STT(26, val); + break; + case 27: + STT(27, val); + break; + case 28: + STT(28, val); + break; + case 29: + STT(29, val); + break; + case 30: + STT(30, val); + break; + case 31: + STT(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg); + +void sw64_write_fp_reg(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDT(0, val); + break; + case 1: + LDT(1, val); + break; + case 2: + LDT(2, val); + break; + case 3: + LDT(3, val); + break; + case 4: + LDT(4, val); + break; + case 5: + LDT(5, val); + break; + case 6: + LDT(6, val); + break; + case 7: + LDT(7, val); + break; + case 8: + LDT(8, val); + break; + case 9: + LDT(9, val); + break; + case 10: + LDT(10, val); + break; + case 11: + LDT(11, val); + break; + case 12: + LDT(12, val); + break; + case 13: + LDT(13, val); + break; + case 14: + LDT(14, val); + break; + case 15: + LDT(15, val); + break; + case 16: + LDT(16, val); + break; + case 17: + LDT(17, val); + break; + case 18: + LDT(18, val); + break; + case 19: + LDT(19, val); + break; + case 20: + LDT(20, val); + break; + case 21: + LDT(21, val); + break; + case 22: + LDT(22, val); + break; + case 23: + LDT(23, val); + break; + case 24: + LDT(24, val); + break; + case 25: + LDT(25, val); + break; + case 26: + LDT(26, val); + break; + case 27: + LDT(27, val); + break; + case 28: + LDT(28, val); + break; + case 29: + LDT(29, val); + break; + case 30: + LDT(30, val); + break; + case 31: + LDT(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg); + +unsigned long sw64_read_fp_reg_s(unsigned long reg) +{ + unsigned long val; + + switch (reg) { + case 0: + STS(0, val); + break; + case 1: + STS(1, val); + break; + case 2: + STS(2, val); + break; + case 3: + STS(3, val); + break; + case 4: + STS(4, val); + break; + case 5: + STS(5, val); + break; + case 6: + STS(6, val); + break; + case 7: + STS(7, val); + break; + case 8: + STS(8, val); + break; + case 9: + STS(9, val); + break; + case 10: + STS(10, val); + break; + case 11: + STS(11, val); + break; + case 12: + STS(12, val); + break; + case 13: + STS(13, val); + break; + case 14: + STS(14, val); + break; + case 15: + STS(15, val); + break; + case 16: + STS(16, val); + break; + case 17: + STS(17, val); + break; + case 18: + STS(18, val); + break; + case 19: + STS(19, val); + break; + case 20: + STS(20, val); + break; + case 21: + STS(21, val); + break; + case 22: + STS(22, val); + break; + case 23: + STS(23, val); + break; + case 24: + STS(24, val); + break; + case 25: + STS(25, val); + break; + case 26: + STS(26, val); + break; + case 27: + STS(27, val); + break; + case 28: + STS(28, val); + break; + case 29: + STS(29, val); + break; + case 30: + STS(30, val); + break; + case 31: + STS(31, val); + break; + default: + return 0; + } + + return val; +} +EXPORT_SYMBOL(sw64_read_fp_reg_s); + +void sw64_write_fp_reg_s(unsigned long reg, unsigned long val) +{ + switch (reg) { + case 0: + LDS(0, val); + break; + case 1: + LDS(1, val); + break; + case 2: + LDS(2, val); + break; + case 3: + LDS(3, val); + break; + case 4: + LDS(4, val); + break; + case 
5: + LDS(5, val); + break; + case 6: + LDS(6, val); + break; + case 7: + LDS(7, val); + break; + case 8: + LDS(8, val); + break; + case 9: + LDS(9, val); + break; + case 10: + LDS(10, val); + break; + case 11: + LDS(11, val); + break; + case 12: + LDS(12, val); + break; + case 13: + LDS(13, val); + break; + case 14: + LDS(14, val); + break; + case 15: + LDS(15, val); + break; + case 16: + LDS(16, val); + break; + case 17: + LDS(17, val); + break; + case 18: + LDS(18, val); + break; + case 19: + LDS(19, val); + break; + case 20: + LDS(20, val); + break; + case 21: + LDS(21, val); + break; + case 22: + LDS(22, val); + break; + case 23: + LDS(23, val); + break; + case 24: + LDS(24, val); + break; + case 25: + LDS(25, val); + break; + case 26: + LDS(26, val); + break; + case 27: + LDS(27, val); + break; + case 28: + LDS(28, val); + break; + case 29: + LDS(29, val); + break; + case 30: + LDS(30, val); + break; + case 31: + LDS(31, val); + break; + } +} +EXPORT_SYMBOL(sw64_write_fp_reg_s); diff --git a/arch/sw_64/lib/iomap.c b/arch/sw_64/lib/iomap.c new file mode 100644 index 0000000000000000000000000000000000000000..d9c66a89131e4028ced9454d27c94dc5aae388f9 --- /dev/null +++ b/arch/sw_64/lib/iomap.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Sw_64 IO and memory functions. + */ + +#include + +#include +#include + +/* + * Here comes the sw64 implementation of the IOMAP interfaces. + */ +unsigned int ioread8(const void __iomem *addr) +{ + return readb(addr); +} +EXPORT_SYMBOL(ioread8); + +unsigned int ioread16(const void __iomem *addr) +{ + return readw(addr); +} +EXPORT_SYMBOL(ioread16); + +unsigned int ioread32(const void __iomem *addr) +{ + return readl(addr); +} +EXPORT_SYMBOL(ioread32); + +void iowrite8(u8 b, void __iomem *addr) +{ + writeb(b, addr); +} +EXPORT_SYMBOL(iowrite8); + +void iowrite16(u16 b, void __iomem *addr) +{ + writew(b, addr); +} +EXPORT_SYMBOL(iowrite16); + +void iowrite32(u32 b, void __iomem *addr) +{ + writel(b, addr); +} +EXPORT_SYMBOL(iowrite32); + +u8 inb(unsigned long port) +{ + return ioread8(ioport_map(port, 1)); +} +EXPORT_SYMBOL(inb); + +u16 inw(unsigned long port) +{ + return ioread16(ioport_map(port, 2)); +} +EXPORT_SYMBOL(inw); + +u32 inl(unsigned long port) +{ + return ioread32(ioport_map(port, 4)); +} +EXPORT_SYMBOL(inl); + +void outb(u8 b, unsigned long port) +{ + iowrite8(b, ioport_map(port, 1)); +} +EXPORT_SYMBOL(outb); + +void outw(u16 b, unsigned long port) +{ + iowrite16(b, ioport_map(port, 2)); +} +EXPORT_SYMBOL(outw); + +void outl(u32 b, unsigned long port) +{ + iowrite32(b, ioport_map(port, 4)); +} +EXPORT_SYMBOL(outl); + + +/* + * Read COUNT 8-bit bytes from port PORT into memory starting at SRC. + */ +void ioread8_rep(const void __iomem *port, void *dst, unsigned long count) +{ + while ((unsigned long)dst & 0x3) { + if (!count) + return; + count--; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } + + while (count >= 4) { + unsigned int w; + + count -= 4; + w = ioread8(port); + w |= ioread8(port) << 8; + w |= ioread8(port) << 16; + w |= ioread8(port) << 24; + *(unsigned int *)dst = w; + dst += 4; + } + + while (count) { + --count; + *(unsigned char *)dst = ioread8(port); + dst += 1; + } +} +EXPORT_SYMBOL(ioread8_rep); + +void insb(unsigned long port, void *dst, unsigned long count) +{ + ioread8_rep(ioport_map(port, 1), dst, count); +} +EXPORT_SYMBOL(insb); + +/* + * Read COUNT 16-bit words from port PORT into memory starting at + * SRC. SRC must be at least short aligned. 
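+ * (If it is not, the BUG_ON in the body below fires.)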
This is used by the + * IDE driver to read disk sectors. Performance is important, but + * the interfaces seems to be slow: just using the inlined version + * of the inw() breaks things. + */ +void ioread16_rep(const void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)dst & 0x1); + count--; + *(unsigned short *)dst = ioread16(port); + dst += 2; + } + + while (count >= 2) { + unsigned int w; + + count -= 2; + w = ioread16(port); + w |= ioread16(port) << 16; + *(unsigned int *)dst = w; + dst += 4; + } + + if (count) + *(unsigned short *)dst = ioread16(port); +} +EXPORT_SYMBOL(ioread16_rep); + +void insw(unsigned long port, void *dst, unsigned long count) +{ + ioread16_rep(ioport_map(port, 2), dst, count); +} +EXPORT_SYMBOL(insw); + + +/* + * Read COUNT 32-bit words from port PORT into memory starting at + * SRC. Now works with any alignment in SRC. Performance is important, + * but the interfaces seems to be slow: just using the inlined version + * of the inl() breaks things. + */ +void ioread32_rep(const void __iomem *port, void *dst, unsigned long count) +{ + if (unlikely((unsigned long)dst & 0x3)) { + while (count--) { + struct S { int x __packed; }; + ((struct S *)dst)->x = ioread32(port); + dst += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + *(unsigned int *)dst = ioread32(port); + dst += 4; + } + } +} +EXPORT_SYMBOL(ioread32_rep); + +void insl(unsigned long port, void *dst, unsigned long count) +{ + ioread32_rep(ioport_map(port, 4), dst, count); +} +EXPORT_SYMBOL(insl); + + +/* + * Like insb but in the opposite direction. + * Don't worry as much about doing aligned memory transfers: + * doing byte reads the "slow" way isn't nearly as slow as + * doing byte writes the slow way (no r-m-w cycle). + */ +void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count) +{ + const unsigned char *src = xsrc; + + while (count--) + iowrite8(*src++, port); +} +EXPORT_SYMBOL(iowrite8_rep); + +void outsb(unsigned long port, const void *src, unsigned long count) +{ + iowrite8_rep(ioport_map(port, 1), src, count); +} +EXPORT_SYMBOL(outsb); + + +/* + * Like insw but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Performance is important, but the + * interfaces seems to be slow: just using the inlined version of the + * outw() breaks things. + */ +void iowrite16_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + if (!count) + return; + BUG_ON((unsigned long)src & 0x1); + iowrite16(*(unsigned short *)src, port); + src += 2; + --count; + } + + while (count >= 2) { + unsigned int w; + + count -= 2; + w = *(unsigned int *)src; + src += 4; + iowrite16(w >> 0, port); + iowrite16(w >> 16, port); + } + + if (count) + iowrite16(*(unsigned short *)src, port); +} +EXPORT_SYMBOL(iowrite16_rep); + +void outsw(unsigned long port, const void *src, unsigned long count) +{ + iowrite16_rep(ioport_map(port, 2), src, count); +} +EXPORT_SYMBOL(outsw); + + +/* + * Like insl but in the opposite direction. This is used by the IDE + * driver to write disk sectors. Works with any alignment in SRC. + * Performance is important, but the interfaces seems to be slow: + * just using the inlined version of the outl() breaks things. 
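+ *
+ * The single-member __packed struct below tells the compiler the
+ * access may be unaligned, so it emits byte-safe code for the
+ * misaligned case; ioread32_rep() above uses the same trick on its
+ * destination buffer.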
+ */ +void iowrite32_rep(void __iomem *port, const void *src, unsigned long count) +{ + if (unlikely((unsigned long)src & 0x3)) { + while (count--) { + struct S { int x __packed; }; + iowrite32(((struct S *)src)->x, port); + src += 4; + } + } else { + /* Buffer 32-bit aligned. */ + while (count--) { + iowrite32(*(unsigned int *)src, port); + src += 4; + } + } +} +EXPORT_SYMBOL(iowrite32_rep); + +void outsl(unsigned long port, const void *src, unsigned long count) +{ + iowrite32_rep(ioport_map(port, 4), src, count); +} +EXPORT_SYMBOL(outsl); + + +/* + * Copy data from IO memory space to "real" memory space. + * This needs to be optimized. + */ +void memcpy_fromio(void *to, const volatile void __iomem *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + *(u64 *)to = __raw_readq(from); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + *(u32 *)to = __raw_readl(from); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + *(u16 *)to = __raw_readw(from); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + *(u8 *) to = __raw_readb(from); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_fromio); + + +/* + * Copy data from "real" memory space to IO memory space. + * This needs to be optimized. + */ +void memcpy_toio(volatile void __iomem *to, const void *from, long count) +{ + /* + * Optimize co-aligned transfers. Everything else gets handled + * a byte at a time. + * FIXME -- align FROM. + */ + + if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) { + count -= 8; + do { + __raw_writeq(*(const u64 *)from, to); + count -= 8; + to += 8; + from += 8; + } while (count >= 0); + count += 8; + } + + if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) { + count -= 4; + do { + __raw_writel(*(const u32 *)from, to); + count -= 4; + to += 4; + from += 4; + } while (count >= 0); + count += 4; + } + + if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) { + count -= 2; + do { + __raw_writew(*(const u16 *)from, to); + count -= 2; + to += 2; + from += 2; + } while (count >= 0); + count += 2; + } + + while (count > 0) { + __raw_writeb(*(const u8 *) from, to); + count--; + to++; + from++; + } + mb(); +} +EXPORT_SYMBOL(memcpy_toio); + + +/* + * "memset" on IO memory space. 
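+ *
+ * The fill value @c is expected to arrive with the fill byte
+ * already replicated across all eight byte lanes, since the same
+ * value is written with byte, halfword, longword and quadword
+ * stores below.  For example, a 13-byte fill starting at an odd
+ * address is carried out as 1 + 2 + 4 byte head stores, no full
+ * quadwords, and a 4 + 2 byte tail.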
+ */ +void _memset_c_io(volatile void __iomem *to, unsigned long c, long count) +{ + /* Handle any initial odd byte */ + if (count > 0 && ((u64)to & 1)) { + __raw_writeb(c, to); + to++; + count--; + } + + /* Handle any initial odd halfword */ + if (count >= 2 && ((u64)to & 2)) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* Handle any initial odd word */ + if (count >= 4 && ((u64)to & 4)) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* + * Handle all full-sized quadwords: we're aligned + * (or have a small count) + */ + count -= 8; + if (count >= 0) { + do { + __raw_writeq(c, to); + to += 8; + count -= 8; + } while (count >= 0); + } + count += 8; + + /* The tail is word-aligned if we still have count >= 4 */ + if (count >= 4) { + __raw_writel(c, to); + to += 4; + count -= 4; + } + + /* The tail is half-word aligned if we have count >= 2 */ + if (count >= 2) { + __raw_writew(c, to); + to += 2; + count -= 2; + } + + /* And finally, one last byte.. */ + if (count) + __raw_writeb(c, to); + mb(); +} +EXPORT_SYMBOL(_memset_c_io); + +void __iomem *ioport_map(unsigned long port, unsigned int size) +{ + unsigned long io_offset; + + if (port < 0x100000) { + io_offset = is_in_host() ? LPC_LEGACY_IO : PCI_VT_LEGACY_IO; + port = port | io_offset; + } + + return __va(port); +} +EXPORT_SYMBOL(ioport_map); + +void ioport_unmap(void __iomem *addr) +{ +} +EXPORT_SYMBOL(ioport_unmap); diff --git a/arch/sw_64/lib/iomap_copy.c b/arch/sw_64/lib/iomap_copy.c new file mode 100644 index 0000000000000000000000000000000000000000..1c75bd602d7e7fcd01591cdcbdd73b8e6d258aec --- /dev/null +++ b/arch/sw_64/lib/iomap_copy.c @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +/** + * __iowrite32_copy - copy data to MMIO space, in 32-bit units + * @to: destination, in MMIO space (must be 32-bit aligned) + * @from: source (must be 32-bit aligned) + * @count: number of 32-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. + */ +void __iowrite32_copy(void __iomem *to, + const void *from, + size_t count) +{ + u32 __iomem *dst = to; + const u32 *src = from; + const u32 *end = src + count; + + while (src < end) { + __raw_writel(*src++, dst++); + mb(); + } + +} + +/** + * __iowrite64_copy - copy data to MMIO space, in 64-bit or 32-bit units + * @to: destination, in MMIO space (must be 64-bit aligned) + * @from: source (must be 64-bit aligned) + * @count: number of 64-bit quantities to copy + * + * Copy data from kernel space to MMIO space, in units of 32 or 64 bits at a + * time. Order of access is not guaranteed, nor is a memory barrier + * performed afterwards. 
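+ *
+ * Note that this sw_64 version in fact issues mb() after every
+ * store, so ordering here is stronger than the generic contract
+ * quoted above.  A purely illustrative call site (ring_base and
+ * descs are made-up names), remembering that @count is in 64-bit
+ * units rather than bytes, would be:
+ *
+ *	__iowrite64_copy(ring_base, descs, sizeof(descs) / 8);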
+ */ +void __iowrite64_copy(void __iomem *to, + const void *from, + size_t count) +{ + u64 __iomem *dst = to; + const u64 *src = from; + const u64 *end = src + count; + + while (src < end) { + __raw_writeq(*src++, dst++); + mb(); + } +} diff --git a/arch/sw_64/lib/memcpy.S b/arch/sw_64/lib/memcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..31c422b393eeb4b8795758540b8f6a8735121d4d --- /dev/null +++ b/arch/sw_64/lib/memcpy.S @@ -0,0 +1,201 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Reasonably optimized memcpy() routine for the sw64 + * + * - memory accessed as aligned quadwords only + * - uses bcmpge to compare 8 bytes in parallel + * + * Temp usage notes: + * $1, $2, - scratch + */ +#include + .set noreorder + .set noat + + .align 4 + .globl memcpy + .ent memcpy +memcpy: + .frame $30, 0, $26, 0 + .prologue 0 + + mov $16, $0 + ble $18, $nomoredata + xor $16, $17, $1 + and $1, 7, $1 + + bne $1, $misaligned + /* source and dest are same mod 8 address */ + and $16, 7, $1 + beq $1, $both_0mod8 + + /* + * source and dest are same misalignment. move a byte at a time + * until a 0mod8 alignment for both is reached. + * At least one byte more to move + */ + +$head_align: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + stb $1, 0($16) + addl $16, 1, $16 + and $16, 7, $1 + ble $18, $nomoredata + bne $1, $head_align + +$both_0mod8: + cmple $18, 127, $1 + bne $1, $no_unroll + and $16, 63, $1 + beq $1, $do_unroll + +$single_head_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + and $16, 63, $1 + bne $1, $single_head_quad + +$do_unroll: + addl $16, 64, $7 + cmple $18, 127, $1 + bne $1, $tail_quads + +$unroll_body: + #wh64 ($7) + fillde 0($7) + + ldl $6, 0($17) + + ldl $4, 8($17) + ldl $5, 16($17) + addl $7, 64, $7 + + ldl $3, 24($17) + addl $16, 64, $1 + + addl $17, 32, $17 + stl $6, 0($16) + + stl $4, 8($16) + stl $5, 16($16) + subl $18, 192, $2 + + stl $3, 24($16) + addl $16, 32, $16 + + ldl $6, 0($17) + ldl $4, 8($17) + #cmovlt $2, $1, $7 + sellt $2, $1, $7, $7 + + ldl $5, 16($17) + ldl $3, 24($17) + addl $16, 32, $16 + subl $18, 64, $18 + + addl $17, 32, $17 + stl $6, -32($16) + stl $4, -24($16) + cmple $18, 63, $1 + + stl $5, -16($16) + stl $3, -8($16) + beq $1, $unroll_body + +$tail_quads: +$no_unroll: + .align 4 + subl $18, 8, $18 + blt $18, $less_than_8 + +$move_a_quad: + ldl $1, 0($17) + subl $18, 8, $18 + addl $17, 8, $17 + + stl $1, 0($16) + addl $16, 8, $16 + bge $18, $move_a_quad + +$less_than_8: + .align 4 + addl $18, 8, $18 + ble $18, $nomoredata + + /* Trailing bytes */ +$tail_bytes: + subl $18, 1, $18 + ldbu $1, 0($17) + addl $17, 1, $17 + + stb $1, 0($16) + addl $16, 1, $16 + bgt $18, $tail_bytes + + /* branching to exit takes 3 extra cycles, so replicate exit here */ + ret $31, ($26), 1 + +$misaligned: + mov $0, $4 + and $0, 7, $1 + beq $1, $dest_0mod8 + +$aligndest: + ble $18, $nomoredata + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + + stb $1, 0($4) + addl $4, 1, $4 + and $4, 7, $1 + bne $1, $aligndest + + /* Source has unknown alignment, but dest is known to be 0mod8 */ +$dest_0mod8: + subl $18, 8, $18 + blt $18, $misalign_tail + ldl_u $3, 0($17) + +$mis_quad: + ldl_u $16, 8($17) + extll $3, $17, $3 + exthl $16, $17, $1 + bis $3, $1, $1 + + subl $18, 8, $18 + addl $17, 8, $17 + stl $1, 0($4) + mov $16, $3 + + addl $4, 8, $4 + bge $18, $mis_quad + +$misalign_tail: + addl $18, 8, $18 + ble $18, $nomoredata + +$misalign_byte: + ldbu $1, 0($17) + subl $18, 1, $18 + addl $17, 1, $17 + 
+ stb $1, 0($4) + addl $4, 1, $4 + bgt $18, $misalign_byte + + +$nomoredata: + ret $31, ($26), 1 + + .end memcpy + EXPORT_SYMBOL(memcpy) +/* For backwards module compatibility. */ +__memcpy = memcpy +.globl __memcpy diff --git a/arch/sw_64/lib/memmove.S b/arch/sw_64/lib/memmove.S new file mode 100644 index 0000000000000000000000000000000000000000..3e34fcd5b217fb2891cabfa99d3cef9139c79a1c --- /dev/null +++ b/arch/sw_64/lib/memmove.S @@ -0,0 +1,148 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Barely optimized memmove routine for sw64. + * This is hand-massaged output from the original memcpy.c. We defer to + * memcpy whenever possible; the backwards copy loops are not unrolled. + */ +#include + .set noat + .set noreorder + .text + + .align 4 + .globl memmove + .ent memmove +memmove: + ldgp $29, 0($27) + unop + .prologue 1 + + addl $16, $18, $4 + addl $17, $18, $5 + cmpule $4, $17, $1 # dest + n <= src + cmpule $5, $16, $2 # dest >= src + n + + bis $1, $2, $1 + mov $16, $0 + xor $16, $17, $2 + bne $1, memcpy # samegp + + and $2, 7, $2 # Test for src/dest co-alignment. + and $16, 7, $1 + cmpule $16, $17, $3 + bne $3, $memmove_up # dest < src + + and $4, 7, $1 + bne $2, $misaligned_dn + unop + beq $1, $skip_aligned_byte_loop_head_dn + +$aligned_byte_loop_head_dn: + ldi $4, -1($4) + ldi $5, -1($5) + unop + ble $18, $egress + + ldbu $1, 0($5) + ldi $18, -1($18) + stb $1, 0($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_dn + +$skip_aligned_byte_loop_head_dn: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_dn + +$aligned_word_loop_dn: + ldl $1, -8($5) + ldi $5, -8($5) + ldi $18, -8($18) + + stl $1, -8($4) + ldi $4, -8($4) + bge $18, $aligned_word_loop_dn + +$skip_aligned_word_loop_dn: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_dn + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_dn: + fnop + unop + beq $18, $egress + +$byte_loop_tail_dn: + ldbu $1, -1($5) + ldi $5, -1($5) + ldi $4, -1($4) + + ldi $18, -1($18) + stb $1, 0($4) + + bgt $18, $byte_loop_tail_dn + br $egress + +$memmove_up: + mov $16, $4 + mov $17, $5 + bne $2, $misaligned_up + beq $1, $skip_aligned_byte_loop_head_up + +$aligned_byte_loop_head_up: + unop + ble $18, $egress + ldbu $1, 0($5) + + ldi $18, -1($18) + + ldi $5, 1($5) + stb $1, 0($4) + ldi $4, 1($4) + + and $4, 7, $6 + bne $6, $aligned_byte_loop_head_up + +$skip_aligned_byte_loop_head_up: + ldi $18, -8($18) + blt $18, $skip_aligned_word_loop_up + +$aligned_word_loop_up: + ldl $1, 0($5) + ldi $5, 8($5) + ldi $18, -8($18) + + stl $1, 0($4) + ldi $4, 8($4) + bge $18, $aligned_word_loop_up + +$skip_aligned_word_loop_up: + ldi $18, 8($18) + bgt $18, $byte_loop_tail_up + unop + ret $31, ($26), 1 + + .align 4 +$misaligned_up: + fnop + unop + beq $18, $egress + +$byte_loop_tail_up: + ldbu $1, 0($5) + ldi $18, -1($18) + + stb $1, 0($4) + + ldi $5, 1($5) + ldi $4, 1($4) + bgt $18, $byte_loop_tail_up + +$egress: + ret $31, ($26), 1 + + .end memmove + EXPORT_SYMBOL(memmove) diff --git a/arch/sw_64/lib/memset.S b/arch/sw_64/lib/memset.S new file mode 100644 index 0000000000000000000000000000000000000000..dbc4d775c7ea83d140ea7121ddefd46ca047a604 --- /dev/null +++ b/arch/sw_64/lib/memset.S @@ -0,0 +1,153 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This is an efficient (and small) implementation of the C library "memset()" + * function for the sw. 
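+ *
+ * The fill byte is first replicated into all eight byte lanes of
+ * $17 (0xAB becomes 0xABABABABABABABAB), after which the bulk of
+ * the work is done 64 bytes per iteration with quadword stores.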
+ * + * (C) Copyright 1996 Linus Torvalds + * + * This routine is "moral-ware": you are free to use it any way you wish, and + * the only obligation I put on you is a moral one: if you make any improvements + * to the routine, please send me your improvements for me to use similarly. + * + * The scheduling comments are according to the documentation (and done by + * hand, so they might well be incorrect, please do tell me about it..) + */ + +#include + + .set noat + .set noreorder +.text + .globl memset + .globl __memset + .globl ___memset + .globl __memsetw + .globl __constant_c_memset + + .ent ___memset +.align 5 +___memset: + .frame $30, 0, $26, 0 + .prologue 0 + + and $17, 255, $1 + inslb $17, 1, $17 + bis $17, $1, $17 + sll $17, 16, $1 + + bis $17, $1, $17 + sll $17, 32, $1 + bis $17, $1, $17 + ldl_u $31, 0($30) + +.align 5 +__constant_c_memset: + addl $18, $16, $6 + bis $16, $16, $0 + xor $16, $6, $1 + ble $18, end + + bic $1, 7, $1 + beq $1, within_one_quad + and $16, 7, $3 + beq $3, aligned + + bis $16, $16, $5 + subl $3, 8, $3 + addl $18, $3, $18 + subl $16, $3, $16 + + eqv $3, $31, $3 + addl $3, 1, $3 +unaligned_start_loop: + stb $17, 0($5) + subl $3, 1, $3 + addl $5, 1, $5 + bgt $3, unaligned_start_loop + + +.align 4 +aligned: + sra $18, 3, $3 + and $18, 7, $18 + bis $16, $16, $5 + beq $3, no_quad + +/*added by JJ*/ + ldi $3, -8($3) + blt $3, nounrol + +.align 3 +wloop: + fillde 256($5) + stl $17, 0($5) + stl $17, 8($5) + stl $17, 16($5) + stl $17, 24($5) + subl $3, 8, $3 + stl $17, 32($5) + stl $17, 40($5) + stl $17, 48($5) + stl $17, 56($5) + addl $5, 0x40, $5 + bge $3, wloop + +nounrol: + addl $3, 8, $3 + beq $3, no_quad +/*end JJ*/ + +.align 3 +loop: + stl $17, 0($5) + subl $3, 1, $3 + addl $5, 8, $5 + bne $3, loop + +no_quad: + bis $31, $31, $31 + beq $18, end + and $6, 7, $6 +no_quad_loop: + stb $17, 0($5) + subl $6, 1, $6 + addl $5, 1, $5 + bgt $6, no_quad_loop + ret $31, ($26), 1 + +.align 3 +within_one_quad: + bis $18, $18, $1 + bis $16, $16, $5 +within_one_quad_loop: + stb $17, 0($5) + subl $1, 1, $1 + addl $5, 1, $5 + bgt $1, within_one_quad_loop + +end: + ret $31, ($26), 1 + .end ___memset + EXPORT_SYMBOL(___memset) + + .align 5 + .ent __memsetw +__memsetw: + .prologue 0 + + inslh $17, 0, $1 + inslh $17, 2, $2 + inslh $17, 4, $3 + or $1, $2, $1 + inslh $17, 6, $4 + or $1, $3, $1 + or $1, $4, $17 + br __constant_c_memset + + .end __memsetw + EXPORT_SYMBOL(__memsetw) + +memset = ___memset +EXPORT_SYMBOL(memset) +__memset = ___memset +EXPORT_SYMBOL(__memset) diff --git a/arch/sw_64/lib/strcpy.S b/arch/sw_64/lib/strcpy.S new file mode 100644 index 0000000000000000000000000000000000000000..61b6141f88e2393e28d0388ae726210a2d9cb2a1 --- /dev/null +++ b/arch/sw_64/lib/strcpy.S @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strcpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a null-terminated string from SRC to DST. 
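+ *
+ * The aligned main loops move one quadword per iteration: cmpgeb
+ * against $31 yields a nonzero byte mask as soon as the current
+ * quadword contains a zero byte, at which point the remaining
+ * bytes are copied one at a time by $tail_loop.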
+ * + * Input: + * $16: DST, clobbered + * $17: SRC, clobbered + * + * Output: + * $0: DST + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr) + * $4: current data to copy (could have 1 byte or 8 bytes) + * $5: parts of current data, compare result + * $6: number of bytes left to copy + * + * Tag naming: + * co: SRC and DST are co-aligned + * mis: SRC and DST are not co-aligned + * a: SRC or DST has aligned address + * una: SRC or DST has unaligned address + * + */ + +#include + + .text + .align 4 + .globl strcpy + .ent strcpy +strcpy: + .frame $30, 0, $26 + .prologue 0 + + bis $31, $16, $0 # set return value + + xor $16, $17, $1 + and $1, 7, $1 + bne $1, $mis_aligned + +/* src and dst are co-aligned */ + and $16, 7, $1 + bne $1, $co_una_head + +/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */ +$co_a_loop: + ldl $4, 0($17) + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + stl $4, 0($16) + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $co_a_loop + +/* src and dst are co-aligned but have unaligned address */ +$co_una_head: + ldl_u $4, 0($17) + extll $4, $16, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + ldi $6, 8($31) + subl $6, $1, $6 + addl $17, $6, $17 # prepare addr of middle part + +/* copy the unaligned part in loop */ +$co_una_head_loop: + stb $4, 0($16) + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $co_a_loop + addl $4, 1, $4 + br $31, $co_una_head_loop + +/* src and dst are not co-aligned */ +$mis_aligned: + and $16, 7, $1 + beq $1, $mis_a_dst + ldi $6, 8($31) + subl $6, $1, $6 + +/* copy the first few bytes to make dst aligned */ +$mis_una_head_loop: + bis $31, $31, $6 + ldbu $4, 0($17) + stb $4, 0($16) + beq $4, $out # we have reached null, return + addl $17, 1, $17 + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $mis_a_dst + br $31, $mis_una_head_loop + +/* dst has aligned addr */ +$mis_a_dst: + and $17, 7, $1 + +$mis_a_dst_loop: + ldl_u $4, 0($17) + ldl_u $5, 7($17) + extll $4, $1, $4 + exthl $5, $1, $5 + bis $4, $5, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + stl $4, 0($16) + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $mis_a_dst_loop + +/* we have find null in the last few bytes, copy one byte each time */ +$tail_loop: + ldbu $4, 0($17) + stb $4, 0($16) + beq $4, $out # we have reached null, return + addl $17, 1, $17 + addl $16, 1, $16 + br $31, $tail_loop + +/* copy is done, return */ +$out: + ret + + .end strcpy + EXPORT_SYMBOL(strcpy) diff --git a/arch/sw_64/lib/strncpy.S b/arch/sw_64/lib/strncpy.S new file mode 100644 index 0000000000000000000000000000000000000000..f50c70599bb4685518ed68379845d9b298af105d --- /dev/null +++ b/arch/sw_64/lib/strncpy.S @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Optimized strncpy() for SW64 + + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * Copy a string from SRC to DST. At most SIZE bytes are coppied. 
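+ *
+ * The structure mirrors strcpy(): quadwords are copied until
+ * cmpgeb finds a zero byte or fewer than 8 bytes remain, and if
+ * the string terminates before SIZE bytes have been written,
+ * $null_padding fills the rest of DST with zero bytes.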
+ * + * Input: + * $16: DST, clobbered + * $17: SRC, clobbered + * $18: SIZE, clobbered + * + * Output: + * $0: DST + * + * Temporaries: + * $1: unaligned parts of addr (0 means aligned addr) + * $4: current data to copy (could have 1 byte or 8 bytes) + * $5: parts of current data, compare result + * $6: number of bytes left to copy in head + * + * Tag naming: + * co: SRC and DST are co-aligned + * mis: SRC and DST are not co-aligned + * a: SRC or DST has aligned address + * una: SRC or DST has unaligned address + * + */ + +#include + + .text + .align 4 + .globl strncpy + .ent strncpy +strncpy: + .frame $30, 0, $26 + .prologue 0 + + bis $31, $16, $0 # set return value + beq $18, $out # return if size is 0 + cmplt $18, 8, $5 # size less than 8, do 1-byte copy + bne $5, $tail_loop + + xor $16, $17, $1 + and $1, 7, $1 + bne $1, $mis_aligned + +/* src and dst are co-aligned */ + and $16, 7, $1 + bne $1, $co_una_head + +/* do the copy in loop, for (co)-aligned src and dst with (a)ligned addr */ +$co_a_loop: + ldl $4, 0($17) + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, $tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $18, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $co_a_loop + +/* src and dst are co-aligned but have unaligned address */ +$co_una_head: + ldl_u $4, 0($17) + extll $4, $16, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + ldi $6, 8($31) + subl $6, $1, $6 + addl $17, $6, $17 # prepare addr of middle part + subl $18, $6, $18 # sub bytes going to be copy + +/* copy the unaligned part in loop */ +$co_una_head_loop: + stb $4, 0($16) + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $co_a_loop + addl $4, 1, $4 + br $31, $co_una_head_loop + +/* src and dst are not co-aligned */ +$mis_aligned: + and $16, 7, $1 + beq $1, $mis_a_dst + +$mis_una_head: + ldi $6, 8($31) + subl $6, $1, $6 + +/* copy the first few bytes to make dst aligned */ +$mis_una_head_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + subl $6, 1, $6 + beq $6, $mis_a_dst + br $31, $mis_una_head_loop + +/* dst has aligned addr */ +$mis_a_dst: + and $17, 7, $1 + +$mis_a_dst_loop: + ldl_u $4, 0($17) + ldl_u $5, 7($17) + extll $4, $1, $4 + exthl $5, $1, $5 + bis $4, $5, $4 + cmpgeb $31, $4, $5 + bne $5, $tail_loop # we find null + subl $18, 8, $5 + blt $5, $tail_loop # we have fewer than 8 bytes to copy + stl $4, 0($16) + subl $18, 8, $18 + beq $5, $out + addl $17, 8, $17 + addl $16, 8, $16 + br $31, $mis_a_dst_loop + +/* we have find null in the last few bytes, copy one byte each time */ +$tail_loop: + ldbu $4, 0($17) + stb $4, 0($16) + subl $18, 1, $18 + beq $18, $out + beq $4, $null_padding # we have reached null + addl $17, 1, $17 + addl $16, 1, $16 + br $31, $tail_loop + +$null_padding: + addl $16, 1, $16 + subl $18, 1, $18 + stb $31, 0($16) + beq $18, $out + br $31, $null_padding + +/* copy is done, return */ +$out: + ret + + .end strncpy + EXPORT_SYMBOL(strncpy) diff --git a/arch/sw_64/lib/uaccess_flushcache.c b/arch/sw_64/lib/uaccess_flushcache.c new file mode 100644 index 0000000000000000000000000000000000000000..353d5ac152481743a47776666ae6132626da5236 --- /dev/null +++ b/arch/sw_64/lib/uaccess_flushcache.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include + +void memcpy_flushcache(void *dst, const void *src, size_t cnt) +{ + memcpy(dst, src, cnt); + 
flush_cache_all(); +} +EXPORT_SYMBOL_GPL(memcpy_flushcache); + +void memcpy_page_flushcache(char *to, struct page *page, size_t offset, + size_t len) +{ + memcpy_flushcache(to, page_address(page) + offset, len); +} + +unsigned long __copy_user_flushcache(void *to, const void __user *from, + unsigned long n) +{ + unsigned long rc = __copy_from_user(to, from, n); + + flush_cache_all(); + return rc; +} + +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); + +void arch_invalidate_pmem(void *addr, size_t size) +{ + flush_cache_all(); +} +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); +#endif diff --git a/arch/sw_64/lib/udelay.c b/arch/sw_64/lib/udelay.c new file mode 100644 index 0000000000000000000000000000000000000000..59ca8a97d748895a49e4fbaf83a85eee99f0d459 --- /dev/null +++ b/arch/sw_64/lib/udelay.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1993, 2000 Linus Torvalds + * + * Delay routines, using a pre-computed "loops_per_jiffy" value. + */ + +#include + +/* + * Use only for very small delays (< 1 msec). + * + * The active part of our cycle counter is only 32-bits wide, and + * we're treating the difference between two marks as signed. On + * a 1GHz box, that's about 2 seconds. + */ +void __delay(unsigned long loops) +{ + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(__delay); + +void udelay(unsigned long usecs) +{ + unsigned long loops = usecs * get_cpu_freq() / 1000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(udelay); + +void ndelay(unsigned long nsecs) +{ + unsigned long loops = nsecs * get_cpu_freq() / 1000000000; + unsigned long tmp; + + __asm__ __volatile__( + " rtc %0\n" + " addl %1,%0,%1\n" + "1: rtc %0\n" + " subl %1,%0,%0\n" + " bgt %0,1b" + : "=&r" (tmp), "=r" (loops) : "1"(loops)); +} +EXPORT_SYMBOL(ndelay); diff --git a/arch/sw_64/math-emu/Makefile b/arch/sw_64/math-emu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..72e750d138e6ce9281b24b1e2c84e8907798d421 --- /dev/null +++ b/arch/sw_64/math-emu/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the FPU instruction emulation. 
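+# ccflags-y := -w below silences compiler warnings from the
+# imported soft-fp code; math.o is the emulator proper, and qrnnd.o
+# supplies the low-level division primitive it uses via sfp-util.h
+# (the same split as the alpha math-emu).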
+# + +ccflags-y := -w + +obj-$(CONFIG_MATHEMU) += math-emu.o + +math-emu-objs := math.o qrnnd.o diff --git a/arch/sw_64/math-emu/math.c b/arch/sw_64/math-emu/math.c new file mode 100644 index 0000000000000000000000000000000000000000..b578752f0730481eef0d08e28c4cff53350de228 --- /dev/null +++ b/arch/sw_64/math-emu/math.c @@ -0,0 +1,2255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Modify History + * + * who when what + * --- ---- ---- + * stone 2004-09-02 Add SIMD floating emulation code + * fire3 2008-12-27 Add SIMD floating emulation code for SW64 + */ + +#include + +#include + +#include "sfp-util.h" + +#include +#include +#include + +/* + * This is for sw64 + */ + +#define IEEE_E_STATUS_MASK IEEE_STATUS_MASK +#define IEEE_E_STATUS_TO_EXCSUM_SHIFT 0 +#define SW64_FP_DENOMAL 1 /* A denormal data */ +#define SW64_FP_NORMAL 0 /* A denormal data */ +#define SW64_FP_NAN 2 + +#define SW64_FP_NAN_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 255: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + +#define SW64_FP_NAN_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 2047: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_NAN; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + + + +#define SW64_FP_NORMAL_S(X, val) \ +do { \ + union _FP_UNION_S *_flo = \ + (union _FP_UNION_S *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +#define SW64_FP_NORMAL_D(X, val) \ +do { \ + union _FP_UNION_D *_flo = \ + (union _FP_UNION_D *)(val); \ + \ + X##_f = _flo->bits.frac; \ + X##_e = _flo->bits.exp; \ + X##_s = _flo->bits.sign; \ + \ + switch (X##_e) { \ + case 0: \ + if (_FP_FRAC_ZEROP_1(X)) \ + X##_c = SW64_FP_NORMAL; \ + else \ + X##_c = SW64_FP_DENOMAL; \ + break; \ + default: \ + X##_c = SW64_FP_NORMAL; \ + break; \ + } \ +} while (0) + +/* Operation Code for SW64 */ +#define OP_SIMD_1 0x1A +#define OP_SIMD_2 0x1B +#define OP_SIMD_MUL_ADD 0x1B +#define OP_SIMD_NORMAL 0x1A +#define OP_MUL_ADD 0x19 + +#define FNC_FMAS 0x0 +#define FNC_FMAD 0x1 +#define FNC_FMSS 0x2 +#define FNC_FMSD 0x3 +#define FNC_FNMAS 0x4 +#define FNC_FNMAD 0x5 +#define FNC_FNMSS 0x6 +#define FNC_FNMSD 0x7 + +#define FNC_VADDS 0x80 +#define FNC_VADDD 0x81 +#define FNC_VSUBS 0x82 +#define FNC_VSUBD 0x83 +#define FNC_VMULS 0x84 +#define FNC_VMULD 0x85 +#define FNC_VDIVS 0x86 +#define FNC_VDIVD 0x87 +#define FNC_VSQRTS 0x88 +#define FNC_VSQRTD 0x89 + +#define FNC_VFCMPEQ 0x8c +#define FNC_VFCMPLE 0x8d +#define FNC_VFCMPLT 0x8e +#define FNC_VFCMPUN 0x8f + +#define FNC_VCPYS 0x90 +#define FNC_VCPYSE 0x91 +#define FNC_VCPYSN 0x92 + +#define FNC_VMAS 0x0 +#define FNC_VMAD 0x1 +#define FNC_VMSS 0x2 +#define FNC_VMSD 0x3 +#define FNC_VNMAS 0x4 +#define FNC_VNMAD 0x5 +#define FNC_VNMSS 0x6 +#define FNC_VNMSD 0x7 + +long simd_fp_emul_s(unsigned long pc); +long 
simd_fp_emul_d(unsigned long pc); +long mul_add_fp_emul(unsigned long pc); +long simd_cmp_emul_d(unsigned long pc); + +long simd_mul_add_fp_emul_d(unsigned long pc); +long simd_mul_add_fp_emul_s(unsigned long pc); + +void read_fp_reg_s(unsigned long reg, unsigned long *p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void read_fp_reg_d(unsigned long reg, unsigned long *val_p0, + unsigned long *p1, unsigned long *p2, unsigned long *p3); +void write_fp_reg_s(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +void write_fp_reg_d(unsigned long reg, unsigned long val_p0, + unsigned long p1, unsigned long p2, unsigned long p3); +#define LOW_64_WORKING 1 +#define HIGH_64_WORKING 2 + +/* + * End for sw64 + */ + +#define OPC_HMC 0x00 +#define OPC_INTA 0x10 +#define OPC_INTL 0x11 +#define OPC_INTS 0x12 +#define OPC_INTM 0x13 +#define OPC_FLTC 0x14 +#define OPC_FLTV 0x15 +#define OPC_FLTI 0x16 +#define OPC_FLTL 0x17 +#define OPC_MISC 0x18 +#define OPC_JSR 0x1a + +#define FOP_SRC_S 0 +#define FOP_SRC_T 2 +#define FOP_SRC_Q 3 + +#define FOP_FNC_ADDx 0 +#define FOP_FNC_CVTQL 0 +#define FOP_FNC_SUBx 1 +#define FOP_FNC_MULx 2 +#define FOP_FNC_DIVx 3 +#define FOP_FNC_CMPxUN 4 +#define FOP_FNC_CMPxEQ 5 +#define FOP_FNC_CMPxLT 6 +#define FOP_FNC_CMPxLE 7 +#define FOP_FNC_SQRTx 11 +#define FOP_FNC_CVTxS 12 +#define FOP_FNC_CVTxT 14 +#define FOP_FNC_CVTxQ 15 + +/* this is for sw64 added by fire3*/ +#define FOP_FNC_ADDS 0 +#define FOP_FNC_ADDD 1 +#define FOP_FNC_SUBS 2 +#define FOP_FNC_SUBD 3 +#define FOP_FNC_MULS 4 +#define FOP_FNC_MULD 5 +#define FOP_FNC_DIVS 6 +#define FOP_FNC_DIVD 7 +#define FOP_FNC_SQRTS 8 +#define FOP_FNC_SQRTD 9 + +#define FOP_FNC_CMPEQ 0x10 +#define FOP_FNC_CMPLE 0x11 +#define FOP_FNC_CMPLT 0x12 +#define FOP_FNC_CMPUN 0x13 + +#define FOP_FNC_CVTSD 0x20 +#define FOP_FNC_CVTDS 0x21 +#define FOP_FNC_CVTLS 0x2D +#define FOP_FNC_CVTLD 0x2F +#define FOP_FNC_CVTDL 0x27 +#define FOP_FNC_CVTDL_G 0x22 +#define FOP_FNC_CVTDL_P 0x23 +#define FOP_FNC_CVTDL_Z 0x24 +#define FOP_FNC_CVTDL_N 0x25 + +#define FOP_FNC_CVTWL 0x28 +#define FOP_FNC_CVTLW 0x29 + +/* fire3 added end */ + + +#define MISC_TRAPB 0x0000 +#define MISC_EXCB 0x0400 + +extern unsigned long sw64_read_fp_reg(unsigned long reg); +extern void sw64_write_fp_reg(unsigned long reg, unsigned long val); +extern unsigned long sw64_read_fp_reg_s(unsigned long reg); +extern void sw64_write_fp_reg_s(unsigned long reg, unsigned long val); + + +#ifdef MODULE + +MODULE_DESCRIPTION("FP Software completion module"); + +extern long (*sw64_fp_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +extern long (*sw64_fp_emul)(unsigned long pc); + +static long (*save_emul_imprecise)(struct pt_regs *regs, unsigned long write_mask); +static long (*save_emul)(unsigned long pc); + +long do_sw_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask); +long do_sw_fp_emul(unsigned long pc); + +int init_module(void) +{ + save_emul_imprecise = sw64_fp_emul_imprecise; + save_emul = sw64_fp_emul; + sw64_fp_emul_imprecise = do_sw_fp_emul_imprecise; + sw64_fp_emul = do_sw_fp_emul; + return 0; +} + +void cleanup_module(void) +{ + sw64_fp_emul_imprecise = save_emul_imprecise; + sw64_fp_emul = save_emul; +} + +#undef sw64_fp_emul_imprecise +#define sw64_fp_emul_imprecise do_sw_fp_emul_imprecise +#undef sw64_fp_emul +#define sw64_fp_emul do_sw_fp_emul + +#endif /* MODULE */ + + +/* + * Emulate the floating point instruction at address PC. 
Returns -1 if the + * instruction to be emulated is illegal (such as with the opDEC trap), else + * the SI_CODE for a SIGFPE signal, else 0 if everything's ok. + * + * Notice that the kernel does not and cannot use FP regs. This is good + * because it means that instead of saving/restoring all fp regs, we simply + * stick the result of the operation into the appropriate register. + */ +long sw64_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); + + unsigned long fa, fb, fc, func, mode, mode_bk, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long opcode; + + get_user(insn, (__u32 *)pc); + opcode = (insn >> 26) & 0x3f; + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + pr_debug("======= Entering Floating mathe emulation =====\n"); + pr_debug("Floating math emulation insn = %#lx, opcode=%d, func=%d\n", insn, opcode, func); + pr_debug("SW64 hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("SW64 software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + + if (opcode == OP_SIMD_NORMAL) { /* float simd math */ + if (func == FNC_VADDS || func == FNC_VSUBS || func == FNC_VSQRTS + || func == FNC_VMULS || func == FNC_VDIVS) + si_code = simd_fp_emul_s(pc); + if (func == FNC_VADDD || func == FNC_VSUBD || func == FNC_VSQRTD + || func == FNC_VMULD || func == FNC_VDIVD) + si_code = simd_fp_emul_d(pc); + if (func == FNC_VFCMPUN || func == FNC_VFCMPLT || func == FNC_VFCMPLE + || func == FNC_VFCMPEQ) + si_code = simd_cmp_emul_d(pc); + return si_code; + } + if (opcode == OP_SIMD_MUL_ADD) {/* simd mul and add */ + func = (insn >> 10) & 0x3f; + if (func == FNC_VMAS || func == FNC_VMSS || func == FNC_VNMAS + || func == FNC_VNMSS) { + si_code = simd_mul_add_fp_emul_s(pc); + return si_code; + } + + if (func == FNC_VMAD || func == FNC_VMSD || func == FNC_VNMAD + || func == FNC_VNMSD) { + si_code = simd_mul_add_fp_emul_d(pc); + return si_code; + } + func = (insn >> 5) & 0xff; + } + + if (opcode == OP_MUL_ADD) { + si_code = mul_add_fp_emul(pc); + return si_code; + } + switch (func) { + case FOP_FNC_SUBS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SUB_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_SUBD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SUB_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_ADDS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_ADD_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_ADDD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_ADD_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_MULS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_MUL_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_MULD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_MUL_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_DIVS: + pr_debug("FOP_FNC_DIVS\n"); + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + 
FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_DIV_S(SR, SA, SB); + goto pack_s; + + case FOP_FNC_DIVD: + pr_debug("FOP_FNC_DIVD\n"); + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_DIV_D(DR, DA, DB); + goto pack_d; + + case FOP_FNC_SQRTS: + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_SQRT_S(SR, SB); + goto pack_s; + case FOP_FNC_SQRTD: + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_SQRT_D(DR, DB); + goto pack_d; + } + + + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + if ((func & ~0xf) == FOP_FNC_CMPEQ) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + + FP_UNPACK_RAW_DP(DA, &va); + FP_UNPACK_RAW_DP(DB, &vb); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + switch (func) { + case FOP_FNC_CMPUN: + if (res != 3) + vc = 0; + break; + case FOP_FNC_CMPEQ: + if (res) + vc = 0; + break; + case FOP_FNC_CMPLT: + if (res != -1) + vc = 0; + break; + case FOP_FNC_CMPLE: + if ((long)res > 0) + vc = 0; + break; + } + goto done_d; + } + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + if (func == FOP_FNC_CVTSD) { + vb = sw64_read_fp_reg_s(fb); + FP_UNPACK_SP(SB, &vb); + DR_c = DB_c; + DR_s = DB_s; + DR_e = DB_e + (1024 - 128); + DR_f = SB_f << (52 - 23); + goto pack_d; + } + + if (func == FOP_FNC_CVTDS) { + FP_CONV(S, D, 1, 1, SR, DB); + goto pack_s; + } + + if (func == FOP_FNC_CVTDL || func == FOP_FNC_CVTDL_G || func == FOP_FNC_CVTDL_P + || func == FOP_FNC_CVTDL_Z || func == FOP_FNC_CVTDL_N) { + mode_bk = mode; + if (func == FOP_FNC_CVTDL_Z) + mode = 0x0UL; + else if (func == FOP_FNC_CVTDL_N) + mode = 0x1UL; + else if (func == FOP_FNC_CVTDL_G) + mode = 0x2UL; + else if (func == FOP_FNC_CVTDL_P) + mode = 0x3UL; + + if (DB_c == FP_CLS_NAN && (_FP_FRAC_HIGH_RAW_D(DB) & _FP_QNANBIT_D)) { + /* AAHB Table B-2 says QNaN should not trigger INV */ + vc = 0; + } else + FP_TO_INT_ROUND_D(vc, DB, 64, 2); + mode = mode_bk; + goto done_d; + } + + vb = sw64_read_fp_reg(fb); + + switch (func) { + case FOP_FNC_CVTLW: + /* + * Notice: We can get here only due to an integer + * overflow. Such overflows are reported as invalid + * ops. We return the result the hw would have + * computed. 
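+	 * For example (illustrative), with vb = 0x80000001 the sign/msb
+	 * pair in bits 31:30 lands in bits 63:62 and the low 30 bits land
+	 * in bits 58:29, which is the canonical in-register longword
+	 * layout the bit splice below reconstructs.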
+ */ + vc = ((vb & 0xc0000000) << 32 | /* sign and msb */ + (vb & 0x3fffffff) << 29); /* rest of the int */ + FP_SET_EXCEPTION(FP_EX_INVALID); + goto done_d; + + case FOP_FNC_CVTLS: + FP_FROM_INT_S(SR, ((long)vb), 64, long); + goto pack_s; + + case FOP_FNC_CVTLD: + FP_FROM_INT_D(DR, ((long)vb), 64, long); + goto pack_d; + } + goto bad_insn; + + +pack_s: + FP_PACK_SP(&vc, SR); + + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); + sw64_write_fp_reg_s(fc, vc); + goto done; + +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vc = 0; + pr_debug("SW64 Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +done_d: + sw64_write_fp_reg(fc, vc); + goto done; + + /* + * Take the appropriate action for each possible + * floating-point result: + * + * - Set the appropriate bits in the FPCR + * - If the specified exception is enabled in the FPCR, + * return. The caller (entArith) will dispatch + * the appropriate signal to the translated program. + * + * In addition, properly track the exception state in software + * as described in the SW64 Architecture Handbook section 4.7.7.3. + */ +done: + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + pr_debug("SW64 before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long sw64_fp_emul_imprecise(struct pt_regs *regs, unsigned long write_mask) +{ + unsigned long trigger_pc = regs->pc - 4; + unsigned long insn, opcode, rc, si_code = 0; + + + /* + * Turn off the bits corresponding to registers that are the + * target of instructions that set bits in the exception + * summary register. We have some slack doing this because a + * register that is the target of a trapping instruction can + * be written at most once in the trap shadow. + * + * Branches, jumps, TRAPBs, EXCBs and calls to HMcode all + * bound the trap shadow, so we need not look any further than + * up to the first occurrence of such an instruction. + */ + while (write_mask) { + get_user(insn, (__u32 *)(trigger_pc)); + opcode = insn >> 26; + rc = insn & 0x1f; + + switch (opcode) { + case OPC_HMC: + case OPC_JSR: + case 0x30 ... 
0x3f:	/* branches */
+			goto egress;
+
+		case OPC_MISC:
+			switch (insn & 0xffff) {
+			case MISC_TRAPB:
+			case MISC_EXCB:
+				goto egress;
+
+			default:
+				break;
+			}
+			break;
+
+		case OPC_INTA:
+		case OPC_INTL:
+		case OPC_INTS:
+		case OPC_INTM:
+			write_mask &= ~(1UL << rc);
+			break;
+
+		case OPC_FLTC:
+		case OPC_FLTV:
+		case OPC_FLTI:
+		case OPC_FLTL:
+			write_mask &= ~(1UL << (rc + 32));
+			break;
+		}
+		if (!write_mask) {
+			/* Re-execute insns in the trap-shadow. */
+			regs->pc = trigger_pc + 4;
+			si_code = sw64_fp_emul(trigger_pc);
+			goto egress;
+		}
+		trigger_pc -= 4;
+	}
+
+egress:
+	return si_code;
+}
+
+#define WORKING_PART_0 0
+#define WORKING_PART_1 1
+#define WORKING_PART_2 2
+#define WORKING_PART_3 3
+
+
+/*
+ * This is for sw64
+ */
+
+long simd_cmp_emul_d(unsigned long pc)
+{
+	FP_DECL_EX;
+	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC);
+	unsigned long fa, fb, fc, func, mode, src;
+	unsigned long res, va, vb, vc, swcr, fpcr;
+	__u32 insn;
+	long si_code;
+
+	unsigned long va_p0, va_p1, va_p2, va_p3;
+	unsigned long vb_p0, vb_p1, vb_p2, vb_p3;
+	unsigned long vc_p0, vc_p1, vc_p2, vc_p3;
+	unsigned long fex_p0, fex_p1, fex_p2, fex_p3;
+
+	int working_part;
+
+	get_user(insn, (__u32 *)pc);
+	fc = (insn >> 0) & 0x1f;	/* destination register */
+	fb = (insn >> 16) & 0x1f;
+	fa = (insn >> 21) & 0x1f;
+	func = (insn >> 5) & 0xff;
+	fpcr = rdfpcr();
+	mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3;
+
+	pr_debug("======== Entering SIMD floating-CMP math emulation =======\n");
+	pr_debug("hardware fpcr = %#lx\n", fpcr);
+	swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr);
+	pr_debug("software swcr = %#lx\n", swcr);
+	pr_debug("fa:%#lx, fb:%#lx, fc:%#lx, func:%#lx, mode:%#lx\n", fa, fb, fc, func, mode);
+	read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3);
+	read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3);
+	read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3);
+	pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3);
+	pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3);
+	pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3);
+	working_part = WORKING_PART_0;
+simd_working:
+	_fex = 0;
+	switch (working_part) {
+	case WORKING_PART_0:
+		pr_debug("WORKING_PART_0\n");
+		va = va_p0;
+		vb = vb_p0;
+		vc = vc_p0;
+		break;
+	case WORKING_PART_1:
+		pr_debug("WORKING_PART_1\n");
+		va = va_p1;
+		vb = vb_p1;
+		vc = vc_p1;
+		break;
+	case WORKING_PART_2:
+		pr_debug("WORKING_PART_2\n");
+		va = va_p2;
+		vb = vb_p2;
+		vc = vc_p2;
+		break;
+	case WORKING_PART_3:
+		pr_debug("WORKING_PART_3\n");
+		va = va_p3;
+		vb = vb_p3;
+		vc = vc_p3;
+		break;
+	}
+	pr_debug("Before unpack va:%#lx, vb:%#lx\n", va, vb);
+	FP_UNPACK_RAW_DP(DA, &va);
+	FP_UNPACK_RAW_DP(DB, &vb);
+	pr_debug("DA_e:%d, _FP_FRAC_ZEROP_1(DA):%d\n", DA_e, _FP_FRAC_ZEROP_1(DA));
+	pr_debug("DB_e:%d, _FP_FRAC_ZEROP_1(DB):%d\n", DB_e, _FP_FRAC_ZEROP_1(DB));
+	pr_debug("DA iszero:%d, DB iszero:%d\n", ((!DA_e && _FP_FRAC_ZEROP_1(DA)) ?
1 : 0), + ((!DB_e && _FP_FRAC_ZEROP_1(DB)))); + if (!DA_e && !_FP_FRAC_ZEROP_1(DA)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DA, _FP_ZEROFRAC_1); + } + if (!DB_e && !_FP_FRAC_ZEROP_1(DB)) { + FP_SET_EXCEPTION(FP_EX_DENORM); + if (FP_DENORM_ZERO) + _FP_FRAC_SET_1(DB, _FP_ZEROFRAC_1); + } + FP_CMP_D(res, DA, DB, 3); + vc = 0x4000000000000000; + /* CMPTEQ, CMPTUN don't trap on QNaN, while CMPTLT and CMPTLE do */ + if (res == 3 && (((func == FOP_FNC_CMPLT) || (func == FOP_FNC_CMPLE)) + || FP_ISSIGNAN_D(DA) || FP_ISSIGNAN_D(DB))) { + pr_debug("CMPLT CMPLE:func:%d, trap on QNaN.", func); + FP_SET_EXCEPTION(FP_EX_INVALID); + } + pr_debug("res:%d\n", res); + switch (func) { + case FNC_VFCMPUN: + if (res != 3) + vc = 0; + break; + case FNC_VFCMPEQ: + if (res) + vc = 0; + break; + case FNC_VFCMPLT: + if (res != -1) + vc = 0; + break; + case FNC_VFCMPLE: + if ((long)res > 0) + vc = 0; + break; + } +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? 
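+		 * (The checks below run in sequence and each match overwrites
+		 * si_code, so when several enabled traps are pending at once
+		 * the last match, INV, takes priority.)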
*/ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR); FP_DECL_D(DC); + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD D-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == 
SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + if (((DA_c == SW64_FP_NAN) || (DB_c == SW64_FP_NAN))) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + + switch (func) { + case FNC_VSUBD: + pr_debug("FNC_VSUBD\n"); + FP_SUB_D(DR, DA, DB); + goto pack_d; + case FNC_VMULD: + pr_debug("FNC_VMULD\n"); + FP_MUL_D(DR, DA, DB); + goto pack_d; + case FNC_VADDD: + pr_debug("FNC_VADDD\n"); + FP_ADD_D(DR, DA, DB); + goto pack_d; + case FNC_VDIVD: + pr_debug("FNC_VDIVD\n"); + FP_DIV_D(DR, DA, DB); + goto pack_d; + case FNC_VSQRTD: + pr_debug("FNC_VSQRTD\n"); + FP_SQRT_D(DR, DB); + goto pack_d; + } +pack_d: + FP_PACK_DP(&vc, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_d, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
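+			 * Each lane rebuilds its own copy of fpcr; the copies
+			 * are OR-ed back together below so that status bits
+			 * raised in any of the four lanes are preserved.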
*/ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + pr_debug("Before write fp: vp_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_d(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR); + + unsigned long fa, fb, fc, func, mode, src; + unsigned long res, va, vb, vc, swcr, fpcr; + __u32 insn; + long si_code; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + get_user(insn, (__u32 *)pc); + fc = (insn >> 0) & 0x1f; /* destination register */ + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 5) & 0xff; + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("======== Entering SIMD S-floating math emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("fa:%#lx,fb:%#lx,fc:%#lx,func:%#lx,mode:%#lx\n", fa, fb, fc, func, mode); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART0: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + 
SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART1: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART2: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("PART3: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + + } + + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + + switch (func) { + case FNC_VSUBS: + pr_debug("FNC_VSUBS\n"); + FP_SUB_S(SR, SA, SB); + goto pack_s; + case FNC_VMULS: + pr_debug("FNC_VMULS\n"); + FP_MUL_S(SR, SA, SB); + goto pack_s; + case FNC_VADDS: + pr_debug("FNC_VADDS\n"); + FP_ADD_S(SR, SA, SB); + goto pack_s; + case FNC_VDIVS: + pr_debug("FNC_VDIVS\n"); + FP_DIV_S(SR, SA, SB); + goto pack_s; + case FNC_VSQRTS: + pr_debug("FNC_VSQRTS\n"); + FP_SQRT_S(SR, SB); + goto pack_s; + } +pack_s: + FP_PACK_SP(&vc, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) { + pr_debug("pack_s, vc=0 !!!!\n"); + vc = 0; + } + + pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation S-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vc_p0 = vc; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vc_p1 = vc; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vc_p2 = vc; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vc_p3 = vc; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + pr_debug("fex_p0: fpcr_p0:%#lx\n", fpcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + pr_debug("fex_p1: fpcr_p1:%#lx\n", fpcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p2 = fpcr; + fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2); + pr_debug("fex_p2: fpcr_p2:%#lx\n", fpcr_p2); + } + + if (fex_p3) { + swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p3 = fpcr; + fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3); + pr_debug("fex_p3: fpcr_p3:%#lx\n", fpcr_p3); + } + + fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3; + pr_debug("fex_p0 = %#lx\n", fex_p0); + pr_debug("fex_p1 = %#lx\n", fex_p1); + pr_debug("fex_p2 = %#lx\n", fex_p2); + pr_debug("fex_p3 = %#lx\n", fex_p3); + pr_debug("SIMD emulation almost finished.before write fpcr = %#lx\n", fpcr); + wrfpcr(fpcr); + + pr_debug("Before write fp: vc_p0=%#lx, vc_p1=%#lx, vc_p2=%#lx, vc_p3=%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + write_fp_reg_s(fc, vc_p0, vc_p1, vc_p2, vc_p3); + + /* Do we generate a signal? */ + _fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK) + | (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK); + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + pr_debug("SIMD finished.. si_code:%#lx\n", si_code); + return si_code; + } + pr_debug("SIMD finished.. 
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; + +} + +static inline unsigned long negative_value(unsigned long va) +{ + return (va ^ 0x8000000000000000UL); +} + +static inline unsigned long s_negative_value(unsigned long va) +{ + return (va ^ 0x80000000UL); +} + +/* + * sw64 mul-add floating emulation + */ +long mul_add_fp_emul(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_S(S_ZERO); + FP_DECL_D(D_ZERO); + FP_DECL_S(S_TMP2); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + pr_debug("===== Entering SW64 MUL-ADD Emulation =====\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) { + va = sw64_read_fp_reg_s(fa); + vb = sw64_read_fp_reg_s(fb); + vc = sw64_read_fp_reg_s(fc); + FP_UNPACK_SP(SA, &va); + FP_UNPACK_SP(SB, &vb); + FP_UNPACK_SP(SC, &vc); + FP_UNPACK_SP(S_ZERO, &vzero); + } + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) { + va = sw64_read_fp_reg(fa); + vb = sw64_read_fp_reg(fb); + vc = sw64_read_fp_reg(fc); + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + } + pr_debug("va = %#lx, vb = %#lx, vc = %#lx\n", va, vb, vc); + switch (func) { + case FNC_FMAS: + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMSS: + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMAS: /* (-va*vb) + vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_ADD_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FNMSS: /* (-va*vb) - vc */ + va = s_negative_value(va); + FP_UNPACK_SP(SA, &va); + FP_MUL_S(S_TMP, SA, SB); + FP_SUB_S(SR, S_TMP, SC); + goto pack_s; + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + default: + goto bad_insn; + + } +pack_s: + FP_PACK_SP(&vd, SR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg_s(fd, vd); + goto done; + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + sw64_write_fp_reg(fd, vd); + +done: + pr_debug("vd = %#lx\n", vd); + if (_fex) { + /* Record exceptions in software control word. */ + swcr |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (_fex << IEEE_STATUS_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
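+		 * (wrfpcr clobbers the SIMD registers, which is why the
+		 * destination register is written back again right after
+		 * the update below.)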
*/ + fpcr &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr |= ieee_swcr_to_fpcr(swcr); + wrfpcr(fpcr); /** wrfpcr will destroy vector register! */ + if (func == FNC_FMAS || func == FNC_FMSS || func == FNC_FNMAS || func == FNC_FNMSS) + sw64_write_fp_reg_s(fd, vd); + if (func == FNC_FMAD || func == FNC_FMSD || func == FNC_FNMAD || func == FNC_FNMSD) + sw64_write_fp_reg(fd, vd); + + /* Do we generate a signal? */ + _fex = _fex & swcr & IEEE_TRAP_ENABLE_MASK; + si_code = 0; + if (_fex) { + if (_fex & IEEE_TRAP_ENABLE_DNO) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_INE) + si_code = FPE_FLTRES; + if (_fex & IEEE_TRAP_ENABLE_UNF) + si_code = FPE_FLTUND; + if (_fex & IEEE_TRAP_ENABLE_OVF) + si_code = FPE_FLTOVF; + if (_fex & IEEE_TRAP_ENABLE_DZE) + si_code = FPE_FLTDIV; + if (_fex & IEEE_TRAP_ENABLE_INV) + si_code = FPE_FLTINV; + } + + return si_code; + } + + /* + * We used to write the destination register here, but DEC FORTRAN + * requires that the result *always* be written... so we do the write + * immediately after the operations above. + */ + + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + + +long simd_mul_add_fp_emul_s(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SC); FP_DECL_S(S_TMP); FP_DECL_S(SR); + FP_DECL_S(S_ZERO); + FP_DECL_S(S_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD S-floating mul-add emulation =======\n"); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + pr_debug("hardware fpcr = %#lx\n", fpcr); + read_fp_reg_s(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_s(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_s(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_s(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + pr_debug("FPCR_STATUS_MASK0 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK0, fpcr); + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_S(SA, &va); + SW64_FP_NORMAL_S(SB, &vb); + SW64_FP_NORMAL_S(SC, &vc); + if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c); + } else { + SW64_FP_NAN_S(SA, &va); + SW64_FP_NAN_S(SB, &vb); + if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN)) + goto 
next_working_s;
+		}
+		break;
+	case WORKING_PART_1:
+		pr_debug("WORKING_PART_1\n");
+		va = va_p1;
+		vb = vb_p1;
+		vc = vc_p1;
+		pr_debug("FPCR_STATUS_MASK1 : %#lx, fpcr :%#lx\n", FPCR_STATUS_MASK1, fpcr);
+		if ((fpcr & FPCR_STATUS_MASK1) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART1: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	case WORKING_PART_2:
+		pr_debug("WORKING_PART_2\n");
+		va = va_p2;
+		vb = vb_p2;
+		vc = vc_p2;
+		if ((fpcr & FPCR_STATUS_MASK2) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART2: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	case WORKING_PART_3:
+		pr_debug("WORKING_PART_3\n");
+		va = va_p3;
+		vb = vb_p3;
+		vc = vc_p3;
+		if ((fpcr & FPCR_STATUS_MASK3) == 0) {
+			SW64_FP_NORMAL_S(SA, &va);
+			SW64_FP_NORMAL_S(SB, &vb);
+			SW64_FP_NORMAL_S(SC, &vc);
+			if ((SA_c == SW64_FP_NORMAL) && (SB_c == SW64_FP_NORMAL) && (SC_c == SW64_FP_NORMAL))
+				goto next_working_s;
+			else
+				pr_debug("PART3: SA_c = %#lx, SB_c = %#lx\n", SA_c, SB_c);
+		} else {
+			SW64_FP_NAN_S(SA, &va);
+			SW64_FP_NAN_S(SB, &vb);
+			if ((SA_c == SW64_FP_NAN) && (SB_c == SW64_FP_NAN))
+				goto next_working_s;
+		}
+		break;
+	}
+
+	FP_UNPACK_SP(SA, &va);
+	FP_UNPACK_SP(SB, &vb);
+	FP_UNPACK_SP(SC, &vc);
+	FP_UNPACK_SP(S_ZERO, &vzero);
+	switch (func) {
+	case FNC_FMAS:
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_ADD_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FMSS:
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_SUB_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FNMAS:	/* (-va*vb) + vc */
+		va = s_negative_value(va);
+		FP_UNPACK_SP(SA, &va);
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_ADD_S(SR, S_TMP, SC);
+		goto pack_s;
+	case FNC_FNMSS:	/* (-va*vb) - vc */
+		va = s_negative_value(va);
+		FP_UNPACK_SP(SA, &va);
+		FP_MUL_S(S_TMP, SA, SB);
+		FP_SUB_S(SR, S_TMP, SC);
+		goto pack_s;
+	default:
+		goto bad_insn;
+	}
+
+pack_s:
+	FP_PACK_SP(&vd, SR);
+	if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ))
+		vd = 0;
+	pr_debug("SW64 SIMD Emulation S-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc);
+	pr_debug("SW64 SIMD Emulation S-floating mode=%#lx, func=%#lx, swcr=%#lx\n", mode, func, swcr);
+next_working_s:
+	switch (working_part) {
+	case WORKING_PART_0:
+		working_part = WORKING_PART_1;
+		vd_p0 = vd;
+		fex_p0 = _fex;
+		goto simd_working;
+	case WORKING_PART_1:
+		working_part = WORKING_PART_2;
+		vd_p1 = vd;
+		fex_p1 = _fex;
+		goto simd_working;
+	case WORKING_PART_2:
+		working_part = WORKING_PART_3;
+		vd_p2 = vd;
+		fex_p2 = _fex;
+		goto simd_working;
+	case WORKING_PART_3:
+		vd_p3 = vd;
+		fex_p3 = _fex;
+		goto done;
+	}
+done:
+	if (fex_p0 || fex_p1 || fex_p2 || fex_p3) {
+		unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3;
+		unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3;
+
+		fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0;
+		swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr;
+		/* manage fpcr_p0 */
+		if (fex_p0) {
+			swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state
+				|= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT);
+
+			/* Update hardware control register. */
+			fpcr_p0 = fpcr;
+			fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0);
+		}
+
+		if (fex_p1) {
+			swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state
+				|= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT);
+
+			/* Update hardware control register. */
+			fpcr_p1 = fpcr;
+			fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1);
+		}
+
+		if (fex_p2) {
+			swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state
+				|= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT);
+
+			/* Update hardware control register. */
+			fpcr_p2 = fpcr;
+			fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2);
+		}
+
+		if (fex_p3) {
+			swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state
+				|= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT);
+
+			/* Update hardware control register. */
+			fpcr_p3 = fpcr;
+			fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3);
+		}
+
+		fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3;
+		pr_debug("fex_p0 = %#lx\n", fex_p0);
+		pr_debug("fex_p1 = %#lx\n", fex_p1);
+		pr_debug("fex_p2 = %#lx\n", fex_p2);
+		pr_debug("fex_p3 = %#lx\n", fex_p3);
+		pr_debug("SIMD emulation almost finished. Before write fpcr = %#lx\n", fpcr);
+		wrfpcr(fpcr);
+		pr_debug("Before write fp: vd_p0=%#lx, vd_p1=%#lx, vd_p2=%#lx, vd_p3=%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3);
+		write_fp_reg_s(fd, vd_p0, vd_p1, vd_p2, vd_p3);	/* write to fd */
+
+		/* Do we generate a signal? */
+		_fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK)
+			| (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK);
+		si_code = 0;
+		if (_fex) {
+			if (_fex & IEEE_TRAP_ENABLE_DNO)
+				si_code = FPE_FLTUND;
+			if (_fex & IEEE_TRAP_ENABLE_INE)
+				si_code = FPE_FLTRES;
+			if (_fex & IEEE_TRAP_ENABLE_UNF)
+				si_code = FPE_FLTUND;
+			if (_fex & IEEE_TRAP_ENABLE_OVF)
+				si_code = FPE_FLTOVF;
+			if (_fex & IEEE_TRAP_ENABLE_DZE)
+				si_code = FPE_FLTDIV;
+			if (_fex & IEEE_TRAP_ENABLE_INV)
+				si_code = FPE_FLTINV;
+		}
+		pr_debug("SIMD finished.. si_code:%#lx\n", si_code);
+		return si_code;
+
+	}
+	pr_debug("SIMD finished..
si_code:%#lx\n", si_code); + return 0; + +bad_insn: + pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc); + return -1; +} + +long simd_mul_add_fp_emul_d(unsigned long pc) +{ + FP_DECL_EX; + FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DC); FP_DECL_D(D_TMP); FP_DECL_D(DR); + FP_DECL_D(D_ZERO); + FP_DECL_D(D_TMP2); + + unsigned long fa, fb, fc, fd, func, mode, src; + unsigned long res, va, vb, vc, vd, vtmp, vtmp2, swcr, fpcr; + __u32 insn; + long si_code; + unsigned long vzero = 0; + + get_user(insn, (__u32 *)pc); + fd = (insn >> 0) & 0x1f; /* destination register */ + fc = (insn >> 5) & 0x1f; + fb = (insn >> 16) & 0x1f; + fa = (insn >> 21) & 0x1f; + func = (insn >> 10) & 0x3f; + + fpcr = rdfpcr(); + mode = (fpcr >> FPCR_DYN_SHIFT) & 0x3; + + unsigned long va_p0, va_p1, va_p2, va_p3; + unsigned long vb_p0, vb_p1, vb_p2, vb_p3; + unsigned long vc_p0, vc_p1, vc_p2, vc_p3; + unsigned long vd_p0, vd_p1, vd_p2, vd_p3; + unsigned long fex_p0, fex_p1, fex_p2, fex_p3; + + int working_part; + + pr_debug("======== Entering SIMD D-floating mul-add emulation =======\n"); + pr_debug("hardware fpcr = %#lx\n", fpcr); + swcr = swcr_update_status(current_thread_info()->ieee_state, fpcr); + pr_debug("software swcr = %#lx\n", swcr); + read_fp_reg_d(fa, &va_p0, &va_p1, &va_p2, &va_p3); + read_fp_reg_d(fb, &vb_p0, &vb_p1, &vb_p2, &vb_p3); + read_fp_reg_d(fc, &vc_p0, &vc_p1, &vc_p2, &vc_p3); + read_fp_reg_d(fd, &vd_p0, &vd_p1, &vd_p2, &vd_p3); + pr_debug("va_p0:%#lx, va_p1:%#lx, va_p2:%#lx, va_p3:%#lx\n", va_p0, va_p1, va_p2, va_p3); + pr_debug("vb_p0:%#lx, vb_p1:%#lx, vb_p2:%#lx, vb_p3:%#lx\n", vb_p0, vb_p1, vb_p2, vb_p3); + pr_debug("vc_p0:%#lx, vc_p1:%#lx, vc_p2:%#lx, vc_p3:%#lx\n", vc_p0, vc_p1, vc_p2, vc_p3); + pr_debug("vd_p0:%#lx, vd_p1:%#lx, vd_p2:%#lx, vd_p3:%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3); + working_part = WORKING_PART_0; +simd_working: + _fex = 0; + switch (working_part) { + case WORKING_PART_0: + pr_debug("WORKING_PART_0\n"); + va = va_p0; + vb = vb_p0; + vc = vc_p0; + vd = vd_p0; + if ((fpcr & FPCR_STATUS_MASK0) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("LOW: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_1: + pr_debug("WORKING_PART_1\n"); + va = va_p1; + vb = vb_p1; + vc = vc_p1; + vd = vd_p1; + if ((fpcr & FPCR_STATUS_MASK1) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_2: + pr_debug("WORKING_PART_2\n"); + va = va_p2; + vb = vb_p2; + vc = vc_p2; + vd = vd_p2; + if ((fpcr & FPCR_STATUS_MASK2) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", 
DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + case WORKING_PART_3: + pr_debug("WORKING_PART_3\n"); + va = va_p3; + vb = vb_p3; + vc = vc_p3; + vd = vd_p3; + if ((fpcr & FPCR_STATUS_MASK3) == 0) { + SW64_FP_NORMAL_D(DA, &va); + SW64_FP_NORMAL_D(DB, &vb); + SW64_FP_NORMAL_D(DC, &vc); + if ((DA_c == SW64_FP_NORMAL) && (DB_c == SW64_FP_NORMAL) && (DC_c == SW64_FP_NORMAL)) + goto next_working_s; + else + pr_debug("HIGH: DA_c = %#lx, DB_c = %#lx\n", DA_c, DB_c); + } else { + SW64_FP_NAN_D(DA, &va); + SW64_FP_NAN_D(DB, &vb); + SW64_FP_NAN_D(DC, &vc); + if ((DA_c == SW64_FP_NAN) && (DB_c == SW64_FP_NAN) && (DC_c == SW64_FP_NAN)) + goto next_working_s; + } + break; + } + + FP_UNPACK_DP(DA, &va); + FP_UNPACK_DP(DB, &vb); + FP_UNPACK_DP(DC, &vc); + FP_UNPACK_DP(D_ZERO, &vzero); + + switch (func) { + case FNC_FMAD: + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FMSD: + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMAD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_ADD_D(DR, D_TMP, DC); + goto pack_d; + case FNC_FNMSD: + va = negative_value(va); + FP_UNPACK_DP(DA, &va); + FP_MUL_D(D_TMP, DA, DB); + FP_SUB_D(DR, D_TMP, DC); + + goto pack_d; + default: + goto bad_insn; + } + +pack_d: + FP_PACK_DP(&vd, DR); + if ((_fex & FP_EX_UNDERFLOW) && (swcr & IEEE_MAP_UMZ)) + vd = 0; + pr_debug("SW64 SIMD Emulation D-floating _fex=%#lx, va=%#lx, vb=%#lx, vc=%#lx\n", _fex, va, vb, vc); + pr_debug("SW64 SIMD Emulation D-floating mode=%#lx,func=%#lx, swcr=%#lx\n", mode, func, swcr); +next_working_s: + switch (working_part) { + case WORKING_PART_0: + working_part = WORKING_PART_1; + vd_p0 = vd; + fex_p0 = _fex; + goto simd_working; + case WORKING_PART_1: + working_part = WORKING_PART_2; + vd_p1 = vd; + fex_p1 = _fex; + goto simd_working; + case WORKING_PART_2: + working_part = WORKING_PART_3; + vd_p2 = vd; + fex_p2 = _fex; + goto simd_working; + case WORKING_PART_3: + vd_p3 = vd; + fex_p3 = _fex; + goto done; + } +done: + if (fex_p0 || fex_p1 || fex_p2 || fex_p3) { + unsigned long fpcr_p0, fpcr_p1, fpcr_p2, fpcr_p3; + unsigned long swcr_p0, swcr_p1, swcr_p2, swcr_p3; + + fpcr_p0 = fpcr_p1 = fpcr_p2 = fpcr_p3 = 0; + swcr_p0 = swcr_p1 = swcr_p2 = swcr_p3 = swcr; + /* manage fpcr_p0 */ + if (fex_p0) { + swcr_p0 |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p0 << IEEE_STATUS0_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p0 = fpcr; + fpcr_p0 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p0 |= ieee_swcr_to_fpcr(swcr_p0); + } + + if (fex_p1) { + swcr_p1 |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p1 << IEEE_STATUS1_TO_EXCSUM_SHIFT); + + /* Update hardware control register. */ + fpcr_p1 = fpcr; + fpcr_p1 &= (~FPCR_MASK | FPCR_DYN_MASK); + fpcr_p1 |= ieee_swcr_to_fpcr(swcr_p1); + } + + if (fex_p2) { + swcr_p2 |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + current_thread_info()->ieee_state + |= (fex_p2 << IEEE_STATUS2_TO_EXCSUM_SHIFT); + + /* Update hardware control register. 
*/
+			fpcr_p2 = fpcr;
+			fpcr_p2 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p2 |= ieee_swcr_to_fpcr(swcr_p2);
+		}
+
+		if (fex_p3) {
+			swcr_p3 |= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT);
+			current_thread_info()->ieee_state
+				|= (fex_p3 << IEEE_STATUS3_TO_EXCSUM_SHIFT);
+
+			/* Update hardware control register. */
+			fpcr_p3 = fpcr;
+			fpcr_p3 &= (~FPCR_MASK | FPCR_DYN_MASK);
+			fpcr_p3 |= ieee_swcr_to_fpcr(swcr_p3);
+		}
+
+		fpcr = fpcr_p0 | fpcr_p1 | fpcr_p2 | fpcr_p3;
+		pr_debug("fex_p0 = %#lx\n", fex_p0);
+		pr_debug("fex_p1 = %#lx\n", fex_p1);
+		pr_debug("fex_p2 = %#lx\n", fex_p2);
+		pr_debug("fex_p3 = %#lx\n", fex_p3);
+		pr_debug("SIMD emulation almost finished. Before write fpcr = %#lx\n", fpcr);
+		wrfpcr(fpcr);
+
+		pr_debug("Before write fp: vd_p0=%#lx, vd_p1=%#lx, vd_p2=%#lx, vd_p3=%#lx\n", vd_p0, vd_p1, vd_p2, vd_p3);
+		write_fp_reg_d(fd, vd_p0, vd_p1, vd_p2, vd_p3);	/* write to fd */
+
+		/* Do we generate a signal? */
+		_fex = (fex_p0 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p1 & swcr & IEEE_TRAP_ENABLE_MASK)
+			| (fex_p2 & swcr & IEEE_TRAP_ENABLE_MASK) | (fex_p3 & swcr & IEEE_TRAP_ENABLE_MASK);
+		si_code = 0;
+		if (_fex) {
+			if (_fex & IEEE_TRAP_ENABLE_DNO)
+				si_code = FPE_FLTUND;
+			if (_fex & IEEE_TRAP_ENABLE_INE)
+				si_code = FPE_FLTRES;
+			if (_fex & IEEE_TRAP_ENABLE_UNF)
+				si_code = FPE_FLTUND;
+			if (_fex & IEEE_TRAP_ENABLE_OVF)
+				si_code = FPE_FLTOVF;
+			if (_fex & IEEE_TRAP_ENABLE_DZE)
+				si_code = FPE_FLTDIV;
+			if (_fex & IEEE_TRAP_ENABLE_INV)
+				si_code = FPE_FLTINV;
+		}
+		pr_debug("SIMD finished.. si_code:%#lx\n", si_code);
+		return si_code;
+	}
+	pr_debug("SIMD finished.. si_code:%#lx\n", si_code);
+	return 0;
+
+bad_insn:
+	pr_err("%s: Invalid FP insn %#x at %#lx\n", __func__, insn, pc);
+	return -1;
+}
+
+void read_fp_reg_s(unsigned long reg, unsigned long *val_p0,
+		unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3)
+{
+	unsigned long fp[2];
+
+	sw64_read_simd_fp_m_s(reg, fp);
+	*val_p0 = fp[0] & 0xffffffffUL;
+	*val_p1 = (fp[0] >> 32) & 0xffffffffUL;
+	*val_p2 = fp[1] & 0xffffffffUL;
+	*val_p3 = (fp[1] >> 32) & 0xffffffffUL;
+}
+
+void read_fp_reg_d(unsigned long reg, unsigned long *val_p0,
+		unsigned long *val_p1, unsigned long *val_p2, unsigned long *val_p3)
+{
+	unsigned long fp[4];
+
+	sw64_read_simd_fp_m_d(reg, fp);
+	*val_p0 = fp[0];
+	*val_p1 = fp[1];
+	*val_p2 = fp[2];
+	*val_p3 = fp[3];
+}
+
+void write_fp_reg_s(unsigned long reg, unsigned long val_p0,
+		unsigned long val_p1, unsigned long val_p2, unsigned long val_p3)
+{
+	unsigned long fp[2];
+
+	fp[0] = ((val_p1 & 0xffffffffUL) << 32) | (val_p0 & 0xffffffffUL);
+	fp[1] = ((val_p3 & 0xffffffffUL) << 32) | (val_p2 & 0xffffffffUL);
+	sw64_write_simd_fp_reg_s(reg, fp[0], fp[1]);
+}
+
+void write_fp_reg_d(unsigned long reg, unsigned long val_p0,
+		unsigned long val_p1, unsigned long val_p2, unsigned long val_p3)
+{
+	sw64_write_simd_fp_reg_d(reg, val_p0, val_p1, val_p2, val_p3);
+}
diff --git a/arch/sw_64/math-emu/qrnnd.S b/arch/sw_64/math-emu/qrnnd.S
new file mode 100644
index 0000000000000000000000000000000000000000..1e732f2e68c086d33bc2be097d5ede9bd041ff4f
--- /dev/null
+++ b/arch/sw_64/math-emu/qrnnd.S
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+ # __udiv_qrnnd
+ # Copyright (C) 1992, 1994, 1995, 2000 Free Software Foundation, Inc.
+
+ # This file is part of GCC.
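+
+ # Calling convention (see sfp-util.h): __udiv_qrnnd(rem_ptr, n1, n0, d)
+ # divides the 128-bit value n1:n0 by d, returning the quotient in $0 and
+ # storing the remainder through rem_ptr ($16).  Each step of the unrolled
+ # loops below shifts the remainder/quotient pair left one bit and
+ # conditionally subtracts the divisor; an illustrative C sketch of one
+ # step (not part of the original source):
+ #
+ #	r = (r << 1) | (n >> 63);	/* $17: running remainder */
+ #	n = n << 1;			/* $18: numerator, becomes quotient */
+ #	if (d <= r) {			/* $19: divisor */
+ #		r -= d;
+ #		n |= 1;			/* record a quotient bit */
+ #	}
+ #
+ # $2 counts 16 passes of 4 such steps, covering all 64 bits.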
+ + .set noreorder + .set noat + + .text + + .globl __udiv_qrnnd + .ent __udiv_qrnnd +__udiv_qrnnd: + .frame $30, 0, $26, 0 + .prologue 0 + + # ldiq $2,16 + ldi $2, 16($31) + blt $19, $largedivisor + +$loop1: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $19, $17, $20 + subl $17, $19, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop1 + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$largedivisor: + and $18, 1, $4 + + srl $18, 1, $18 + sll $17, 63, $3 + or $3, $18, $18 + srl $17, 1, $17 + + and $19, 1, $6 + srl $19, 1, $5 + addl $5, $6, $5 + +$loop2: cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + cmplt $18, 0, $3 + addl $17, $17, $17 + bis $17, $3, $17 + addl $18, $18, $18 + cmpule $5, $17, $20 + subl $17, $5, $3 + selne $20, $3, $17, $17 + bis $18, $20, $18 + subl $2, 1, $2 + bgt $2, $loop2 + + addl $17, $17, $17 + addl $4, $17, $17 + bne $6, $Odd + stl $17, 0($16) + bis $31, $18, $0 + ret $31, ($26), 1 + +$Odd: + # q' in $18. 
r' in $17
+	addl	$17, $18, $17
+
+	cmpult	$17, $18, $3	# $3 := carry from addl
+	subl	$17, $19, $at
+	addl	$18, $3, $18
+	selne	$3, $at, $17, $17
+
+	cmpult	$17, $19, $3
+	addl	$18, 1, $at
+	seleq	$3, $at, $18, $18
+	subl	$17, $19, $at
+	seleq	$3, $at, $17, $17
+
+	stl	$17, 0($16)
+	bis	$31, $18, $0
+	ret	$31, ($26), 1
+
+	.end	__udiv_qrnnd
diff --git a/arch/sw_64/math-emu/sfp-util.h b/arch/sw_64/math-emu/sfp-util.h
new file mode 100644
index 0000000000000000000000000000000000000000..0769c0223e0d7e43e0b73fbd45882333e95f765f
--- /dev/null
+++ b/arch/sw_64/math-emu/sfp-util.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _SW64_MATH_EMU_SFP_UTIL_H
+#define _SW64_MATH_EMU_SFP_UTIL_H
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <asm/fpu.h>
+
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+	((sl) = (al) + (bl), (sh) = (ah) + (bh) + ((sl) < (al)))
+
+#define sub_ddmmss(sh, sl, ah, al, bh, bl) \
+	((sl) = (al) - (bl), (sh) = (ah) - (bh) - ((al) < (bl)))
+
+#define umul_ppmm(wh, wl, u, v) \
+	__asm__ ("mull %2, %3, %1; umulh %2, %3, %0" \
+		: "=r" ((UDItype)(wh)), \
+		  "=&r" ((UDItype)(wl)) \
+		: "r" ((UDItype)(u)), \
+		  "r" ((UDItype)(v)))
+
+#define udiv_qrnnd(q, r, n1, n0, d) \
+do { unsigned long __r; \
+	(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
+	(r) = __r; \
+} while (0)
+extern unsigned long __udiv_qrnnd(unsigned long *, unsigned long,
+				  unsigned long, unsigned long);
+
+#define UDIV_NEEDS_NORMALIZATION 1
+
+#define abort()	goto bad_insn
+
+#ifndef __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN -1
+#endif
+#define __BYTE_ORDER __LITTLE_ENDIAN
+
+#endif /* _SW64_MATH_EMU_SFP_UTIL_H */
diff --git a/arch/sw_64/mm/Makefile b/arch/sw_64/mm/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..8b9d6e4d2ebfb31bda1ee41cb00c163cd70ce577
--- /dev/null
+++ b/arch/sw_64/mm/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux sw_64-specific parts of the memory manager.
+# + +#ccflags-y := -Werror + +obj-y := init.o fault.o physaddr.o mmap.o extable.o + +obj-$(CONFIG_NUMA) += numa.o +ifeq ($(CONFIG_SUBARCH_C4),y) +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage_c4.o +else +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +endif +obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += thp.o diff --git a/arch/sw_64/mm/extable.c b/arch/sw_64/mm/extable.c new file mode 100644 index 0000000000000000000000000000000000000000..d2678e12a1b1cb85ab257d6260fa33f0a7c36179 --- /dev/null +++ b/arch/sw_64/mm/extable.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +int fixup_exception(struct pt_regs *regs, unsigned long pc) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(pc); + if (fixup) { + unsigned int valreg = fixup->fixup.bits.valreg; + unsigned int errreg = fixup->fixup.bits.errreg; + + if (valreg != 31) + regs->regs[valreg] = 0; + if (errreg != 31) + regs->regs[errreg] = -EFAULT; + pc += fixup->fixup.bits.nextinsn; + regs->pc = pc; + + return 1; + } + return 0; +} diff --git a/arch/sw_64/mm/fault.c b/arch/sw_64/mm/fault.c new file mode 100644 index 0000000000000000000000000000000000000000..e76560a7edca3fb796482aa91daae69146f6852f --- /dev/null +++ b/arch/sw_64/mm/fault.c @@ -0,0 +1,305 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +#include +#include +#include + +#include + +__read_mostly bool segv_debug_enabled; + +#ifdef CONFIG_KPROBES +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + int ret = 0; + /* kprobe_running() needs smp_processor_id() */ + if (!user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, mmcsr)) + ret = 1; + preempt_enable(); + } + return ret; +} +#else +static inline int notify_page_fault(struct pt_regs *regs, unsigned long mmcsr) +{ + return 0; +} +#endif + +extern void die(char *, struct pt_regs *, long); +extern void show_regs(struct pt_regs *regs); + +void show_all_vma(void) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + MA_STATE(mas, 0, 0, 0); + + if (!mm) + return; + + mas.tree = &mm->mm_mt; + + for (int i = 0; (vma = mas_find(&mas, ULONG_MAX)) != NULL; i++) { + unsigned long start = vma->vm_start; + unsigned long end = vma->vm_end; + struct file *file = vma->vm_file; + + if (file) + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, file = %s, name = %s\n", + i, start, end, (end - start), vma->vm_flags, + file->f_path.dentry->d_name.name, current->comm); + else + pr_info("vma[%d]: [%#lx, %#lx], len = %#lx, flags = %#lx, name = %s\n", + i, start, end, (end - start), vma->vm_flags, current->comm); + } +} + +/* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to handle_mm_fault(). + * + * mmcsr: + * 0 = translation not valid + * 1 = access violation + * 2 = fault-on-read + * 3 = fault-on-execute + * 4 = fault-on-write + * + * cause: + * -1 = instruction fetch + * 0 = load + * 1 = store + * + * Registers $9 through $15 are saved in a block just prior to `regs' and + * are saved and restored around the call to allow exception code to + * modify them. 
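+ *
+ * For example, a store to an unmapped user address arrives here with
+ * mmcsr == 0 (translation not valid) and cause == 1 (store), while an
+ * instruction fetch from a mapping without VM_EXEC arrives with
+ * cause == -1 and is rejected by the access checks below.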
+ */
+
+unsigned long show_va_to_pa(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd = NULL;
+	p4d_t *p4d = NULL;
+	pud_t *pud = NULL;
+	pmd_t *pmd = NULL;
+	pte_t *pte = NULL;
+	unsigned long ret = 0UL;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_none(*pgd)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx\n", addr, pgd_val(*pgd));
+		goto out;
+	}
+	p4d = p4d_offset(pgd, addr);
+	if (p4d_none(*p4d)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, p4d = %#lx\n",
+			 addr, pgd_val(*pgd), p4d_val(*p4d));
+		goto out;
+	}
+	pud = pud_offset(p4d, addr);
+	if (pud_none(*pud)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx\n",
+			 addr, pgd_val(*pgd), pud_val(*pud));
+		goto out;
+	}
+	pmd = pmd_offset(pud, addr);
+	if (pmd_none(*pmd)) {
+		ret = 0;
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx\n",
+			 addr, pgd_val(*pgd), pud_val(*pud), pmd_val(*pmd));
+		goto out;
+	}
+	pte = pte_offset_map(pmd, addr);
+	if (pte_present(*pte)) {
+		ret = (unsigned long)pfn_to_virt(pte_pfn(*pte));
+		pr_debug("addr = %#lx, pgd = %#lx, pud = %#lx, pmd = %#lx, pte = %#lx, ret = %#lx\n",
+			 addr, *(unsigned long *)pgd, *(unsigned long *)pud,
+			 *(unsigned long *)pmd, *(unsigned long *)pte, ret);
+	}
+out:
+	return ret;
+}
+
+extern int do_match(unsigned long address, unsigned long mmcsr, long cause, struct pt_regs *regs);
+
+asmlinkage void notrace
+do_page_fault(unsigned long address, unsigned long mmcsr,
+	      long cause, struct pt_regs *regs)
+{
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	int si_code = SEGV_MAPERR;
+	vm_fault_t fault;
+	unsigned int flags = FAULT_FLAG_DEFAULT;
+
+	if (notify_page_fault(regs, mmcsr))
+		return;
+
+	if (unlikely(mmcsr >= MMCSR__DA_MATCH)) {
+		if (do_match(address, mmcsr, cause, regs) == 1)
+			return;
+	}
+
+	/*
+	 * The mmap lock is not held yet at this point, so a user-mode
+	 * access violation must bail out through the no-semaphore path.
+	 */
+	if (unlikely(mmcsr == MMCSR__ACV1)) {
+		if (!user_mode(regs))
+			goto no_context;
+		goto bad_area_nosemaphore;
+	}
+
+	/*
+	 * If we're in an interrupt context, or have no user context,
+	 * we must not take the fault.
+	 */
+	if (!mm || faulthandler_disabled())
+		goto no_context;
+
+	if (user_mode(regs))
+		flags |= FAULT_FLAG_USER;
+
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+
+retry:
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (!vma)
+		goto bad_area_nosemaphore;
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it.
+	 */
+	si_code = SEGV_ACCERR;
+	if (cause < 0) {
+		if (!(vma->vm_flags & VM_EXEC))
+			goto bad_area;
+	} else if (!cause) {
+		/* Allow reads even for write-only mappings */
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+			goto bad_area;
+	} else {
+		if (!(vma->vm_flags & VM_WRITE))
+			goto bad_area;
+		flags |= FAULT_FLAG_WRITE;
+	}
+
+	/*
+	 * If for any reason at all we couldn't handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
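+	 *
+	 * For example, when handle_mm_fault() returns VM_FAULT_RETRY the
+	 * mmap lock has already been dropped, so the code below only sets
+	 * FAULT_FLAG_TRIED and branches back to the retry label instead
+	 * of re-faulting forever.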
+ */ + fault = handle_mm_fault(vma, address, flags, regs); + + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return; + } + + /* The fault is fully completed (including releasing mmap lock) */ + if (fault & VM_FAULT_COMPLETED) + return; + + if (unlikely(fault & VM_FAULT_ERROR)) { + if (fault & VM_FAULT_OOM) + goto out_of_memory; + else if (fault & VM_FAULT_SIGSEGV) + goto bad_area; + else if (fault & VM_FAULT_SIGBUS) + goto do_sigbus; + BUG(); + } + + if (fault & VM_FAULT_MAJOR) { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, + regs, address); + current->maj_flt++; + } else { + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, + regs, address); + current->min_flt++; + } + + if (fault & VM_FAULT_RETRY) { + flags |= FAULT_FLAG_TRIED; + + /* No need to mmap_read_unlock(mm) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + + mmap_read_unlock(mm); + + return; + + /* + * Something tried to access memory that isn't in our memory map. + * Fix it, but check if it's kernel or user first. + */ + bad_area: + mmap_read_unlock(mm); + + bad_area_nosemaphore: + if (user_mode(regs)) + goto do_sigsegv; + + no_context: + /* Are we prepared to handle this fault as an exception? */ + if (fixup_exception(regs, regs->pc)) + return; + + /* + * Oops. The kernel tried to access some bad page. We'll have to + * terminate things with extreme prejudice. + */ + pr_alert("Unable to handle kernel paging request at virtual address %016lx\n", + address); + die("Oops", regs, cause); + make_task_dead(SIGKILL); + + /* + * We ran out of memory, or some other thing happened to us that + * made us unable to handle the page fault gracefully. + */ + out_of_memory: + mmap_read_unlock(mm); + if (!user_mode(regs)) + goto no_context; + pagefault_out_of_memory(); + return; + + do_sigbus: + mmap_read_unlock(mm); + /* + * Send a sigbus, regardless of whether we were in kernel + * or user mode. + */ + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *) address); + if (!user_mode(regs)) + goto no_context; + return; + + do_sigsegv: + force_sig_fault(SIGSEGV, si_code, (void __user *) address); + + if (unlikely(segv_debug_enabled)) { + pr_info("fault: want to send_segv: pid %d, cause = %#lx, mmcsr = %#lx, address = %#lx, pc %#lx\n", + current->pid, cause, mmcsr, address, regs->pc); + show_regs(regs); + show_all_vma(); + } +} diff --git a/arch/sw_64/mm/hugetlbpage.c b/arch/sw_64/mm/hugetlbpage.c new file mode 100644 index 0000000000000000000000000000000000000000..fae1fa8bf7df50342b2608d33286619b57fe2d1c --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage.c @@ -0,0 +1,313 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW64 Huge TLB Page Support for Kernel. + */ + +#include +#include +#include +#include + +#include +#include + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
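+ *
+ * The bit test below distinguishes the three possible encodings of a
+ * populated entry:
+ *
+ *   _PAGE_VALID | _PAGE_LEAF    huge leaf entry          -> 1
+ *   _PAGE_VALID only            next-level page table    -> 0
+ *   neither (swap-style entry)  migration or hwpoison    -> 1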
+ */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_VALID | _PAGE_LEAF)) != _PAGE_VALID; +} + +int pud_huge(pud_t pud) +{ + return 0; +} + +pte_t *sw64_256m_hugepte_alloc(struct mm_struct *mm, pud_t *pud, unsigned long addr) +{ + int i; + struct page *page; + pmd_t *pmd; + pte_t *pte = NULL; + + pmd = pmd_alloc(mm, pud, addr); + if (pmd == NULL) + return NULL; + + pte = pte_alloc_map(mm, pmd, addr); + if (pte == NULL) + return NULL; + + page = virt_to_page(pte); + pmd_val(*pmd) = pmd_val(*pmd) | _PAGE_LEAF | _PAGE_CONT; + for (i = 1; i < 32; i++) + pmd_val(*(pmd+i)) = pmd_val(*pmd); + return pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (pud) { + if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE << 5)) { + pte = sw64_256m_hugepte_alloc(mm, pud, addr); + } else { + pr_warn("Unsupported page size %lx\n", sz); + return NULL; + } + } + BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + if (pgd_present(*pgd)) { + p4d = p4d_offset(pgd, addr); + if (p4d_present(*p4d)) { + pud = pud_offset(p4d, addr); + if (pud_present(*pud)) { + pmd = pmd_offset(pud, addr); + if (!pmd_present(*pmd)) + return NULL; + if (pmd_val(*pmd) & _PAGE_CONT) + pte = pte_offset_map(pmd, addr); + else + pte = (pte_t *) pmd; + } + } + } + return pte; +} + +static inline int sw64_huge_pmd_bad(pmd_t pmd) +{ + return !(((pmd_val(pmd) & ~_PFN_MASK) == _PAGE_TABLE) || + ((pmd_val(pmd) & _PAGE_CONT) == _PAGE_CONT)); +} + +static inline int sw64_huge_pmd_none_or_clear_bad(pmd_t *pmd) +{ + if (pmd_none(*pmd)) + return 1; + if (unlikely(sw64_huge_pmd_bad(*pmd))) { + pmd_clear_bad(pmd); + return 1; + } + return 0; +} + +static void sw64_huge_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd, + unsigned long addr) +{ + if ((((unsigned long)pmd & 0xffUL) == 0) && + ((pmd_val(*pmd) & _PAGE_CONT) == _PAGE_CONT)) { + pgtable_t token = pmd_pgtable(*pmd); + + pmd_clear(pmd); + pte_free_tlb(tlb, token, addr); + mm_dec_nr_ptes(tlb->mm); + } else { + pmd_clear(pmd); + } +} + +static inline void sw64_huge_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pmd_t *pmd; + unsigned long next; + unsigned long start; + + start = addr; + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (sw64_huge_pmd_none_or_clear_bad(pmd)) + continue; + sw64_huge_free_pte_range(tlb, pmd, addr); + } while (pmd++, addr = next, addr != end); + + start &= PUD_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PUD_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pmd = pmd_offset(pud, start); + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); + mm_dec_nr_pmds(tlb->mm); +} + +static inline void sw64_huge_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d, + unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + pud_t *pud; + unsigned long 
next; + unsigned long start; + + start = addr; + pud = pud_offset(p4d, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + sw64_huge_free_pmd_range(tlb, pud, addr, next, floor, ceiling); + } while (pud++, addr = next, addr != end); + + start &= PGDIR_MASK; + if (start < floor) + return; + if (ceiling) { + ceiling &= PGDIR_MASK; + if (!ceiling) + return; + } + if (end - 1 > ceiling - 1) + return; + + pud = pud_offset(p4d, start); + p4d_clear(p4d); + pud_free_tlb(tlb, pud, start); + mm_dec_nr_puds(tlb->mm); +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
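+	 *
+	 * Concretely (an illustrative case, not from the original patch):
+	 * with a multi-gigabyte RLIMIT_STACK, mm->mmap_base sits far below
+	 * TASK_SIZE, so a huge reservation may not fit under it even though
+	 * the gap above TASK_UNMAPPED_BASE is still free; the bottom-up
+	 * retry below finds that gap.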
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + +unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} + +#if (defined(CONFIG_FORCE_MAX_ZONEORDER) && (CONFIG_FORCE_MAX_ZONEORDER >= 16)) +static __init int sw64_256m_hugetlb_init(void) +{ + if (!size_to_hstate(1UL << (PMD_SHIFT + 5))) + hugetlb_add_hstate(PMD_SHIFT + 5 - PAGE_SHIFT); + return 0; +} +arch_initcall(sw64_256m_hugetlb_init); +#endif +#endif /* CONFIG_HUGETLB_PAGE */ + +bool __init arch_hugetlb_valid_size(unsigned long size) +{ + switch (size) { + case PMD_SIZE: + case (PMD_SIZE<<5): + return true; + } + + return false; +} diff --git a/arch/sw_64/mm/hugetlbpage_c4.c b/arch/sw_64/mm/hugetlbpage_c4.c new file mode 100644 index 0000000000000000000000000000000000000000..913389cd257704f67f0bdaa9be97754d15a9b8a9 --- /dev/null +++ b/arch/sw_64/mm/hugetlbpage_c4.c @@ -0,0 +1,452 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SW_64 Huge TLB Page Support for Kernel. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* + * pmd_huge() returns 1 if @pmd is hugetlb related entry, that is normal + * hugetlb entry or non-present (migration or hwpoisoned) hugetlb entry. + * Otherwise, returns 0. 
+ */ +int pmd_huge(pmd_t pmd) +{ + return !pmd_none(pmd) && + (pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} + +int pud_huge(pud_t pud) +{ + return !pud_none(pud) && + (pud_val(pud) & (_PAGE_PRESENT|_PAGE_LEAF)) != _PAGE_PRESENT; +} +EXPORT_SYMBOL(pud_huge); + +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +static inline int num_contig_ptes(unsigned long size, size_t *pgsize) +{ + int contig_ptes = 0; + + *pgsize = size; + + switch (size) { + case PUD_SIZE: + case PMD_SIZE: + contig_ptes = 1; + break; + case CONT_PMD_SIZE: + *pgsize = PMD_SIZE; + contig_ptes = CONT_PMDS; + break; + default: + break; + } + + return contig_ptes; +} + +static pte_t get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, + unsigned long pgsize, unsigned long ncontig) +{ + pte_t orig_pte = huge_ptep_get(ptep); + unsigned long i; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + return orig_pte; +} + +static pte_t get_clear_contig_flush(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); + + flush_tlb_range(&vma, saddr, addr); + return orig_pte; +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pte_t *pte = NULL; + + pgd = pgd_offset(mm, addr); + p4d = p4d_alloc(mm, pgd, addr); + pud = pud_alloc(mm, p4d, addr); + if (!pud) + return NULL; + + if (sz == PUD_SIZE) { + pte = (pte_t *)pud; + } else if (sz == PMD_SIZE) { + if (want_pmd_share(vma, addr) && pud_none(*pud)) + pte = huge_pmd_share(mm, vma, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); + } else if (sz == (PMD_SIZE * CONT_PMDS)) { + pte = (pte_t *)pmd_alloc(mm, pud, addr); + WARN_ON(addr & (sz - 1)); + } + + WARN_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); + return pte; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, + unsigned long sz) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd = NULL; + + pgd = pgd_offset(mm, addr); + if (!pgd_present(*pgd)) + return NULL; + + p4d = p4d_offset(pgd, addr); + if (!p4d_present(*p4d)) + return NULL; + + pud = pud_offset(p4d, addr); + + if (sz != PUD_SIZE && pud_none(*pud)) + return NULL; + /* hugepage or swap? 
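+	 * pud_huge() matches a present leaf PUD, while !pud_present()
+	 * matches a non-present migration or hwpoison entry; either way
+	 * the entry is handed back as a pte_t for the caller to decode.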
*/ + if (pud_huge(*pud) || !pud_present(*pud)) + return (pte_t *)pud; + /* table; check the next level */ + + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + pmd = pmd_offset(pud, addr); + if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && + pmd_none(*pmd)) + return NULL; + if (pmd_huge(*pmd) || !pmd_present(*pmd)) + return (pte_t *)pmd; + + return NULL; +} + +pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t pagesize = 1UL << shift; + + if (pagesize == CONT_PMD_SIZE) { + entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); + } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + } + return entry; +} + +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); +} + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + size_t pgsize; + int i; + int ncontig; + unsigned long pfn; + pgprot_t hugeprot; + + /* + * Code needs to be expanded to handle huge swap and migration + * entries. Needed for HUGETLB and MEMORY_FAILURE. + */ + WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + set_pte_at(mm, addr, ptep, pte); + return; + } + + ncontig = num_contig_ptes(sz, &pgsize); + pfn = pte_pfn(pte); + hugeprot = pte_pgprot(pte); + + get_and_clear(mm, addr, ptep, pgsize, ncontig); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte(ptep, pte); +} + +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long pfn; + pgprot_t hugeprot; + int ncontig, i; + size_t pgsize; + pte_t pte; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_set_wrprotect(mm, addr, ptep); + return; + } + + ncontig = CONT_PMDS; + + pte = get_and_clear(mm, addr, ptep, pgsize, ncontig); + pte = pte_wrprotect(pte); + + hugeprot = pte_pgprot(pte); + pfn = pte_pfn(pte); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int ncontig; + size_t pgsize; + pte_t orig_pte = huge_ptep_get(ptep); + + if (!pte_cont(orig_pte)) + return ptep_get_and_clear(mm, addr, ptep); + + ncontig = CONT_PMDS; + + return get_and_clear(mm, addr, ptep, pgsize, ncontig); +} + +pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + struct mm_struct *mm = vma->vm_mm; + size_t pgsize; + int ncontig; + + if (!pte_cont(READ_ONCE(*ptep))) + return ptep_clear_flush(vma, addr, ptep); + + ncontig = CONT_PMDS; + return get_clear_contig_flush(mm, addr, ptep, pgsize, ncontig); +} + +static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) +{ + int i; + + if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + return 1; + + for (i = 0; i < ncontig; i++) { + pte_t orig_pte = huge_ptep_get(ptep + i); + + if (pte_dirty(pte) != pte_dirty(orig_pte)) + return 1; + + if (pte_young(pte) != pte_young(orig_pte)) + return 1; + } + + return 0; +} + 
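+/*
+ * Updating a contiguous hugepage span follows a break-before-make
+ * pattern: all CONT_PMDS entries are cleared (folding the dirty/young
+ * bits of every slot into one pte_t) before the new value is written
+ * back to each slot. A minimal sketch of the sequence implemented by
+ * huge_ptep_set_access_flags() below, assuming pgsize == PMD_SIZE:
+ *
+ *	pte_t orig = get_and_clear(mm, addr, ptep, PMD_SIZE, CONT_PMDS);
+ *
+ *	if (pte_dirty(orig))
+ *		pte = pte_mkdirty(pte);
+ *	for (i = 0; i < CONT_PMDS; i++, ptep++, addr += PMD_SIZE)
+ *		set_pte_at(mm, addr, ptep, pfn_pte(pte_pfn(pte), pte_pgprot(pte)));
+ */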
+int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int ncontig, i; + size_t pgsize = 0; + unsigned long pfn = pte_pfn(pte); + pgprot_t hugeprot; + pte_t orig_pte; + + if (!pte_cont(pte)) + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + + ncontig = CONT_PMDS; + + if (!__cont_access_flags_changed(ptep, pte, ncontig)) + return 0; + + orig_pte = get_and_clear(vma->vm_mm, addr, ptep, pgsize, ncontig); + flush_tlb_fix_spurious_fault(vma, addr, ptep); + + /* Make sure we don't lose the dirty or young state */ + if (pte_dirty(orig_pte)) + pte = pte_mkdirty(pte); + + if (pte_young(orig_pte)) + pte = pte_mkyoung(pte); + + hugeprot = pte_pgprot(pte); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize) + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); + + return 1; +} + +#ifdef CONFIG_HUGETLB_PAGE +static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, + unsigned long addr, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + + info.flags = 0; + info.length = len; + info.low_limit = current->mm->mmap_legacy_base; + info.high_limit = TASK_SIZE; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + return vm_unmapped_area(&info); +} + +static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, + unsigned long addr0, unsigned long len, + unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct vm_unmapped_area_info info; + unsigned long addr; + + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = current->mm->mmap_base; + info.align_mask = PAGE_MASK & ~huge_page_mask(h); + info.align_offset = 0; + addr = vm_unmapped_area(&info); + + /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. 
+ */ + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = TASK_SIZE; + addr = vm_unmapped_area(&info); + } + + return addr; +} + + unsigned long +hugetlb_get_unmapped_area(struct file *file, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + + if (len & ~huge_page_mask(h)) + return -EINVAL; + if (len > TASK_SIZE) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (prepare_hugepage_range(file, addr, len)) + return -EINVAL; + return addr; + } + + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) + return hugetlb_get_unmapped_area_bottomup(file, addr, len, + pgoff, flags); + else + return hugetlb_get_unmapped_area_topdown(file, addr, len, + pgoff, flags); +} +#endif /* CONFIG_HUGETLB_PAGE */ + +static __init int setup_hugepagesz(char *opt) +{ + unsigned long ps = memparse(opt, &opt); + + switch (ps) { + case PUD_SIZE: + case PMD_SIZE * CONT_PMDS: + case PMD_SIZE: + hugetlb_add_hstate(ilog2(ps) - PAGE_SHIFT); + return 1; + } + + pr_err("hugepagesz: Unsupported page size %lu M\n", + ps >> 20); + return 0; +} +__setup("hugepagesz=", setup_hugepagesz); diff --git a/arch/sw_64/mm/init.c b/arch/sw_64/mm/init.c new file mode 100644 index 0000000000000000000000000000000000000000..1f402809128f21d133e92dc436026f58cc6a6e5a --- /dev/null +++ b/arch/sw_64/mm/init.c @@ -0,0 +1,338 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 1995 Linus Torvalds + */ + +/* 2.3.x zone allocator, 1999 Andrea Arcangeli */ + +#include +#include +#include +#include +#include +#include + +#include +#include + +struct mem_desc_t mem_desc; +#ifndef CONFIG_NUMA +struct numa_node_desc_t numa_nodes_desc[1]; +#endif /* CONFIG_NUMA */ + +/* + * empty_zero_page is a special page that is used for + * zero-initialized data and COW. 
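+ *
+ * For example, the first read fault on an untouched anonymous mapping
+ * is serviced by mapping this page read-only; a private page is only
+ * allocated when the mapping is first written to (copy-on-write).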
+ */ +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); +pg_data_t *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); + +pgd_t swapper_pg_dir[1024] __aligned(PAGE_SIZE); +static pud_t vmalloc_pud[1024] __aligned(PAGE_SIZE); + +static phys_addr_t mem_start; +static phys_addr_t mem_size_limit; + +#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE +unsigned long memory_block_size_bytes(void) +{ + if (is_in_guest()) + return MIN_MEMORY_BLOCK_SIZE_VM_MEMHP; + else + return MIN_MEMORY_BLOCK_SIZE; +} +#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ + +static int __init setup_mem_size(char *p) +{ + char *oldp; + unsigned long start, size; + + start = 0; + oldp = p; + size = memparse(p, &p); + if (p == oldp) + return -EINVAL; + + if (*p == '@') + start = memparse(p + 1, &p); + + mem_start = start; + mem_size_limit = size; + return 0; +} +early_param("mem", setup_mem_size); + +#if defined(CONFIG_SUBARCH_C3B) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret, *init; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + init = pgd_offset(&init_mm, 0UL); + if (ret) + pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]); + + return ret; +} +#elif defined(CONFIG_SUBARCH_C4) +pgd_t * +pgd_alloc(struct mm_struct *mm) +{ + pgd_t *ret; + + ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + + return ret; +} +#endif + +/* Set up initial PCB, VPTB, and other such nicities. */ + +static inline void +switch_to_system_map(void) +{ + memset(swapper_pg_dir, 0, PAGE_SIZE); + update_ptbr_sys(virt_to_phys(swapper_pg_dir)); + tbiv(); +} + +void __init callback_init(void) +{ + pgd_t *pgd; + p4d_t *p4d; + + switch_to_system_map(); + + /* Allocate one PGD and one PUD. */ + pgd = pgd_offset_k(VMALLOC_START); + p4d = p4d_offset(pgd, VMALLOC_START); + p4d_populate(&init_mm, p4d, (pud_t *)vmalloc_pud); +} + +void __init zone_sizes_init(void) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES]; + + memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); + +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = min(MAX_DMA32_PFN, max_low_pfn); +#endif + max_zone_pfns[ZONE_NORMAL] = max_low_pfn; + + free_area_init(max_zone_pfns); +} + +/* + * paging_init() sets up the memory map. + */ +void __init paging_init(void) +{ +} + +void __init mem_detect(void) +{ + int i; + + mem_desc.phys_base = 0; + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) + mem_desc.phys_size += socket_desc[i].socket_mem; + } + + if (mem_start >= NODE0_START) { + mem_desc.base = mem_start; + } else { + mem_desc.base = NODE0_START; + mem_size_limit -= NODE0_START - mem_start; + } + + if (mem_size_limit && mem_size_limit < mem_desc.phys_size - NODE0_START) + mem_desc.size = mem_size_limit; + else + mem_desc.size = mem_desc.phys_size - NODE0_START; +} + +void __init sw64_memblock_init(void) +{ + memblock_add(mem_desc.base, mem_desc.size); + + memblock_remove(1ULL << MAX_PHYSMEM_BITS, PHYS_ADDR_MAX); + + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); + + memblock_allow_resize(); + memblock_initialized = true; + process_memmap(); + + /* Make sure kernel text is in memory range. */ + memblock_add(__pa_symbol(_text), _end - _text); + memblock_reserve(__pa_symbol(_text), _end - _text); + + /* Make sure initrd is in memory range. 
*/ + if (sunway_boot_params->initrd_start) { + phys_addr_t base = __boot_pa(sunway_boot_params->initrd_start); + phys_addr_t size = sunway_boot_params->initrd_size; + + memblock_add(base, size); + memblock_reserve(base, size); + } + + /* end of DRAM range may have been changed */ + max_pfn = max_low_pfn = PFN_DOWN(memblock_end_of_DRAM()); +} + +#ifndef CONFIG_NUMA +void __init sw64_numa_init(void) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + memblock_set_node(mem_desc.base, mem_desc.size, &memblock.memory, 0); + nd_pa = memblock_phys_alloc(nd_size, SMP_CACHE_BYTES); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != 0) + pr_info("NODE_DATA(%d) on node %d\n", 0, tnid); + + node_data[0] = nd; + memset(NODE_DATA(0), 0, sizeof(pg_data_t)); + NODE_DATA(0)->node_id = 0; + NODE_DATA(0)->node_start_pfn = mem_desc.base >> PAGE_SHIFT; + NODE_DATA(0)->node_spanned_pages = mem_desc.size >> PAGE_SHIFT; + node_set_online(0); +} +#endif /* CONFIG_NUMA */ + +void __init +mem_init(void) +{ + set_max_mapnr(max_low_pfn); + high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); +#ifdef CONFIG_SWIOTLB + swiotlb_init(true, SWIOTLB_VERBOSE); +#endif + memblock_free_all(); +} + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node, altmap); +} + +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +} +#endif + +#ifdef CONFIG_HAVE_MEMBLOCK +#ifndef MIN_MEMBLOCK_ADDR +#define MIN_MEMBLOCK_ADDR __pa(PAGE_OFFSET) +#endif +#ifndef MAX_MEMBLOCK_ADDR +#define MAX_MEMBLOCK_ADDR ((phys_addr_t)~0) +#endif +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + const u64 phys_offset = MIN_MEMBLOCK_ADDR; + + if (acpi_disabled) { + if (!PAGE_ALIGNED(base)) { + if (size < PAGE_SIZE - (base & ~PAGE_MASK)) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + size -= PAGE_SIZE - (base & ~PAGE_MASK); + base = PAGE_ALIGN(base); + } + size &= PAGE_MASK; + + if (base > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base + size - 1 > MAX_MEMBLOCK_ADDR) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + ((u64)MAX_MEMBLOCK_ADDR) + 1, base + size); + size = MAX_MEMBLOCK_ADDR - base + 1; + } + + if (base + size < phys_offset) { + pr_warn("Ignoring memory block 0x%llx - 0x%llx\n", + base, base + size); + return; + } + + if (base < phys_offset) { + pr_warn("Ignoring memory range 0x%llx - 0x%llx\n", + base, phys_offset); + size -= phys_offset - base; + base = phys_offset; + } + memblock_add(base, size); + } else + return; +} +#endif + +#ifdef CONFIG_MEMORY_HOTPLUG +int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + int ret; + + ret = __add_pages(nid, start_pfn, nr_pages, params); + if (ret) + pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n", + __func__, ret); + + return ret; +} + +void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + __remove_pages(start_pfn, 
nr_pages, altmap); +} +#endif + +static const pgprot_t protection_map[16] = { + [VM_NONE] = _PAGE_P(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_READ] = _PAGE_P(_PAGE_FOE | _PAGE_FOW), + [VM_WRITE] = _PAGE_P(_PAGE_FOE), + [VM_WRITE | VM_READ] = _PAGE_P(_PAGE_FOE), + [VM_EXEC] = _PAGE_P(_PAGE_FOW | _PAGE_FOR), + [VM_EXEC | VM_READ] = _PAGE_P(_PAGE_FOW), + [VM_EXEC | VM_WRITE] = _PAGE_P(0), + [VM_EXEC | VM_WRITE | VM_READ] = _PAGE_P(0), + [VM_SHARED] = _PAGE_S(_PAGE_FOE | _PAGE_FOW | + _PAGE_FOR), + [VM_SHARED | VM_READ] = _PAGE_S(_PAGE_FOE | _PAGE_FOW), + [VM_SHARED | VM_WRITE] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_WRITE | VM_READ] = _PAGE_S(_PAGE_FOE), + [VM_SHARED | VM_EXEC] = _PAGE_S(_PAGE_FOW | _PAGE_FOR), + [VM_SHARED | VM_EXEC | VM_READ] = _PAGE_S(_PAGE_FOW), + [VM_SHARED | VM_EXEC | VM_WRITE] = _PAGE_S(0), + [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = _PAGE_S(0) +}; +DECLARE_VM_GET_PAGE_PROT diff --git a/arch/sw_64/mm/mmap.c b/arch/sw_64/mm/mmap.c new file mode 100644 index 0000000000000000000000000000000000000000..a7a189fc36d675c44bdf4f9192867de1409dc480 --- /dev/null +++ b/arch/sw_64/mm/mmap.c @@ -0,0 +1,102 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + struct vm_unmapped_area_info info; + unsigned long limit; + + /* Support 32 bit heap. */ + if (current->personality & ADDR_LIMIT_32BIT) + limit = 0x80000000; + else + limit = TASK_SIZE; + + if (len > limit) + return -ENOMEM; + + if (flags & MAP_FIXED) { + if (addr + len > TASK_SIZE) + return -EINVAL; + + return addr; + } + + if (addr) { + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vm_start_gap(vma))) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = limit; + info.align_mask = 0; + info.align_offset = pgoff << PAGE_SHIFT; + + return vm_unmapped_area(&info); +} + +unsigned long arch_mmap_rnd(void) +{ + unsigned long rnd; + + /* 8MB for 32bit, 256MB for 64bit */ + if (current->personality & ADDR_LIMIT_32BIT) + rnd = get_random_long() & 0x7ffffful; + else + rnd = get_random_long() & 0xffffffful; + + return rnd << PAGE_SHIFT; +} + +/* + * This function, called very early during the creation of a new process VM + * image, sets up which VM layout function to use: + */ +void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) +{ + unsigned long random_factor = 0UL; + + if (current->flags & PF_RANDOMIZE) + random_factor = arch_mmap_rnd(); + + /* + * Fall back to the standard layout if the personality bit is set, or + * if the expected stack growth is unlimited: + */ + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; + mm->get_unmapped_area = arch_get_unmapped_area; +} + +SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, + unsigned long, prot, unsigned long, flags, unsigned long, fd, + unsigned long, off) +{ + unsigned long ret = -EINVAL; + + if ((off + PAGE_ALIGN(len)) < off) + goto out; + if (off & ~PAGE_MASK) + goto out; + ret = ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT); + out: + return ret; +} diff --git a/arch/sw_64/mm/numa.c b/arch/sw_64/mm/numa.c new file mode 100644 index 0000000000000000000000000000000000000000..fcf1f97a7840fae1232654735e23c4e29432b56c --- 
/dev/null +++ b/arch/sw_64/mm/numa.c @@ -0,0 +1,466 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DISCONTIGMEM NUMA sw64 support. + */ + +#include +#include +#include +#include + +#include + +int cpu_to_node_map[NR_CPUS]; +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +struct numa_node_desc_t numa_nodes_desc[MAX_NUMNODES]; +nodemask_t numa_nodes_parsed __initdata; + +static int numa_distance_cnt; +static u8 *numa_distance; +int numa_off; + +static __init int numa_setup(char *opt) +{ + if (!opt) + return -EINVAL; + if (!strncmp(opt, "off", 3)) + numa_off = 1; + return 0; +} +early_param("numa", numa_setup); + +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: cpumask_of_node() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) + */ +static void __init setup_node_to_cpumask_map(void) +{ + int node; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); + + /* allocate and clear the mapping */ + for (node = 0; node < nr_node_ids; node++) { + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); + cpumask_clear(node_to_cpumask_map[node]); + } + + /* cpumask_of_node() will now work */ + pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); +} + +/** + * numa_add_memblk - Set node id to memblk + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int ret; + + ret = memblock_set_node(start, (end - start), &memblock.memory, nid); + if (ret < 0) { + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", + start, (end - 1), nid); + return ret; + } + + node_set(nid, numa_nodes_parsed); + return ret; +} + +/** + * Initialize NODE_DATA for a node on the local memory + */ +static void __init setup_node_data(int nid, unsigned long start_pfn, unsigned long end_pfn) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (start_pfn >= end_pfn) + pr_info("Initmem setup node %d []\n", nid); + + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#018llx-%#018llx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +/** + * numa_free_distance + * + * Free current distance table. + */ +void __init numa_free_distance(void) +{ + size_t size; + + if (!numa_distance) + return; + + size = numa_distance_cnt * numa_distance_cnt * + sizeof(numa_distance[0]); + + memblock_free(numa_distance, size); + numa_distance_cnt = 0; + numa_distance = NULL; +} + +/** + * + * Create a new NUMA distance table. 
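+ *
+ * The table is a flat nr_node_ids x nr_node_ids byte matrix addressed
+ * as numa_distance[from * numa_distance_cnt + to], seeded with
+ * LOCAL_DISTANCE (10) on the diagonal and REMOTE_DISTANCE (20)
+ * everywhere else; for a two-node system the initial table is
+ *
+ *	{ 10, 20,
+ *	  20, 10 }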
+ * + */ +static int __init numa_alloc_distance(void) +{ + size_t size; + phys_addr_t phys; + int i, j; + + size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); + phys = memblock_phys_alloc(size, PAGE_SIZE); + if (WARN_ON(!phys)) + return -ENOMEM; + + numa_distance = __va(phys); + numa_distance_cnt = nr_node_ids; + + /* fill with the default distances */ + for (i = 0; i < numa_distance_cnt; i++) + for (j = 0; j < numa_distance_cnt; j++) { + numa_distance[i * numa_distance_cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + } + + pr_info("Initialized distance table, cnt=%d\n", numa_distance_cnt); + + return 0; +} + +/** + * numa_set_distance - Set inter node NUMA distance from node to node. + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. + * If distance table doesn't exist, a warning is printed. + * + * If @from or @to is higher than the highest known node or lower than zero + * or @distance doesn't make sense, the call is ignored. + * + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + pr_warn_once("Warning: distance table not allocated yet\n"); + return; + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt || + from < 0 || to < 0) { + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +/** + * Return NUMA distance @from to @to + */ +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? 
LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +static int __init numa_register_nodes(void) +{ + int nid; + struct memblock_region *mblk; + + /* Check that valid nid is set to memblks */ + for_each_mem_region(mblk) { + pr_info("memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + if (mblk->nid == NUMA_NO_NODE || mblk->nid >= MAX_NUMNODES) { + pr_warn("Warning: invalid memblk node %d [mem %#018llx-%#018llx]\n", + mblk->nid, mblk->base, + mblk->base + mblk->size - 1); + return -EINVAL; + } + } + + /* Finally register nodes */ + for_each_node_mask(nid, numa_nodes_parsed) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + setup_node_data(nid, start_pfn, end_pfn); + node_set_online(nid); + } + + /* Setup online nodes to actual nodes */ + node_possible_map = numa_nodes_parsed; + + return 0; +} + +static int __init numa_init(int (*init_func)(void)) +{ + int ret; + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + numa_free_distance(); + + ret = numa_alloc_distance(); + if (ret < 0) + return ret; + + ret = init_func(); + if (ret < 0) + return ret; + + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); + return -EINVAL; + } + + ret = numa_register_nodes(); + if (ret < 0) + return ret; + + setup_node_to_cpumask_map(); + + return 0; +} + +static void __init get_numa_info_socket(void) +{ + int i; + + phys_addr_t base = 0; + + for (i = 0; i < MAX_NUMSOCKETS; i++) { + if (socket_desc[i].is_online) { + numa_nodes_desc[i].base = base; + numa_nodes_desc[i].size = socket_desc[i].socket_mem; + base += numa_nodes_desc[i].size; + } + } +} + +static int __init manual_numa_init(void) +{ + int ret, nid; + struct memblock_region *mblk; + phys_addr_t node_base, node_size, node_end; + + if (numa_off) { + pr_info("NUMA disabled\n"); /* Forced off on command line. 
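+		 * e.g. booted with "numa=off", as parsed by numa_setup().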
*/ + pr_info("Faking one node at [mem %#018llx-%#018llx]\n", + memblock_start_of_DRAM(), memblock_end_of_DRAM() - 1); + for_each_mem_region(mblk) { + ret = numa_add_memblk(0, mblk->base, mblk->base + mblk->size); + if (!ret) + continue; + + pr_err("NUMA init failed\n"); + return ret; + } + } else { + get_numa_info_socket(); + + for (nid = 0; nid < MAX_NUMNODES; nid++) { + node_base = numa_nodes_desc[nid].base; + node_size = numa_nodes_desc[nid].size; + node_end = node_base + node_size; + ret = 0; + + if (!node_end) + continue; + + for_each_mem_region(mblk) { + if (mblk->base >= node_base && mblk->base < node_end) { + if (mblk->base + mblk->size < node_end) + ret = numa_add_memblk(nid, mblk->base, mblk->base + mblk->size); + else + ret = numa_add_memblk(nid, mblk->base, node_end); + } + } + + if (!node_size) { + memblock_add_node(node_base, node_size, nid, MEMBLOCK_NONE); + node_set(nid, numa_nodes_parsed); + pr_info("Setup empty node %d from %#llx\n", nid, node_base); + } + + if (!ret) + continue; + + pr_err("NUMA init failed for node %d, [mem %#018llx-%#018llx]", + nid, node_base, node_end - 1); + } + } + + return 0; +} + +void __init sw64_numa_init(void) +{ + if (!numa_off) { + if (!acpi_disabled && !numa_init(acpi_numa_init)) + return; + if (acpi_disabled && !numa_init(of_numa_init)) + return; + } + + numa_init(manual_numa_init); +} + +void cpu_set_node(void) +{ + int i; + + if (numa_off) { + for (i = 0; i < nr_cpu_ids; i++) + cpu_to_node_map[i] = 0; + } else { + int rr, default_node, cid; + + rr = first_node(node_online_map); + for (i = 0; i < nr_cpu_ids; i++) { + cid = cpu_to_rcid(i); + default_node = rcid_to_domain_id(cid); + if (node_online(default_node)) { + cpu_to_node_map[i] = default_node; + } else { + cpu_to_node_map[i] = rr; + rr = next_node(rr, node_online_map); + if (rr == MAX_NUMNODES) + rr = first_node(node_online_map); + } + } + } + /* + * Setup numa_node for cpu 0 before per_cpu area for booting. + * Actual setup of numa_node will be done in native_smp_prepare_cpus(). + */ + set_cpu_numa_node(0, cpu_to_node_map[0]); +} + +void numa_store_cpu_info(unsigned int cpu) +{ + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); +} + +void __init early_map_cpu_to_node(unsigned int cpu, int nid) +{ + /* fallback to node 0 */ + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) + nid = 0; + + cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. + */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. 
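+ *
+ * Typical use, as a sketch:
+ *
+ *	int cpu;
+ *
+ *	for_each_cpu(cpu, cpumask_of_node(nid))
+ *		pr_info("cpu%d belongs to node %d\n", cpu, nid);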
+ */ +const struct cpumask *cpumask_of_node(int node) +{ + + if (node == NUMA_NO_NODE) { + pr_warn("%s: NUMA_NO_NODE\n", __func__); + return cpu_all_mask; + } + + if (WARN_ON(node < 0 || node >= nr_node_ids)) { + pr_warn("%s: invalid node %d\n", __func__, node); + return cpu_none_mask; + } + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) { + pr_warn("%s: uninitialized node %d\n", __func__, node); + return cpu_online_mask; + } + + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); +#endif + +static void numa_update_cpu(unsigned int cpu, bool remove) +{ + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void numa_add_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, false); +} + +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); + set_cpu_numa_node(cpu, NUMA_NO_NODE); +} diff --git a/arch/sw_64/mm/physaddr.c b/arch/sw_64/mm/physaddr.c new file mode 100644 index 0000000000000000000000000000000000000000..3c6ecb8ee86ab65cf0575977aab841e4161d05f9 --- /dev/null +++ b/arch/sw_64/mm/physaddr.c @@ -0,0 +1,39 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +unsigned long __phys_addr(unsigned long addr) +{ + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + return addr; +} +EXPORT_SYMBOL(__phys_addr); + +bool __virt_addr_valid(unsigned long addr) +{ + if (addr < PAGE_OFFSET) + return false; + addr &= ~PAGE_OFFSET; + return pfn_valid(addr >> PAGE_SHIFT); +} +EXPORT_SYMBOL(__virt_addr_valid); + +#ifdef CONFIG_SUBARCH_C3B +#define LEGACY_BOOT_VA 0xffffffff80000000 +unsigned long __boot_phys_addr(unsigned long addr) +{ + if (addr >= LEGACY_BOOT_VA) { + addr &= ~LEGACY_BOOT_VA; + VIRTUAL_BUG_ON(addr >= KERNEL_IMAGE_SIZE); + } else { + VIRTUAL_BUG_ON(addr < PAGE_OFFSET); + addr &= ~PAGE_OFFSET; + VIRTUAL_BUG_ON(!phys_addr_valid(addr)); + } + return addr; +} +#endif diff --git a/arch/sw_64/mm/thp.c b/arch/sw_64/mm/thp.c new file mode 100644 index 0000000000000000000000000000000000000000..833bb59f79d0e9f01fb9813eec8fc5cb24df9da3 --- /dev/null +++ b/arch/sw_64/mm/thp.c @@ -0,0 +1,55 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + int changed = !pmd_same(*pmdp, entry); + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + if (changed && dirty) { + *pmdp = entry; + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } + + return changed; +} +int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long addr, pmd_t *pmdp) +{ + int ret = 0; + + if (pmd_young(*pmdp)) + ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, + (unsigned long *)pmdp); + return ret; +} + +int pmdp_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int young; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + + young = pmdp_test_and_clear_young(vma, address, pmdp); + if (young) + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + + return young; +} +void pmdp_splitting_flush(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + int set; + + VM_BUG_ON(address & ~HPAGE_PMD_MASK); + set = !test_and_set_bit(_PAGE_BIT_SPLITTING, (unsigned long *)pmdp); + if (set) { + /* need tlb flush only to serialize 
against gup-fast */ + flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE); + } +} diff --git a/arch/sw_64/net/Makefile b/arch/sw_64/net/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d4663b4bf509894e62c3b02c69726ee5717c2dd4 --- /dev/null +++ b/arch/sw_64/net/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Arch-specific network modules +# +obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o diff --git a/arch/sw_64/net/bpf_jit.h b/arch/sw_64/net/bpf_jit.h new file mode 100644 index 0000000000000000000000000000000000000000..929036d8ea6b10daec13166c1e87f63165d99f1a --- /dev/null +++ b/arch/sw_64/net/bpf_jit.h @@ -0,0 +1,368 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF JIT compiler for SW64 + * + * Copyright (C) Mao Minkai + * Author: Mao Minkai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +#ifndef _SW64_NET_BPF_JIT_H +#define _SW64_NET_BPF_JIT_H + +/* SW64 instruction field shift */ +#define SW64_BPF_OPCODE_OFFSET 26 +#define SW64_BPF_RA_OFFSET 21 +#define SW64_BPF_RB_OFFSET 16 +#define SW64_BPF_SIMPLE_ALU_IMM_OFFSET 13 +#define SW64_BPF_SIMPLE_ALU_FUNC_OFFSET 5 +#define SW64_BPF_SIMPLE_ALU_RC_OFFSET 0 +#define SW64_BPF_LS_FUNC_OFFSET 12 + +/* SW64 instruction opcodes */ +#define SW64_BPF_OPCODE_CALL 0x01 +#define SW64_BPF_OPCODE_RET 0x02 +#define SW64_BPF_OPCODE_JMP 0x03 +#define SW64_BPF_OPCODE_BR 0x04 +#define SW64_BPF_OPCODE_BSR 0x05 +#define SW64_BPF_OPCODE_MISC 0x06 +#define SW64_BPF_OPCODE_LOCK 0x08 +#define SW64_BPF_OPCODE_ALU_REG 0x10 +#define SW64_BPF_OPCODE_ALU_IMM 0x12 +#define SW64_BPF_OPCODE_LDBU 0x20 +#define SW64_BPF_OPCODE_LDHU 0x21 +#define SW64_BPF_OPCODE_LDW 0x22 +#define SW64_BPF_OPCODE_LDL 0x23 +#define SW64_BPF_OPCODE_STB 0x28 +#define SW64_BPF_OPCODE_STH 0x29 +#define SW64_BPF_OPCODE_STW 0x2A +#define SW64_BPF_OPCODE_STL 0x2B +#define SW64_BPF_OPCODE_BEQ 0x30 +#define SW64_BPF_OPCODE_BNE 0x31 +#define SW64_BPF_OPCODE_BLT 0x32 +#define SW64_BPF_OPCODE_BLE 0x33 +#define SW64_BPF_OPCODE_BGT 0x34 +#define SW64_BPF_OPCODE_BGE 0x35 +#define SW64_BPF_OPCODE_BLBC 0x36 +#define SW64_BPF_OPCODE_BLBS 0x37 +#define SW64_BPF_OPCODE_LDI 0x3E +#define SW64_BPF_OPCODE_LDIH 0x3F + +/* SW64 MISC instructions function codes */ +#define SW64_BPF_FUNC_MISC_RD_F 0x1000 +#define SW64_BPF_FUNC_MISC_WR_F 0x1020 + +/* SW64 LOCK instructions function codes */ +#define SW64_BPF_FUNC_LOCK_LLDW 0x0 +#define SW64_BPF_FUNC_LOCK_LLDL 0x1 +#define SW64_BPF_FUNC_LOCK_LSTW 0x8 +#define SW64_BPF_FUNC_LOCK_LSTL 0x9 + +/* SW64 ALU instructions function codes */ +#define SW64_BPF_FUNC_ALU_ADDW 0x00 +#define SW64_BPF_FUNC_ALU_SUBW 0x01 +#define SW64_BPF_FUNC_ALU_ADDL 0x08 +#define SW64_BPF_FUNC_ALU_SUBL 0x09 +#define SW64_BPF_FUNC_ALU_MULW 0x10 +#define SW64_BPF_FUNC_ALU_MULL 0x18 +#define SW64_BPF_FUNC_ALU_CMPEQ 0x28 +#define SW64_BPF_FUNC_ALU_CMPLT 0x29 +#define SW64_BPF_FUNC_ALU_CMPLE 0x2A +#define SW64_BPF_FUNC_ALU_CMPULT 0x2B +#define SW64_BPF_FUNC_ALU_CMPULE 0x2C +#define SW64_BPF_FUNC_ALU_AND 0x38 +#define SW64_BPF_FUNC_ALU_BIC 0x39 
+#define SW64_BPF_FUNC_ALU_BIS		0x3A
+#define SW64_BPF_FUNC_ALU_ORNOT		0x3B
+#define SW64_BPF_FUNC_ALU_XOR		0x3C
+#define SW64_BPF_FUNC_ALU_EQV		0x3D
+#define SW64_BPF_FUNC_ALU_SLL		0x48
+#define SW64_BPF_FUNC_ALU_SRL		0x49
+#define SW64_BPF_FUNC_ALU_SRA		0x4A
+#define SW64_BPF_FUNC_ALU_ZAP		0x68
+#define SW64_BPF_FUNC_ALU_ZAPNOT	0x69
+#define SW64_BPF_FUNC_ALU_SEXTB		0x6A
+#define SW64_BPF_FUNC_ALU_SEXTH		0x6B
+
+/* special instruction used in jit_fill_hole() */
+#define SW64_BPF_ILLEGAL_INSN	(0x1ff00000)	/* pri_ret/b $31 */
+
+enum sw64_bpf_registers {
+	SW64_BPF_REG_V0		= 0,	/* keep return value */
+	SW64_BPF_REG_T0		= 1,
+	SW64_BPF_REG_T1		= 2,
+	SW64_BPF_REG_T2		= 3,
+	SW64_BPF_REG_T3		= 4,
+	SW64_BPF_REG_T4		= 5,
+	SW64_BPF_REG_T5		= 6,
+	SW64_BPF_REG_T6		= 7,
+	SW64_BPF_REG_T7		= 8,
+	SW64_BPF_REG_S0		= 9,	/* callee saved */
+	SW64_BPF_REG_S1		= 10,	/* callee saved */
+	SW64_BPF_REG_S2		= 11,	/* callee saved */
+	SW64_BPF_REG_S3		= 12,	/* callee saved */
+	SW64_BPF_REG_S4		= 13,	/* callee saved */
+	SW64_BPF_REG_S5		= 14,	/* callee saved */
+	SW64_BPF_REG_S6		= 15,	/* callee saved */
+	SW64_BPF_REG_FP		= 15,	/* frame pointer if necessary */
+	SW64_BPF_REG_A0		= 16,	/* argument 0 */
+	SW64_BPF_REG_A1		= 17,	/* argument 1 */
+	SW64_BPF_REG_A2		= 18,	/* argument 2 */
+	SW64_BPF_REG_A3		= 19,	/* argument 3 */
+	SW64_BPF_REG_A4		= 20,	/* argument 4 */
+	SW64_BPF_REG_A5		= 21,	/* argument 5 */
+	SW64_BPF_REG_T8		= 22,
+	SW64_BPF_REG_T9		= 23,
+	SW64_BPF_REG_T10	= 24,
+	SW64_BPF_REG_T11	= 25,
+	SW64_BPF_REG_RA		= 26,	/* callee saved, keep return address */
+	SW64_BPF_REG_T12	= 27,
+	SW64_BPF_REG_PV		= 27,
+	SW64_BPF_REG_AT		= 28,	/* reserved by assembler */
+	SW64_BPF_REG_GP		= 29,	/* global pointer */
+	SW64_BPF_REG_SP		= 30,	/* callee saved, stack pointer */
+	SW64_BPF_REG_ZR		= 31	/* read 0 */
+};
+
+/* SW64 load and store instructions */
+#define SW64_BPF_LDBU(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDBU, dst, rb, offset16)
+#define SW64_BPF_LDHU(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDHU, dst, rb, offset16)
+#define SW64_BPF_LDW(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDW, dst, rb, offset16)
+#define SW64_BPF_LDL(dst, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDL, dst, rb, offset16)
+#define SW64_BPF_STB(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STB, src, rb, offset16)
+#define SW64_BPF_STH(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STH, src, rb, offset16)
+#define SW64_BPF_STW(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STW, src, rb, offset16)
+#define SW64_BPF_STL(src, rb, offset16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_STL, src, rb, offset16)
+#define SW64_BPF_LDI(dst, rb, imm16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDI, dst, rb, imm16)
+#define SW64_BPF_LDIH(dst, rb, imm16) \
+	sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_LDIH, dst, rb, imm16)
+
+/* SW64 lock instructions */
+#define SW64_BPF_LLDW(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDW)
+#define SW64_BPF_LLDL(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LLDL)
+#define SW64_BPF_LSTW(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTW)
+#define SW64_BPF_LSTL(ra, rb, offset16) \
+	sw64_bpf_gen_format_ls_func(SW64_BPF_OPCODE_LOCK, \
+			ra, rb, offset16, SW64_BPF_FUNC_LOCK_LSTL)
+#define SW64_BPF_RD_F(ra) \
+	
sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_RD_F) +#define SW64_BPF_WR_F(ra) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_MISC, \ + ra, SW64_BPF_REG_ZR, SW64_BPF_FUNC_MISC_WR_F) + +/* SW64 ALU instructions REG format */ +#define SW64_BPF_ADDW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_REG(rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + SW64_BPF_REG_ZR, rb, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 ALU instructions IMM format */ +#define SW64_BPF_ADDW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDW) +#define SW64_BPF_ADDL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ADDL) +#define SW64_BPF_SUBW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBW) +#define SW64_BPF_SUBL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_SUBL) +#define SW64_BPF_MULW_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULW) +#define SW64_BPF_MULL_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_MULL) +#define SW64_BPF_ZAP_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAP) +#define SW64_BPF_ZAPNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ZAPNOT) +#define SW64_BPF_SEXTB_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTB) +#define SW64_BPF_SEXTH_IMM(imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + SW64_BPF_REG_ZR, imm8, dst, SW64_BPF_FUNC_ALU_SEXTH) + +/* SW64 bit shift instructions REG format */ +#define SW64_BPF_SLL_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_REG(src, rb, dst) \ + 
sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_REG(src, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + src, rb, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 bit shift instructions IMM format */ +#define SW64_BPF_SLL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SLL) +#define SW64_BPF_SRL_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRL) +#define SW64_BPF_SRA_IMM(src, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + src, imm8, dst, SW64_BPF_FUNC_ALU_SRA) + +/* SW64 control instructions */ +#define SW64_BPF_CALL(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_CALL, ra, rb, 0) +#define SW64_BPF_RET(rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_RET, SW64_BPF_REG_ZR, rb, 0) +#define SW64_BPF_JMP(ra, rb) \ + sw64_bpf_gen_format_ls(SW64_BPF_OPCODE_JMP, ra, rb, 0) +#define SW64_BPF_BR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BR, ra, offset) +#define SW64_BPF_BSR(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BSR, ra, offset) +#define SW64_BPF_BEQ(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BEQ, ra, offset) +#define SW64_BPF_BNE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BNE, ra, offset) +#define SW64_BPF_BLT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLT, ra, offset) +#define SW64_BPF_BLE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLE, ra, offset) +#define SW64_BPF_BGT(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGT, ra, offset) +#define SW64_BPF_BGE(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BGE, ra, offset) +#define SW64_BPF_BLBC(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBC, ra, offset) +#define SW64_BPF_BLBS(ra, offset) \ + sw64_bpf_gen_format_br(SW64_BPF_OPCODE_BLBS, ra, offset) + +/* SW64 bit logic instructions REG format */ +#define SW64_BPF_AND_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_ORNOT) +#define SW64_BPF_XOR_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_XOR) +#define SW64_BPF_EQV_REG(ra, rb, dst) \ + sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \ + ra, rb, dst, SW64_BPF_FUNC_ALU_EQV) + +/* SW64 bit logic instructions IMM format */ +#define SW64_BPF_AND_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_AND) +#define SW64_BPF_ANDNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIC) +#define SW64_BPF_BIS_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_BIS) +#define SW64_BPF_ORNOT_IMM(ra, imm8, dst) \ + sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \ + ra, imm8, dst, SW64_BPF_FUNC_ALU_ORNOT) 
+#define SW64_BPF_XOR_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_XOR)
+#define SW64_BPF_EQV_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_EQV)
+
+/* SW64 compare instructions REG format */
+#define SW64_BPF_CMPEQ_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+		ra, rb, dst, SW64_BPF_FUNC_ALU_CMPEQ)
+#define SW64_BPF_CMPLT_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+		ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLT)
+#define SW64_BPF_CMPLE_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+		ra, rb, dst, SW64_BPF_FUNC_ALU_CMPLE)
+#define SW64_BPF_CMPULT_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+		ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULT)
+#define SW64_BPF_CMPULE_REG(ra, rb, dst) \
+	sw64_bpf_gen_format_simple_alu_reg(SW64_BPF_OPCODE_ALU_REG, \
+		ra, rb, dst, SW64_BPF_FUNC_ALU_CMPULE)
+
+/* SW64 compare instructions IMM format */
+#define SW64_BPF_CMPEQ_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPEQ)
+#define SW64_BPF_CMPLT_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLT)
+#define SW64_BPF_CMPLE_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPLE)
+#define SW64_BPF_CMPULT_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULT)
+#define SW64_BPF_CMPULE_IMM(ra, imm8, dst) \
+	sw64_bpf_gen_format_simple_alu_imm(SW64_BPF_OPCODE_ALU_IMM, \
+		ra, imm8, dst, SW64_BPF_FUNC_ALU_CMPULE)
+
+#endif /* _SW64_NET_BPF_JIT_H */
diff --git a/arch/sw_64/net/bpf_jit_comp.c b/arch/sw_64/net/bpf_jit_comp.c
new file mode 100644
index 0000000000000000000000000000000000000000..31202dd0f9cf8dd8fd51d0d30c94ea422d74c8b7
--- /dev/null
+++ b/arch/sw_64/net/bpf_jit_comp.c
@@ -0,0 +1,1455 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF JIT compiler for SW64
+ *
+ * Copyright (C) Mao Minkai
+ * Author: Mao Minkai
+ *
+ * This file is taken from arch/arm64/net/bpf_jit_comp.c
+ * Copyright (C) 2014-2016 Zi Shen Lim
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#include +#include +#include + +#include + +#include "bpf_jit.h" + +#define TCALL_CNT (MAX_BPF_JIT_REG + 0) + +static const int bpf2sw64[] = { + /* return value from in-kernel function, and exit value from eBPF */ + [BPF_REG_0] = SW64_BPF_REG_V0, + /* arguments from eBPF program to in-kernel function */ + [BPF_REG_1] = SW64_BPF_REG_A0, + [BPF_REG_2] = SW64_BPF_REG_A1, + [BPF_REG_3] = SW64_BPF_REG_A2, + [BPF_REG_4] = SW64_BPF_REG_A3, + [BPF_REG_5] = SW64_BPF_REG_A4, + /* callee saved registers that in-kernel function will preserve */ + [BPF_REG_6] = SW64_BPF_REG_S0, + [BPF_REG_7] = SW64_BPF_REG_S1, + [BPF_REG_8] = SW64_BPF_REG_S2, + [BPF_REG_9] = SW64_BPF_REG_S3, + /* read-only frame pointer to access stack */ + [BPF_REG_FP] = SW64_BPF_REG_FP, + /* tail_call_cnt */ + [TCALL_CNT] = SW64_BPF_REG_S4, + /* temporary register for blinding constants */ + [BPF_REG_AX] = SW64_BPF_REG_T11, +}; + +struct jit_ctx { + const struct bpf_prog *prog; + int idx; // JITed instruction index + int current_tmp_reg; + int epilogue_offset; + int *insn_offset; // [bpf_insn_idx] = jited_insn_idx + int exentry_idx; + u32 *image; // JITed instruction + u32 stack_size; +}; + +struct sw64_jit_data { + struct bpf_binary_header *header; + u8 *image; // bpf instruction + struct jit_ctx ctx; +}; + +static inline u32 sw64_bpf_gen_format_br(int opcode, enum sw64_bpf_registers ra, u32 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + return opcode | ra | (disp & 0x1fffff); +} + +static inline u32 sw64_bpf_gen_format_ls(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + return opcode | ra | rb | (disp & 0xffff); +} + +static inline u32 sw64_bpf_gen_format_ls_func(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, u16 disp, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + function = function << SW64_BPF_LS_FUNC_OFFSET; + return opcode | ra | rb | function | (disp & 0xfff); +} + +static inline u32 sw64_bpf_gen_format_simple_alu_reg(int opcode, enum sw64_bpf_registers ra, + enum sw64_bpf_registers rb, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + rb = rb << SW64_BPF_RB_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | rb | function | rc; +} + +static inline u32 sw64_bpf_gen_format_simple_alu_imm(int opcode, enum sw64_bpf_registers ra, + u32 imm, enum sw64_bpf_registers rc, int function) +{ + opcode = opcode << SW64_BPF_OPCODE_OFFSET; + ra = ra << SW64_BPF_RA_OFFSET; + imm = (imm & 0xff) << SW64_BPF_SIMPLE_ALU_IMM_OFFSET; + rc = rc << SW64_BPF_SIMPLE_ALU_RC_OFFSET; + function = function << SW64_BPF_SIMPLE_ALU_FUNC_OFFSET; + return opcode | ra | imm | function | rc; +} + +static inline void emit(const u32 insn, struct jit_ctx *ctx) +{ + if (ctx->image != NULL) + ctx->image[ctx->idx] = insn; + + ctx->idx++; +} + +static inline int get_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg++; + /* Do not use 22-25. Should be more than enough. 
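+	 * Temporaries are handed out stack-style from $1-$7 (t0-t6), one
+	 * per get_tmp_reg() call, each released by a matching
+	 * put_tmp_reg().  $22-$25 are avoided because the divide helpers
+	 * clobber $23-$25 (see emit_sw64_divmod()) and $25 (t11) doubles
+	 * as BPF_REG_AX for constant blinding.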
*/ + if (unlikely(ctx->current_tmp_reg == 8)) { + pr_err("eBPF JIT %s[%d]: not enough temporary registers!\n", + current->comm, current->pid); + return -1; + } + return ctx->current_tmp_reg; +} + +static inline void put_tmp_reg(struct jit_ctx *ctx) +{ + ctx->current_tmp_reg--; + if (ctx->current_tmp_reg == 21) + ctx->current_tmp_reg = 7; +} + +static void emit_sw64_ldu32(const int dst, const u32 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= U32_MAX - S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 30) & 3; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 30, dst), ctx); + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_lds32(const int dst, const s32 imm, struct jit_ctx *ctx) +{ + s16 hi = imm >> 16; + s16 lo = imm & 0xffff; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + emit(SW64_BPF_LDIH(dst, SW64_BPF_REG_ZR, hi), ctx); + if (lo & 0x8000) { // sign bit is 1 + lo = lo & 0x7fff; + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } else { // sign bit is 0 + if (lo) + emit(SW64_BPF_LDI(dst, dst, lo), ctx); + } + + put_tmp_reg(ctx); +} + +static void emit_sw64_ldu64(const int dst, const u64 imm, struct jit_ctx *ctx) +{ + u16 imm_tmp; + u8 reg_tmp = get_tmp_reg(ctx); + + if (!imm) { + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, dst), ctx); + put_tmp_reg(ctx); + return; + } + + if (imm <= U32_MAX) { + put_tmp_reg(ctx); + return emit_sw64_ldu32(dst, (u32)imm, ctx); + } + + if (imm >= (U64_MAX - S16_MAX) || imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + put_tmp_reg(ctx); + return; + } + + imm_tmp = (imm >> 60) & 0xf; + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm_tmp), ctx); + if (imm_tmp) + emit(SW64_BPF_SLL_IMM(dst, 60, dst), ctx); + + imm_tmp = (imm >> 45) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 45, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 30) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 30, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = (imm >> 15) & 0x7fff; + if (imm_tmp) { + emit(SW64_BPF_LDI(reg_tmp, SW64_BPF_REG_ZR, imm_tmp), ctx); + emit(SW64_BPF_SLL_IMM(reg_tmp, 15, reg_tmp), ctx); + emit(SW64_BPF_ADDL_REG(dst, reg_tmp, dst), ctx); + } + + imm_tmp = imm & 
0x7fff; + if (imm_tmp) + emit(SW64_BPF_LDI(dst, dst, imm_tmp), ctx); + + put_tmp_reg(ctx); +} + +/* Do not change!!! See arch/sw_64/lib/divide.S for more detail */ +#define REG(x) "$"str(x) +#define str(x) #x +#define DIV_RET_ADDR 23 +#define DIVIDEND 24 +#define DIVISOR 25 +#define RESULT 27 + +#include +static void emit_sw64_divmod(const int dst, const int src, struct jit_ctx *ctx, u8 code) +{ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, dst, DIVIDEND), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, DIVISOR), ctx); + switch (BPF_CLASS(code)) { + case BPF_ALU: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divwu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remwu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_ZAP_IMM(RESULT, 0xf0, dst), ctx); + break; + case BPF_ALU64: + switch (BPF_OP(code)) { + case BPF_DIV: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__divlu, ctx); + break; + case BPF_MOD: + emit_sw64_ldu64(SW64_BPF_REG_PV, (u64)__remlu, ctx); + break; + } + emit(SW64_BPF_CALL(DIV_RET_ADDR, SW64_BPF_REG_PV), ctx); + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, RESULT, dst), ctx); + break; + } +} + +#undef REG +#undef str +#undef DIVIDEND +#undef DIVISOR +#undef RESULT + +/* STX XADD: lock *(u32 *)(dst + off) += src */ +static void emit_sw64_xadd32(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDW(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDW_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTW(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +/* STX XADD: lock *(u64 *)(dst + off) += src */ +static void emit_sw64_xadd64(const int src, int dst, s16 off, struct jit_ctx *ctx) +{ + int atomic_start; + int atomic_end; + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + u8 tmp3 = get_tmp_reg(ctx); + + if (off < -0x800 || off > 0x7ff) { + emit(SW64_BPF_LDI(tmp1, dst, off), ctx); + dst = tmp1; + off = 0; + } + + atomic_start = ctx->idx; + emit(SW64_BPF_LLDL(tmp2, dst, off), ctx); + emit(SW64_BPF_LDI(tmp3, SW64_BPF_REG_ZR, 1), ctx); + emit(SW64_BPF_WR_F(tmp3), ctx); + emit(SW64_BPF_ADDL_REG(tmp2, src, tmp2), ctx); + if (ctx->idx & 1) + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + emit(SW64_BPF_LSTL(tmp2, dst, off), ctx); + emit(SW64_BPF_RD_F(tmp3), ctx); + atomic_end = ctx->idx; + emit(SW64_BPF_BEQ(tmp3, atomic_start - atomic_end - 1), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe16(const int dst, struct jit_ctx *ctx) +{ + u8 tmp = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp), ctx); + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SRL_IMM(tmp, 8, tmp), ctx); + emit(SW64_BPF_SLL_IMM(dst, 8, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp, dst), ctx); + + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe32(const int dst, struct 
jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x8, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x4, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x2, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x1, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 24, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void emit_sw64_htobe64(const int dst, struct jit_ctx *ctx) +{ + u8 tmp1 = get_tmp_reg(ctx); + u8 tmp2 = get_tmp_reg(ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x80, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 56, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x40, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x20, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x10, tmp1), ctx); + emit(SW64_BPF_SRL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x08, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 8, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x04, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 24, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x02, tmp1), ctx); + emit(SW64_BPF_SLL_IMM(tmp1, 40, tmp1), ctx); + emit(SW64_BPF_BIS_REG(tmp2, tmp1, tmp2), ctx); + + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x01, dst), ctx); + emit(SW64_BPF_SLL_IMM(dst, 56, dst), ctx); + emit(SW64_BPF_BIS_REG(dst, tmp2, dst), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); +} + +static void jit_fill_hole(void *area, unsigned int size) +{ + unsigned long c = SW64_BPF_ILLEGAL_INSN; + + c |= c << 32; + __constant_c_memset(area, c, size); +} + +static int offset_to_epilogue(const struct jit_ctx *ctx); +static int bpf2sw64_offset(int bpf_idx, s32 off, const struct jit_ctx *ctx) +{ + int from = ctx->insn_offset[bpf_idx + 1]; + int to = ctx->insn_offset[bpf_idx + 1 + off]; + + if (ctx->image == NULL) + return 0; + + return to - from; +} + +static int offset_to_epilogue(const struct jit_ctx *ctx) +{ + if (ctx->image == NULL) + return 0; + + return ctx->epilogue_offset - ctx->idx; +} + +/* For tail call, jump to set up function call stack */ +#define PROLOGUE_OFFSET 11 + +static void build_prologue(struct jit_ctx *ctx, bool was_classic) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Save callee-saved registers */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -64), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_STL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_STL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_STL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_STL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_STL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_STL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_STL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + + /* Set up BPF prog stack base register 
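+	 * (BPF_REG_FP): FP snapshots SP right after the callee-saves, so
+	 * BPF stack accesses use a fixed base while SP itself is moved
+	 * further down below for the function call stack.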
*/ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_SP, fp), ctx); + if (!was_classic) + /* Initialize tail_call_cnt */ + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, tcc), ctx); + + /* Set up function call stack */ + ctx->stack_size = (ctx->prog->aux->stack_depth + 15) & (~15); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, -ctx->stack_size), ctx); +} + +static void build_epilogue(struct jit_ctx *ctx) +{ + const u8 r6 = bpf2sw64[BPF_REG_6]; + const u8 r7 = bpf2sw64[BPF_REG_7]; + const u8 r8 = bpf2sw64[BPF_REG_8]; + const u8 r9 = bpf2sw64[BPF_REG_9]; + const u8 fp = bpf2sw64[BPF_REG_FP]; + const u8 tcc = bpf2sw64[TCALL_CNT]; + + /* Destroy function call stack */ + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + + /* Restore callee-saved registers */ + emit(SW64_BPF_LDL(SW64_BPF_REG_RA, SW64_BPF_REG_SP, 0), ctx); + emit(SW64_BPF_LDL(fp, SW64_BPF_REG_SP, 8), ctx); + emit(SW64_BPF_LDL(r6, SW64_BPF_REG_SP, 16), ctx); + emit(SW64_BPF_LDL(r7, SW64_BPF_REG_SP, 24), ctx); + emit(SW64_BPF_LDL(r8, SW64_BPF_REG_SP, 32), ctx); + emit(SW64_BPF_LDL(r9, SW64_BPF_REG_SP, 40), ctx); + emit(SW64_BPF_LDL(tcc, SW64_BPF_REG_SP, 48), ctx); + emit(SW64_BPF_LDL(SW64_BPF_REG_GP, SW64_BPF_REG_SP, 56), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, 64), ctx); + + /* Return */ + emit(SW64_BPF_RET(SW64_BPF_REG_RA), ctx); +} + +static int emit_bpf_tail_call(struct jit_ctx *ctx) +{ + /* bpf_tail_call(void *ctx, struct bpf_map *prog_array_map, u32 index) */ + const u8 r2 = bpf2sw64[BPF_REG_2]; /* struct bpf_array *array */ + const u8 r3 = bpf2sw64[BPF_REG_3]; /* u32 index */ + + const u8 tmp = get_tmp_reg(ctx); + const u8 prg = get_tmp_reg(ctx); + const u8 tcc = bpf2sw64[TCALL_CNT]; + u64 offset; + static int out_idx; +#define out_offset (ctx->image ? 
(out_idx - ctx->idx - 1) : 0) + + /* if (index >= array->map.max_entries) + * goto out; + */ + offset = offsetof(struct bpf_array, map.max_entries); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &map.max_entries */ + emit(SW64_BPF_LDW(tmp, tmp, 0), ctx); /* tmp = *tmp = map.max_entries */ + emit(SW64_BPF_ZAP_IMM(tmp, 0xf0, tmp), ctx); /* map.max_entries is u32 */ + emit(SW64_BPF_ZAP_IMM(r3, 0xf0, r3), ctx); /* index is u32 */ + emit(SW64_BPF_CMPULE_REG(tmp, r3, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + + /* if (tail_call_cnt > MAX_TAIL_CALL_CNT) + * goto out; + * tail_call_cnt++; + */ + emit_sw64_ldu64(tmp, MAX_TAIL_CALL_CNT, ctx); + emit(SW64_BPF_CMPULT_REG(tmp, tcc, tmp), ctx); + emit(SW64_BPF_BNE(tmp, out_offset), ctx); + emit(SW64_BPF_ADDL_IMM(tcc, 1, tcc), ctx); + + /* prog = array->ptrs[index]; + * if (prog == NULL) + * goto out; + */ + offset = offsetof(struct bpf_array, ptrs); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(r2, tmp, tmp), ctx); /* tmp = r2 + tmp = &ptrs[0] */ + emit(SW64_BPF_SLL_IMM(r3, 3, prg), ctx); /* prg = r3 * 8, each entry is a pointer */ + emit(SW64_BPF_ADDL_REG(tmp, prg, prg), ctx); /* prg = tmp + prg = &ptrs[index] */ + emit(SW64_BPF_LDL(prg, prg, 0), ctx); /* prg = *prg = ptrs[index] = prog */ + emit(SW64_BPF_BEQ(prg, out_offset), ctx); + + /* goto *(prog->bpf_func + prologue_offset); */ + offset = offsetof(struct bpf_prog, bpf_func); + emit_sw64_ldu64(tmp, offset, ctx); + emit(SW64_BPF_ADDL_REG(prg, tmp, tmp), ctx); /* tmp = prg + tmp = &bpf_func */ + emit(SW64_BPF_LDL(tmp, tmp, 0), ctx); /* tmp = *tmp = bpf_func */ + emit(SW64_BPF_BEQ(tmp, out_offset), ctx); + emit(SW64_BPF_LDI(tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx); + emit(SW64_BPF_LDI(SW64_BPF_REG_SP, SW64_BPF_REG_SP, ctx->stack_size), ctx); + emit(SW64_BPF_JMP(SW64_BPF_REG_ZR, tmp), ctx); + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + + /* out */ + if (ctx->image == NULL) + out_idx = ctx->idx; + if (ctx->image != NULL && out_idx <= 0) + return -1; +#undef out_offset + return 0; +} + +/* For accesses to BTF pointers, add an entry to the exception table */ +static int add_exception_handler(const struct bpf_insn *insn, + struct jit_ctx *ctx, + int dst_reg) +{ + off_t offset; + unsigned long pc; + struct exception_table_entry *ex; + + if (!ctx->image) + /* First pass */ + return 0; + + if (!ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM) + return 0; + + if (WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries)) + return -EINVAL; + + ex = &ctx->prog->aux->extable[ctx->exentry_idx]; + pc = (unsigned long)&ctx->image[ctx->idx - 1]; + + offset = (long)&ex->insn - pc; + ex->insn = offset; + + ex->fixup.bits.nextinsn = sizeof(u32); + ex->fixup.bits.valreg = dst_reg; + ex->fixup.bits.errreg = SW64_BPF_REG_ZR; + + ctx->exentry_idx++; + return 0; +} + +/* JITs an eBPF instruction. + * Returns: + * 0 - successfully JITed an 8-byte eBPF instruction. + * >0 - successfully JITed a 16-byte eBPF instruction. + * <0 - failed to JIT. 
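+ *      The only 16-byte instruction is BPF_LD | BPF_IMM | BPF_DW,
+ *      for which build_insn() returns 1 so that build_body() can
+ *      skip over the second 8-byte slot.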
+ */ +static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) +{ + const u8 code = insn->code; + u8 dst = bpf2sw64[insn->dst_reg]; + u8 src = bpf2sw64[insn->src_reg]; + const u8 tmp1 __maybe_unused = get_tmp_reg(ctx); + const u8 tmp2 __maybe_unused = get_tmp_reg(ctx); + const s16 off = insn->off; + const s32 imm = insn->imm; + const int bpf_idx = insn - ctx->prog->insnsi; + s32 jmp_offset; + u64 func; + struct bpf_insn insn1; + u64 imm64; + int ret; + + switch (code) { + case BPF_ALU | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_X: + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, src, dst), ctx); + break; + case BPF_ALU | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_X: + emit(SW64_BPF_ADDL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_X: + emit(SW64_BPF_SUBL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_MUL | BPF_X: + emit(SW64_BPF_MULW_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_X: + emit(SW64_BPF_MULL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_X: + emit_sw64_divmod(dst, src, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_X: + emit(SW64_BPF_SLL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_RSH | BPF_X: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + fallthrough; + case BPF_ALU64 | BPF_RSH | BPF_X: + emit(SW64_BPF_SRL_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_ARSH | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_X: + emit(SW64_BPF_SRA_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_X: + emit(SW64_BPF_AND_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_X: + emit(SW64_BPF_BIS_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_X: + emit(SW64_BPF_XOR_REG(dst, src, dst), ctx); + break; + case BPF_ALU | BPF_NEG: + emit(SW64_BPF_SUBW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_NEG: + emit(SW64_BPF_SUBL_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + break; + case BPF_ALU | BPF_END | BPF_TO_LE: + switch (imm) { + case 16: + emit(SW64_BPF_ZAPNOT_IMM(dst, 0x3, dst), ctx); + break; + case 32: + emit(SW64_BPF_ZAPNOT_IMM(dst, 
0xf, dst), ctx); + break; + case 64: + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_LE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + case BPF_ALU | BPF_END | BPF_TO_BE: + switch (imm) { + case 16: + emit_sw64_htobe16(dst, ctx); + break; + case 32: + emit_sw64_htobe32(dst, ctx); + break; + case 64: + emit_sw64_htobe64(dst, ctx); + break; + default: + pr_err("eBPF JIT %s[%d]: BPF_TO_BE unknown size\n", + current->comm, current->pid); + return -EINVAL; + } + break; + + case BPF_ALU | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_ldu32(dst, imm, ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MOV | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) + emit(SW64_BPF_LDI(dst, SW64_BPF_REG_ZR, imm), ctx); + else + emit_sw64_lds32(dst, imm, ctx); + break; + case BPF_ALU | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_ADDW_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ADD | BPF_K: + if (imm >= S16_MIN && imm <= S16_MAX) { + emit(SW64_BPF_LDI(dst, dst, imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_ADDL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_SUB | BPF_K: + if (imm >= -S16_MAX && imm <= -S16_MIN) { + emit(SW64_BPF_LDI(dst, dst, -imm), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SUBL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_MUL | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_MULL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_MULL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_DIV | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_DIV | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_MOD | BPF_K: + emit_sw64_ldu32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU64 | BPF_MOD | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + emit_sw64_divmod(dst, tmp1, ctx, code); + break; + case BPF_ALU | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_LSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SLL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SLL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_RSH | BPF_K: + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + 
} else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU64 | BPF_RSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRL_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRL_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_ARSH | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, dst), ctx); + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_SRA_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_SRA_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_AND | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_AND_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_AND_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_OR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_BIS_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_BIS_REG(dst, tmp1, dst), ctx); + } + break; + case BPF_ALU | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_ldu32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_ALU64 | BPF_XOR | BPF_K: + if (imm >= 0 && imm <= U8_MAX) { + emit(SW64_BPF_XOR_IMM(dst, imm, dst), ctx); + } else { + emit_sw64_lds32(tmp1, imm, ctx); + emit(SW64_BPF_XOR_REG(dst, tmp1, dst), ctx); + } + break; + + case BPF_JMP | BPF_JA: + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_X: + case BPF_JMP32 | BPF_JGT | BPF_X: + case BPF_JMP32 | BPF_JLT | BPF_X: + case BPF_JMP32 | BPF_JGE | BPF_X: + case BPF_JMP32 | BPF_JLE | BPF_X: + case BPF_JMP32 | BPF_JNE | BPF_X: + case BPF_JMP32 | BPF_JSGT | BPF_X: + case BPF_JMP32 | BPF_JSLT | BPF_X: + case BPF_JMP32 | BPF_JSGE | BPF_X: + case BPF_JMP32 | BPF_JSLE | BPF_X: + case BPF_JMP32 | BPF_JSET | BPF_X: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, src, tmp1), ctx); + src = tmp1; + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JLT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JLE | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSLT | BPF_X: + case BPF_JMP | 
BPF_JSGE | BPF_X: + case BPF_JMP | BPF_JSLE | BPF_X: + case BPF_JMP | BPF_JSET | BPF_X: + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(src, dst, tmp1), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, src, tmp1), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(src, dst, tmp1), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, src, tmp1), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, src, tmp1), ctx); + emit(SW64_BPF_XOR_IMM(tmp1, 1, tmp1), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, src, tmp1), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(src, dst, tmp1), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, src, tmp1), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, src, tmp1), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp1, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP32 | BPF_JEQ | BPF_K: + case BPF_JMP32 | BPF_JGT | BPF_K: + case BPF_JMP32 | BPF_JLT | BPF_K: + case BPF_JMP32 | BPF_JGE | BPF_K: + case BPF_JMP32 | BPF_JLE | BPF_K: + case BPF_JMP32 | BPF_JNE | BPF_K: + case BPF_JMP32 | BPF_JSGT | BPF_K: + case BPF_JMP32 | BPF_JSLT | BPF_K: + case BPF_JMP32 | BPF_JSGE | BPF_K: + case BPF_JMP32 | BPF_JSLE | BPF_K: + case BPF_JMP32 | BPF_JSET | BPF_K: + emit(SW64_BPF_ADDW_REG(SW64_BPF_REG_ZR, dst, tmp2), ctx); + dst = tmp2; + fallthrough; + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JLT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JLE | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSLT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + case BPF_JMP | BPF_JSLE | BPF_K: + case BPF_JMP | BPF_JSET | BPF_K: + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_OP(code)) { + case BPF_JEQ: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGT: + emit(SW64_BPF_CMPULT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLT: + emit(SW64_BPF_CMPULT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JGE: + emit(SW64_BPF_CMPULE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JLE: + emit(SW64_BPF_CMPULE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JNE: + emit(SW64_BPF_CMPEQ_REG(dst, tmp1, tmp2), ctx); + emit(SW64_BPF_XOR_IMM(tmp2, 1, tmp2), ctx); + break; + case BPF_JSGT: + emit(SW64_BPF_CMPLT_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLT: + emit(SW64_BPF_CMPLT_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSGE: + emit(SW64_BPF_CMPLE_REG(tmp1, dst, tmp2), ctx); + break; + case BPF_JSLE: + emit(SW64_BPF_CMPLE_REG(dst, tmp1, tmp2), ctx); + break; + case BPF_JSET: + emit(SW64_BPF_AND_REG(dst, tmp1, tmp2), ctx); + break; + } + jmp_offset = bpf2sw64_offset(bpf_idx, off, ctx); + if (jmp_offset >= -0x100000 && jmp_offset <= 0xfffff) { + emit(SW64_BPF_BNE(tmp2, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_JMP out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_JMP | BPF_CALL: + func = (u64)__bpf_call_base + imm; + if ((func & ~(KERNEL_IMAGE_SIZE - 1)) != __START_KERNEL_map) + /* 
calling bpf program, switch to vmalloc addr */ + func = (func & U32_MAX) | VMALLOC_START; + emit_sw64_ldu64(SW64_BPF_REG_PV, func, ctx); + emit(SW64_BPF_CALL(SW64_BPF_REG_RA, SW64_BPF_REG_PV), ctx); + break; + + case BPF_JMP | BPF_TAIL_CALL: + if (emit_bpf_tail_call(ctx)) + return -EFAULT; + break; + + case BPF_JMP | BPF_EXIT: + // if this is the last bpf instruction, skip to epilogue + if (bpf_idx == ctx->prog->len - 1) + break; + jmp_offset = offset_to_epilogue(ctx) - 1; + // epilogue is always at the end, must jump forward + if (jmp_offset >= -1 && jmp_offset <= 0xfffff) { + if (ctx->image && !jmp_offset) + // if this is the last jited instruction, generate nop + emit(SW64_BPF_BIS_REG(SW64_BPF_REG_ZR, SW64_BPF_REG_ZR, SW64_BPF_REG_ZR), ctx); + else + emit(SW64_BPF_BR(SW64_BPF_REG_ZR, jmp_offset), ctx); + } else { + pr_err("eBPF JIT %s[%d]: BPF_EXIT out of range, %d instructions\n", + current->comm, current->pid, jmp_offset); + return -EINVAL; + } + break; + + case BPF_LD | BPF_IMM | BPF_DW: + insn1 = insn[1]; + imm64 = ((u64)insn1.imm << 32) | (u32)imm; + emit_sw64_ldu64(dst, imm64, ctx); + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 1; + + /* LDX: dst = *(size *)(src + off) */ + case BPF_LDX | BPF_MEM | BPF_W: + case BPF_LDX | BPF_MEM | BPF_H: + case BPF_LDX | BPF_MEM | BPF_B: + case BPF_LDX | BPF_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_DW: + case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM | BPF_H: + case BPF_LDX | BPF_PROBE_MEM | BPF_B: + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_LDW(dst, src, off), ctx); + emit(SW64_BPF_ZAP_IMM(dst, 0xf0, dst), ctx); + break; + case BPF_H: + emit(SW64_BPF_LDHU(dst, src, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_LDBU(dst, src, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_LDL(dst, src, off), ctx); + break; + } + + ret = add_exception_handler(insn, ctx, dst); + if (ret) + return ret; + break; + + /* ST: *(size *)(dst + off) = imm */ + case BPF_ST | BPF_MEM | BPF_W: + case BPF_ST | BPF_MEM | BPF_H: + case BPF_ST | BPF_MEM | BPF_B: + case BPF_ST | BPF_MEM | BPF_DW: + /* Load imm to a register then store it */ + emit_sw64_lds32(tmp1, imm, ctx); + switch (BPF_SIZE(code)) { + case BPF_W: + emit(SW64_BPF_STW(tmp1, dst, off), ctx); + break; + case BPF_H: + emit(SW64_BPF_STH(tmp1, dst, off), ctx); + break; + case BPF_B: + emit(SW64_BPF_STB(tmp1, dst, off), ctx); + break; + case BPF_DW: + emit(SW64_BPF_STL(tmp1, dst, off), ctx); + break; + } + break; + + /* STX: *(size *)(dst + off) = src */ + case BPF_STX | BPF_MEM | BPF_W: + emit(SW64_BPF_STW(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_H: + emit(SW64_BPF_STH(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_B: + emit(SW64_BPF_STB(src, dst, off), ctx); + break; + case BPF_STX | BPF_MEM | BPF_DW: + emit(SW64_BPF_STL(src, dst, off), ctx); + break; + + /* STX XADD: lock *(u32 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_W: + emit_sw64_xadd32(src, dst, off, ctx); + break; + /* STX XADD: lock *(u64 *)(dst + off) += src */ + case BPF_STX | BPF_XADD | BPF_DW: + emit_sw64_xadd64(src, dst, off, ctx); + break; + + default: + pr_err("eBPF JIT %s[%d]: unknown opcode 0x%02x\n", + current->comm, current->pid, code); + return -EINVAL; + } + + put_tmp_reg(ctx); + put_tmp_reg(ctx); + return 0; +} + +static int build_body(struct jit_ctx *ctx) +{ + const struct bpf_prog *prog = ctx->prog; + int i; + + for (i = 0; i < prog->len; i++) { + const struct bpf_insn *insn = &prog->insnsi[i]; + int ret; + + if (ctx->image == NULL) + 
+			ctx->insn_offset[i] = ctx->idx;
+		ret = build_insn(insn, ctx);
+		if (ret < 0)
+			return ret;
+		while (ret > 0) {
+			i++;
+			if (ctx->image == NULL)
+				ctx->insn_offset[i] = ctx->insn_offset[i - 1];
+			ret--;
+		}
+	}
+
+	return 0;
+}
+
+static int validate_code(struct jit_ctx *ctx)
+{
+	int i;
+
+	for (i = 0; i < ctx->idx; i++) {
+		if (ctx->image[i] == SW64_BPF_ILLEGAL_INSN)
+			return -1;
+	}
+
+	if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
+		return -1;
+
+	return 0;
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+	flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+	struct bpf_prog *tmp, *orig_prog = prog;
+	struct bpf_binary_header *header;
+	struct sw64_jit_data *jit_data;
+	bool was_classic = bpf_prog_was_classic(prog);
+	bool tmp_blinded = false;
+	bool extra_pass = false;
+	struct jit_ctx ctx;
+	int image_size, prog_size, extable_size;
+	u8 *image_ptr;
+
+	if (!prog->jit_requested)
+		return orig_prog;
+
+	tmp = bpf_jit_blind_constants(prog);
+	/* If blinding was requested and we failed during blinding,
+	 * we must fall back to the interpreter.
+	 */
+	if (IS_ERR(tmp))
+		return orig_prog;
+	if (tmp != prog) {
+		tmp_blinded = true;
+		prog = tmp;
+	}
+
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.insn_offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		prog_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+	memset(&ctx, 0, sizeof(ctx));
+	ctx.prog = prog;
+
+	ctx.insn_offset = kcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
+	if (ctx.insn_offset == NULL) {
+		prog = orig_prog;
+		goto out_off;
+	}
+
+	/*
+	 * 1. Initial fake pass to compute ctx->idx and fill in
+	 * ctx->insn_offset.
+	 */
+	build_prologue(&ctx, was_classic);
+
+	if (build_body(&ctx)) {
+		prog = orig_prog;
+		goto out_off;
+	}
+
+	ctx.insn_offset[prog->len] = ctx.epilogue_offset = ctx.idx;
+	build_epilogue(&ctx);
+
+	extable_size = prog->aux->num_exentries *
+		sizeof(struct exception_table_entry);
+
+	/* Now we know the actual image size. */
+	/* And we need extra 8 bytes for lock instructions alignment */
+	prog_size = sizeof(u32) * ctx.idx + 8;
+	image_size = prog_size + extable_size;
+	header = bpf_jit_binary_alloc(image_size, &image_ptr,
+				      sizeof(u32), jit_fill_hole);
+	if (header == NULL) {
+		prog = orig_prog;
+		goto out_off;
+	}
+
+	/* 2. Now, the actual pass. */
+
+	/* lock instructions need 8-byte alignment */
+	ctx.image = (u32 *)(((unsigned long)image_ptr + 7) & (~7));
+	if (extable_size)
+		prog->aux->extable = (void *)image_ptr + prog_size;
+skip_init_ctx:
+	ctx.idx = 0;
+	ctx.exentry_idx = 0;
+
+	build_prologue(&ctx, was_classic);
+
+	if (build_body(&ctx)) {
+		bpf_jit_binary_free(header);
+		prog = orig_prog;
+		goto out_off;
+	}
+
+	build_epilogue(&ctx);
+
+	/* 3. Extra pass to validate JITed code. */
+	if (validate_code(&ctx)) {
+		bpf_jit_binary_free(header);
+		prog = orig_prog;
+		goto out_off;
+	}
+
+	/* And we're done.
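+	 * When bpf_jit_enable > 1 the final image is also hex-dumped to
+	 * the kernel log, and the icache is flushed before the program
+	 * is published via prog->bpf_func.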
+	 */
+	if (bpf_jit_enable > 1)
+		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
+
+	bpf_flush_icache(header, ctx.image + ctx.idx);
+
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
+	prog->bpf_func = (void *)ctx.image;
+	prog->jited = 1;
+	prog->jited_len = prog_size;
+	if (ctx.current_tmp_reg) {
+		pr_err("eBPF JIT %s[%d]: unreleased temporary registers %d\n",
+		       current->comm, current->pid, ctx.current_tmp_reg);
+	}
+
+	if (!prog->is_func || extra_pass) {
+out_off:
+		kfree(ctx.insn_offset);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
+out:
+	if (tmp_blinded)
+		bpf_jit_prog_release_other(prog, prog == orig_prog ?
+					   tmp : orig_prog);
+	return prog;
+}
diff --git a/arch/sw_64/pci/Makefile b/arch/sw_64/pci/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..327efb163b12f7f517c94f4591550fb50688f441
--- /dev/null
+++ b/arch/sw_64/pci/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the linux kernel.
+#
+
+obj-y			+= pci.o pci-legacy.o pci-sysfs.o
+obj-$(CONFIG_ACPI)	+= acpi.o
+obj-$(CONFIG_PCI_MSI)	+= msi.o
diff --git a/arch/sw_64/pci/acpi.c b/arch/sw_64/pci/acpi.c
new file mode 100644
index 0000000000000000000000000000000000000000..1353994320b3cfa15cbded2383382e79b0051e3c
--- /dev/null
+++ b/arch/sw_64/pci/acpi.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+
+struct pci_root_info {
+	struct acpi_pci_root_info info;
+	struct pci_config_window *cfg;
+};
+
+static void pci_acpi_release_root_info(struct acpi_pci_root_info *ci)
+{
+	struct pci_root_info *pci_ri;
+
+	pci_ri = container_of(ci, struct pci_root_info, info);
+	pci_ecam_free(pci_ri->cfg);
+	kfree(ci->ops);
+	kfree(pci_ri);
+}
+
+int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
+{
+	struct pci_config_window *cfg = bus->sysdata;
+	struct acpi_device *adev = to_acpi_device(cfg->parent);
+	struct acpi_pci_root *root = acpi_driver_data(adev);
+
+	return root->segment;
+}
+
+/**
+ * Look up the MCFG table entry corresponding to the current
+ * PCI host controller, and set up its config space mapping.
+ */
+static struct pci_config_window *
+pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
+{
+	struct device *dev = &root->device->dev;
+	struct pci_config_window *cfg = NULL;
+	const struct pci_ecam_ops *ecam_ops = NULL;
+	struct resource *bus_res = &root->secondary;
+	struct resource cfg_res;
+	struct acpi_device *adev = NULL;
+	int ret = 0, bus_shift = 0;
+	u16 seg = root->segment;
+
+	ret = pci_mcfg_lookup(root, &cfg_res, &ecam_ops);
+	if (ret < 0) {
+		dev_err(dev, "%04x:%pR ECAM region not found\n", seg, bus_res);
+		return NULL;
+	}
+
+	/**
+	 * Apply the bus-shift quirk here, since the ECAM address from
+	 * the MCFG table is not yet known when mcfg_quirks is filled in.
+	 */
+	bus_shift = ecam_ops->bus_shift;
+	cfg_res.start = root->mcfg_addr + (bus_res->start << bus_shift);
+	cfg_res.end = cfg_res.start + ((resource_size(bus_res)) << bus_shift) - 1;
+	cfg_res.flags = IORESOURCE_MEM;
+
+	/**
+	 * The ECAM area is treated as a mem resource of the current
+	 * PCI host controller, so it had better be recorded in the
+	 * ACPI namespace (_CRS).
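+	 *
+	 * A standard ECAM window decodes one config address per
+	 * (bus, devfn, reg) triple; with the usual bus_shift of 20
+	 * this is 1 MiB of config space per bus:
+	 *
+	 *   cfg_addr = base + (bus << 20) + (devfn << 12) + reg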
+	 */
+	adev = acpi_resource_consumer(&cfg_res);
+	if (adev)
+		dev_info(dev, "ECAM area %pR reserved by %s\n", &cfg_res,
+			 dev_name(&adev->dev));
+	else
+		dev_info(dev, "Note: ECAM area %pR not reserved in ACPI namespace\n",
+			 &cfg_res);
+
+	cfg = pci_ecam_create(dev, &cfg_res, bus_res, ecam_ops);
+	if (IS_ERR(cfg)) {
+		dev_err(dev, "%04x:%pR error %ld mapping ECAM\n", seg, bus_res,
+			PTR_ERR(cfg));
+		return NULL;
+	}
+
+	return cfg;
+}
+
+static int pci_acpi_prepare_root_resources(struct acpi_pci_root_info *ci)
+{
+	int status = 0;
+	acpi_status rc;
+	unsigned long long mem_space_base = 0;
+	struct resource_entry *entry = NULL, *tmp = NULL;
+	struct acpi_device *device = ci->bridge;
+
+	/**
+	 * Get host bridge resources via the _CRS method; the return
+	 * value is the number of resources parsed.
+	 */
+	status = acpi_pci_probe_root_resources(ci);
+	if (status > 0) {
+		/**
+		 * To distinguish between mem and pre_mem, the firmware only
+		 * passes the lower 32 bits of mem via ACPI and uses the
+		 * vendor-specific "MEMH" method to record the upper 32 bits.
+		 *
+		 * Get the upper 32 bits here.
+		 */
+		rc = acpi_evaluate_integer(ci->bridge->handle,
+					   "MEMH", NULL, &mem_space_base);
+		if (rc != AE_OK) {
+			dev_err(&device->dev, "unable to retrieve MEMH\n");
+			return -EEXIST;
+		}
+
+		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+			if (entry->res->flags & IORESOURCE_MEM) {
+				if (!(entry->res->end & 0xFFFFFFFF00000000ULL)) {
+					/* Patch the mem resource with upper 32 bits */
+					entry->res->start |= (mem_space_base << 32);
+					entry->res->end |= (mem_space_base << 32);
+				} else {
+					/**
+					 * Add PREFETCH and MEM_64 flags for pre_mem,
+					 * so that we can distinguish between mem and
+					 * pre_mem.
+					 */
+					entry->res->flags |= IORESOURCE_PREFETCH;
+					entry->res->flags |= IORESOURCE_MEM_64;
+				}
+			}
+
+			dev_dbg(&device->dev,
+				"host bridge resource: 0x%llx-0x%llx flags [0x%lx]\n",
+				entry->res->start, entry->res->end, entry->res->flags);
+		}
+		return status;
+	}
+
+	/**
+	 * If the resources could not be parsed successfully, destroy
+	 * those that have already been parsed.
+	 */
+	resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
+		dev_info(&device->dev,
+			 "host bridge resource(ignored): 0x%llx-0x%llx flags [0x%lx]\n",
+			 entry->res->start, entry->res->end, entry->res->flags);
+		resource_list_destroy_entry(entry);
+	}
+
+	return 0;
+}
+
+/**
+ * This function is called from ACPI code to set up the
+ * PCI host controller.
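+ *
+ * Rough flow: map the ECAM window, then either reuse an already
+ * existing root bus or let acpi_pci_root_create() build a new one,
+ * after which the bridges are sized and resources assigned.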
+ */
+struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
+{
+	struct pci_bus *bus = NULL, *child = NULL;
+	struct pci_root_info *pci_ri = NULL;
+	struct acpi_pci_root_ops *root_ops = NULL;
+	int domain = root->segment;
+	int busnum = root->secondary.start;
+
+	pci_ri = kzalloc(sizeof(*pci_ri), GFP_KERNEL);
+	if (!pci_ri)
+		goto out_of_mem_0;
+
+	root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL);
+	if (!root_ops)
+		goto out_of_mem_1;
+
+	pci_ri->cfg = pci_acpi_setup_ecam_mapping(root);
+	if (!pci_ri->cfg)
+		goto setup_ecam_err;
+
+	root_ops->release_info = pci_acpi_release_root_info;
+	root_ops->prepare_resources = pci_acpi_prepare_root_resources;
+	root_ops->pci_ops = (struct pci_ops *)&pci_ri->cfg->ops->pci_ops;
+
+	bus = pci_find_bus(domain, busnum);
+	if (bus) {
+		memcpy(bus->sysdata, pci_ri->cfg, sizeof(struct pci_config_window));
+		kfree(pci_ri->cfg);
+		kfree(pci_ri);
+		kfree(root_ops);
+	} else {
+		bus = acpi_pci_root_create(root, root_ops, &pci_ri->info, pci_ri->cfg);
+
+		/**
+		 * No need to call kfree here, because acpi_pci_root_create
+		 * frees the memory it allocated when it fails to create
+		 * the pci_bus.
+		 */
+		if (!bus)
+			return NULL;
+
+		/* Apply Sunway PCI controller quirks after scanning the Root Complex */
+		sw64_pci_root_bridge_scan_finish_up(pci_find_host_bridge(bus));
+
+		pci_bus_size_bridges(bus);
+		pci_bus_assign_resources(bus);
+
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
+	}
+
+	return bus;
+
+setup_ecam_err:
+	kfree(root_ops);
+out_of_mem_1:
+	kfree(pci_ri);
+out_of_mem_0:
+	pr_warn("RC [%04x:%02x:] failed (out of memory or ECAM setup error)!\n",
+		domain, busnum);
+
+	return NULL;
+}
+
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+	if (!acpi_disabled) {
+		struct pci_config_window *cfg = bridge->sysdata;
+		struct acpi_device *adev = to_acpi_device(cfg->parent);
+		struct pci_controller *hose = cfg->priv;
+		struct device *bus_dev = &bridge->bus->dev;
+
+		ACPI_COMPANION_SET(&bridge->dev, adev);
+		set_dev_node(bus_dev, hose->node);
+
+		/* Apply Sunway PCI controller quirks before scanning the Root Complex */
+		sw64_pci_root_bridge_prepare(bridge);
+	}
+
+	return 0;
+}
+
+void pcibios_add_bus(struct pci_bus *bus)
+{
+	acpi_pci_add_bus(bus);
+}
+
+void pcibios_remove_bus(struct pci_bus *bus)
+{
+	acpi_pci_remove_bus(bus);
+}
diff --git a/arch/sw_64/pci/msi.c b/arch/sw_64/pci/msi.c
new file mode 100644
index 0000000000000000000000000000000000000000..fc2c122c37efa0c9d84dfcbdd8bc615305f032d9
--- /dev/null
+++ b/arch/sw_64/pci/msi.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+
+int msi_compose_msg(unsigned int irq, struct msi_msg *msg)
+{
+	msg->address_hi = (unsigned int)(MSIX_MSG_ADDR >> 32);
+	msg->address_lo = (unsigned int)(MSIX_MSG_ADDR & 0xffffffff);
+	msg->data = irq;
+	return irq;
+}
+
+void sw64_irq_noop(struct irq_data *d)
+{
+}
+
+void arch_teardown_msi_irq(unsigned int irq)
+{
+}
diff --git a/arch/sw_64/pci/pci-legacy.c b/arch/sw_64/pci/pci-legacy.c
new file mode 100644
index 0000000000000000000000000000000000000000..2a44463db0a42e99f1bc86c84a24762d06e36afc
--- /dev/null
+++ b/arch/sw_64/pci/pci-legacy.c
@@ -0,0 +1,508 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+unsigned long rc_linkup;
+
+/*
+ * The PCI controller list.
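+ * hose_head points at the first controller; hose_tail always points
+ * at the last ->next pointer, so alloc_pci_controller() can append
+ * new controllers in O(1).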
+ */ + +struct pci_controller *hose_head, **hose_tail = &hose_head; +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus); + +static int __init +pcibios_init(void) +{ + if (acpi_disabled) + sw64_init_pci(); + return 0; +} +subsys_initcall(pcibios_init); + +void __init pcibios_claim_one_bus(struct pci_bus *b) +{ + struct pci_dev *dev; + struct pci_bus *child_bus; + + list_for_each_entry(dev, &b->devices, bus_list) { + int i; + + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + struct resource *r = &dev->resource[i]; + + if (r->parent || !r->start || !r->flags) + continue; + if (r->flags & IORESOURCE_PCI_FIXED) { + if (pci_claim_resource(dev, i) == 0) + continue; + + pci_claim_bridge_resource(dev, i); + } + } + } + + list_for_each_entry(child_bus, &b->children, node) + pcibios_claim_one_bus(child_bus); +} + +static void __init +pcibios_claim_console_setup(void) +{ + struct pci_bus *b; + + list_for_each_entry(b, &pci_root_buses, node) + pcibios_claim_one_bus(b); +} + +int __weak chip_pcie_configure(struct pci_controller *hose) +{ + return 0; +} + +unsigned char last_bus = PCI0_BUS; +void __init common_init_pci(void) +{ + struct pci_controller *hose; + struct pci_host_bridge *bridge; + struct pci_bus *bus; + unsigned int init_busnr; + int need_domain_info = 0; + int ret; + unsigned long offset; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) { + bridge = pci_alloc_host_bridge(0); + if (!bridge) + continue; + hose->busn_space->start = last_bus; + init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus); + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + offset = hose->mem_space->start - PCI_32BIT_MEMIO; + if (is_in_host()) + hose->first_busno = last_bus + 1; + else + hose->first_busno = last_bus; + pci_add_resource_offset(&bridge->windows, hose->mem_space, offset); + pci_add_resource_offset(&bridge->windows, hose->io_space, hose->io_space->start); + pci_add_resource_offset(&bridge->windows, hose->pre_mem_space, 0); + pci_add_resource_offset(&bridge->windows, hose->busn_space, 0); + bridge->dev.parent = NULL; + bridge->sysdata = hose; + bridge->busnr = hose->busn_space->start; + bridge->ops = &sw64_pci_ops; + bridge->swizzle_irq = pci_common_swizzle; + bridge->map_irq = sw64_map_irq; + + ret = pci_scan_root_bus_bridge(bridge); + if (ret) { + pci_free_host_bridge(bridge); + continue; + } + + bus = hose->bus = bridge->bus; + hose->need_domain_info = need_domain_info; + + if (is_in_host()) + last_bus = chip_pcie_configure(hose); + else + while (pci_find_bus(pci_domain_nr(bus), last_bus)) + last_bus++; + + hose->last_busno = hose->busn_space->end = last_bus; + init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS); + init_busnr &= ~(0xff << 16); + init_busnr |= last_bus << 16; + write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr); + pci_bus_update_busn_res_end(bus, last_bus); + last_bus++; + } + + pcibios_claim_console_setup(); + + if (is_in_host()) { + list_for_each_entry(bus, &pci_root_buses, node) + pcibios_reserve_legacy_regions(bus); + } + + pr_info("SW arch assign unassigned resources.\n"); + + pci_assign_unassigned_resources(); + + for (hose = hose_head; hose; hose = hose->next) { + bus = hose->bus; + if (bus) + pci_bus_add_devices(bus); + } +} + +struct pci_controller * __init +alloc_pci_controller(void) +{ + struct pci_controller *hose; + + hose = memblock_alloc(sizeof(*hose), SMP_CACHE_BYTES); + + *hose_tail = hose; + hose_tail = &hose->next; + + 
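+	/*
+	 * memblock_alloc() returns zeroed memory, so the new hose starts
+	 * out all-NULL; the caller fills in its resources afterwards.
+	 */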
return hose; +} + +struct resource * __init +alloc_resource(void) +{ + struct resource *res; + + res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES); + + return res; +} + +static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + resource_size_t offset; + struct resource *res; + + pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus)); + + /* Check for IO */ + if (!(hose->io_space->flags & IORESOURCE_IO)) + goto no_io; + offset = (unsigned long)hose->io_space->start; + res = kzalloc(sizeof(struct resource), GFP_KERNEL); + BUG_ON(res == NULL); + res->name = "Legacy IO"; + res->flags = IORESOURCE_IO; + res->start = offset; + res->end = (offset + 0xfff) & 0xfffffffffffffffful; + pr_debug("Candidate legacy IO: %pR\n", res); + if (request_resource(hose->io_space, res)) { + pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n", + pci_domain_nr(bus), bus->number, res); + kfree(res); + } + +no_io: + return; +} + +/* PCIe RC operations */ +int sw6_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +int sw6_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret = PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw6_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (test_bit(hose->node * 8 + hose->index, &rc_linkup)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return 
ret; + } + return ret; +} + +int sw6_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw6_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/* + *sw6_pcie_valid_device - Check if a valid device is present on bus + *@bus: PCI Bus structure + *@devfn: device/function + * + *Return: 'true' on success and 'false' if invalid device is found + */ +static bool sw6_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/* + *sw6_pcie_map_bus - Get configuration base + *@bus: PCI Bus structure + *@devfn: Device/function + *@where: Offset from base + * + *Return: Base address of the configuration space needed to be + *accessed. + */ +static void __iomem *sw6_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw6_pcie_valid_device(bus, devfn)) + return NULL; + + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +struct pci_ops sw64_pci_ops = { + .map_bus = sw6_pcie_map_bus, + .read = sw6_pcie_config_read, + .write = sw6_pcie_config_write, +}; + +int sw64_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return sw64_chip_init->pci_init.map_irq(dev, slot, pin); +} + +static void __init +sw64_init_host(unsigned long node, unsigned long index) +{ + struct pci_controller *hose; + int ret = 0; + + hose = alloc_pci_controller(); + if (!hose) { + pr_warn("alloc NODE %ld RC %ld hose failed\n", node, index); + return; + } + hose->iommu_enable = false; + hose->io_space = alloc_resource(); + hose->mem_space = alloc_resource(); + hose->pre_mem_space = alloc_resource(); + hose->busn_space = alloc_resource(); + hose->index = index; + hose->node = node; + + sw64_chip_init->pci_init.hose_init(hose); + + if (sw64_chip_init->pci_init.set_rc_piu) + sw64_chip_init->pci_init.set_rc_piu(node, index); + + ret = sw64_chip_init->pci_init.check_pci_linkup(node, index); + if (ret == 0) { + /* Root Complex downstream port is link up */ + set_bit(node * 8 + index, &rc_linkup); //8-bit per node + } +} + +void __weak set_devint_wken(int node) {} +void __weak set_adr_int(int node) {} + +void __init sw64_init_arch(void) +{ + if (IS_ENABLED(CONFIG_PCI)) { + unsigned long node, cpu_num; + unsigned long rc_enable; + char id[8], msg[64]; + int i; + + cpu_num = sw64_chip->get_cpu_num(); + + for (node = 0; node < cpu_num; node++) { + if (is_in_host()) { + set_devint_wken(node); + set_adr_int(node); + } + } + + if (!acpi_disabled) + return; + + pr_info("SW arch PCI initialize!\n"); + for (node = 0; node < cpu_num; node++) { + rc_enable = sw64_chip_init->pci_init.get_rc_enable(node); + if (rc_enable == 0) { + pr_notice("PCIe is disabled on node %ld\n", node); + 
continue; + } + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_enable >> i) & 0x1) + sw64_init_host(node, i); + } + if ((rc_linkup >> node * 8) & 0xff) { + memset(msg, 0, 64); + sprintf(msg, "Node %ld: RC [ ", node); + for (i = 0; i < MAX_NR_RCS; i++) { + if ((rc_linkup >> (i + node * 8)) & 1) { + memset(id, 0, 8); + sprintf(id, "%d ", i); + strcat(msg, id); + } + } + strcat(msg, "] link up"); + pr_info("%s\n", msg); + } else { + pr_info("Node %ld: no RC link up\n", node); + } + } + } +} + +void __weak set_pcieport_service_irq(int node, int index) {} + +static void __init sw64_init_intx(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index, irq; + int rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + if (sw64_chip_init->pci_init.set_intx) + sw64_chip_init->pci_init.set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); +} + +void __init sw64_init_irq(void) +{ + struct pci_controller *hose; + + /* Scan all of the recorded PCI controllers. */ + hose = hose_head; + for (hose = hose_head; hose; hose = hose->next) + sw64_init_intx(hose); +} + +void __init +sw64_init_pci(void) +{ + pci_add_flags(PCI_REASSIGN_ALL_BUS); + common_init_pci(); + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} diff --git a/arch/sw_64/pci/pci-sysfs.c b/arch/sw_64/pci/pci-sysfs.c new file mode 100644 index 0000000000000000000000000000000000000000..5b52a534fa808ebf1c8b1548ef1acaa1a905b96f --- /dev/null +++ b/arch/sw_64/pci/pci-sysfs.c @@ -0,0 +1,359 @@ +// SPDX-License-Identifier: GPL-2.0 +/** + * Sw_64 PCI resource files. + * + * Loosely based on generic HAVE_PCI_MMAP implementation in + * drivers/pci/pci-sysfs.c + */ + +#include + +static int hose_mmap_page_range(struct pci_controller *hose, + struct vm_area_struct *vma, + enum pci_mmap_state mmap_type, int sparse) +{ + unsigned long base; + + if (mmap_type == pci_mmap_mem) + base = sparse ? hose->sparse_mem_base : hose->dense_mem_base; + else + base = sparse ? hose->sparse_io_base : hose->dense_io_base; + + vma->vm_pgoff |= base >> PAGE_SHIFT; + + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static int __pci_mmap_fits(struct pci_dev *pdev, int num, + struct vm_area_struct *vma, int sparse) +{ + unsigned long nr, start, size; + int shift = sparse ? 5 : 0; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((pci_resource_len(pdev, num) - 1) >> (PAGE_SHIFT - shift)) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on %s BAR %d (size 0x%08lx)\n", + current->comm, sparse ? 
" sparse" : "", start, start + nr, + pci_name(pdev), num, size); + return 0; +} + +/** + * pci_mmap_resource - map a PCI resource into user memory space + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * @sparse: address space type + * + * Use the bus mapping routines to map a PCI resource into userspace. + */ +static int pci_mmap_resource(struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma, int sparse) +{ + struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj)); + struct resource *res = attr->private; + enum pci_mmap_state mmap_type; + struct pci_bus_region bar; + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) + if (res == &pdev->resource[i]) + break; + if (i >= PCI_ROM_RESOURCE) + return -ENODEV; + + if (res->flags & IORESOURCE_MEM && iomem_is_exclusive(res->start)) + return -EINVAL; + + if (!__pci_mmap_fits(pdev, i, vma, sparse)) + return -EINVAL; + + pcibios_resource_to_bus(pdev->bus, &bar, res); + vma->vm_pgoff += bar.start >> (PAGE_SHIFT - (sparse ? 5 : 0)); + mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; + + return hose_mmap_page_range(pdev->sysdata, vma, mmap_type, sparse); +} + +static int pci_mmap_resource_sparse(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 1); +} + +static int pci_mmap_resource_dense(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + return pci_mmap_resource(kobj, attr, vma, 0); +} + +/** + * pci_remove_resource_files - cleanup resource files + * @dev: dev to cleanup + * + * If we created resource files for @dev, remove them from sysfs and + * free their resources. + */ +void pci_remove_resource_files(struct pci_dev *pdev) +{ + int i; + + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + struct bin_attribute *res_attr; + + res_attr = pdev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + + res_attr = pdev->res_attr_wc[i]; + if (res_attr) { + sysfs_remove_bin_file(&pdev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +static int sparse_mem_mmap_fits(struct pci_dev *pdev, int num) +{ + struct pci_bus_region bar; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + long dense_offset; + unsigned long sparse_size; + + pcibios_resource_to_bus(pdev->bus, &bar, &pdev->resource[num]); + + /* + * All core logic chips have 4G sparse address space, except + * CIA which has 16G (see xxx_SPARSE_MEM and xxx_DENSE_MEM + * definitions in asm/core_xxx.h files). This corresponds + * to 128M or 512M of the bus space. + */ + dense_offset = (long)(hose->dense_mem_base - hose->sparse_mem_base); + sparse_size = dense_offset >= 0x400000000UL ? 0x20000000 : 0x8000000; + + return bar.end < sparse_size; +} + +static int pci_create_one_attr(struct pci_dev *pdev, int num, char *name, + char *suffix, struct bin_attribute *res_attr, + unsigned long sparse) +{ + size_t size = pci_resource_len(pdev, num); + + sprintf(name, "resource%d%s", num, suffix); + res_attr->mmap = sparse ? pci_mmap_resource_sparse : + pci_mmap_resource_dense; + res_attr->attr.name = name; + res_attr->attr.mode = 0600; + res_attr->size = sparse ? 
size << 5 : size; + res_attr->private = &pdev->resource[num]; + return sysfs_create_bin_file(&pdev->dev.kobj, res_attr); +} + +static int pci_create_attr(struct pci_dev *pdev, int num) +{ + /* allocate attribute structure, piggyback attribute name */ + int retval, nlen1, nlen2 = 0, res_count = 1; + unsigned long sparse_base, dense_base; + struct bin_attribute *attr; + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + char *suffix, *attr_name; + + suffix = ""; + nlen1 = 10; + + if (pdev->resource[num].flags & IORESOURCE_MEM) { + sparse_base = hose->sparse_mem_base; + dense_base = hose->dense_mem_base; + if (sparse_base && !sparse_mem_mmap_fits(pdev, num)) { + sparse_base = 0; + suffix = "_dense"; + nlen1 = 16; /* resourceN_dense */ + } + } else { + sparse_base = hose->sparse_io_base; + dense_base = hose->dense_io_base; + } + + if (sparse_base) { + suffix = "_sparse"; + nlen1 = 17; + if (dense_base) { + nlen2 = 16; /* resourceN_dense */ + res_count = 2; + } + } + + attr = kzalloc(sizeof(*attr) * res_count + nlen1 + nlen2, GFP_ATOMIC); + if (!attr) + return -ENOMEM; + + attr_name = (char *)(attr + res_count); + pdev->res_attr[num] = attr; + retval = pci_create_one_attr(pdev, num, attr_name, suffix, attr, + sparse_base); + if (retval || res_count == 1) + return retval; + + /* Create dense file */ + attr_name += nlen1; + attr++; + pdev->res_attr_wc[num] = attr; + return pci_create_one_attr(pdev, num, attr_name, "_dense", attr, 0); +} + +/** + * pci_create_resource_files - create resource files in sysfs for @dev + * @dev: dev in question + * + * Walk the resources in @dev creating files for each resource available. + */ +int pci_create_resource_files(struct pci_dev *pdev) +{ + int i; + int retval; + + /* Expose the PCI resources from this device as files */ + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + + /* skip empty resources */ + if (!pci_resource_len(pdev, i)) + continue; + + retval = pci_create_attr(pdev, i); + if (retval) { + pci_remove_resource_files(pdev); + return retval; + } + } + return 0; +} + +/* Legacy I/O bus mapping stuff. */ + +static int __legacy_mmap_fits(struct pci_controller *hose, + struct vm_area_struct *vma, + unsigned long res_size, int sparse) +{ + unsigned long nr, start, size; + + nr = vma_pages(vma); + start = vma->vm_pgoff; + size = ((res_size - 1) >> PAGE_SHIFT) + 1; + + if (start < size && size - start >= nr) + return 1; + WARN(1, "process \"%s\" tried to map%s 0x%08lx-0x%08lx on hose %ld (size 0x%08lx)\n", + current->comm, sparse ? " sparse" : "", start, start + nr, + hose->index, size); + return 0; +} + +static inline int has_sparse(struct pci_controller *hose, + enum pci_mmap_state mmap_type) +{ + unsigned long base; + + base = (mmap_type == pci_mmap_mem) ? hose->sparse_mem_base : + hose->sparse_io_base; + + return base != 0; +} + +int pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma, + enum pci_mmap_state mmap_type) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int sparse = has_sparse(hose, mmap_type); + unsigned long res_size; + + res_size = (mmap_type == pci_mmap_mem) ? bus->legacy_mem->size : + bus->legacy_io->size; + if (!__legacy_mmap_fits(hose, vma, res_size, sparse)) + return -EINVAL; + + return hose_mmap_page_range(hose, vma, mmap_type, sparse); +} + +/** + * pci_adjust_legacy_attr - adjustment of legacy file attributes + * @b: bus to create files under + * @mmap_type: I/O port or memory + * + * Adjust file name and size for sparse mappings. 
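+ *
+ * In sparse space every bus byte is spread over 32 bytes of CPU address
+ * space, hence the fixed shift by 5 applied to the resource size below
+ * (matching the "sparse ? 5 : 0" scaling used for the per-BAR files).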
+ */
+void pci_adjust_legacy_attr(struct pci_bus *bus, enum pci_mmap_state mmap_type)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	if (!has_sparse(hose, mmap_type))
+		return;
+
+	if (mmap_type == pci_mmap_mem) {
+		bus->legacy_mem->attr.name = "legacy_mem_sparse";
+		bus->legacy_mem->size <<= 5;
+	} else {
+		bus->legacy_io->attr.name = "legacy_io_sparse";
+		bus->legacy_io->size <<= 5;
+	}
+}
+
+/* Legacy I/O bus read/write functions */
+int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		*((u8 *)val) = inb(port);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		*((u16 *)val) = inw(port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		*((u32 *)val) = inl(port);
+		return 4;
+	}
+	return -EINVAL;
+}
+
+int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(bus);
+
+	port += hose->io_space->start;
+
+	switch (size) {
+	case 1:
+		outb(val, port);
+		return 1;
+	case 2:
+		if (port & 1)
+			return -EINVAL;
+		outw(val, port);
+		return 2;
+	case 4:
+		if (port & 3)
+			return -EINVAL;
+		outl(val, port);
+		return 4;
+	}
+	return -EINVAL;
+}
diff --git a/arch/sw_64/pci/pci.c b/arch/sw_64/pci/pci.c
new file mode 100644
index 0000000000000000000000000000000000000000..3db9816e19f1c8facca61e627b2ee465fe404082
--- /dev/null
+++ b/arch/sw_64/pci/pci.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0
+#include
+#include
+#include
+
+#include
+#include
+
+/*
+ * raw_pci_read/write - Platform-specific PCI config space access.
+ */
+int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
+		int reg, int len, u32 *val)
+{
+	struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+	if (bus_tmp)
+		return bus_tmp->ops->read(bus_tmp, devfn, reg, len, val);
+
+	return -EINVAL;
+}
+
+int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
+		int reg, int len, u32 val)
+{
+	struct pci_bus *bus_tmp = pci_find_bus(domain, bus);
+
+	if (bus_tmp)
+		return bus_tmp->ops->write(bus_tmp, devfn, reg, len, val);
+
+	return -EINVAL;
+}
+
+resource_size_t pcibios_default_alignment(void)
+{
+	if (is_in_guest())
+		return PAGE_SIZE;
+	else
+		return 0;
+}
+
+/**
+ * Just declaring that the power-of-ten prefixes are actually the
+ * power-of-two ones doesn't make it true :)
+ */
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+		resource_size_t size, resource_size_t align)
+{
+	struct pci_dev *dev = data;
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long alignto;
+	resource_size_t start = res->start;
+
+	if (res->flags & IORESOURCE_IO) {
+		/* Make sure we start at our min on all hoses */
+		if (start - hose->io_space->start < PCIBIOS_MIN_IO)
+			start = PCIBIOS_MIN_IO + hose->io_space->start;
+		/*
+		 * Put everything into 0x00-0xff region modulo 0x400
+		 */
+		if (start & 0x300)
+			start = (start + 0x3ff) & ~0x3ff;
+	} else if (res->flags & IORESOURCE_MEM) {
+		/* Make sure we start at our min on all hoses */
+		if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
+			start = PCIBIOS_MIN_MEM + hose->mem_space->start;
+		/*
+		 * The following holds at least for the Low Cost
+		 * SW64 implementation of the PCI interface:
+		 *
+		 * In sparse memory address space, the first
+		 * octant (16MB) of every 128MB
segment is + * aliased to the very first 16 MB of the + * address space (i.e., it aliases the ISA + * memory address space). Thus, we try to + * avoid allocating PCI devices in that range. + * Can be allocated in 2nd-7th octant only. + * Devices that need more than 112MB of + * address space must be accessed through + * dense memory space only! + */ + + /* Align to multiple of size of minimum base. */ + alignto = max_t(resource_size_t, 0x1000UL, align); + start = ALIGN(start, alignto); + if (hose->sparse_mem_base && size <= 7 * 16*MB) { + if (((start / (16*MB)) & 0x7) == 0) { + start &= ~(128*MB - 1); + start += 16*MB; + start = ALIGN(start, alignto); + } + if (start/(128*MB) != (start + size - 1)/(128*MB)) { + start &= ~(128*MB - 1); + start += (128 + 16)*MB; + start = ALIGN(start, alignto); + } + } + } + + return start; +} + +#undef KB +#undef MB +#undef GB + +char *pcibios_setup(char *str) +{ + return str; +} + +void pcibios_fixup_bus(struct pci_bus *bus) +{ + /* Propagate hose info into the subordinate devices. */ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + struct pci_dev *dev = bus->self; + + if (!dev || bus->number == hose->first_busno) { + bus->resource[0] = hose->io_space; + bus->resource[1] = hose->mem_space; + bus->resource[2] = hose->pre_mem_space; + } +} + +/** + * Provide information on locations of various I/O regions in physical + * memory. Do this on a per-card basis so that we choose the right hose. + */ +asmlinkage long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) +{ + struct pci_controller *hose; + + hose = bus_num_to_pci_controller(bus); + if (hose == NULL) + return -ENODEV; + + switch (which & ~IOBASE_FROM_HOSE) { + case IOBASE_HOSE: + return hose->index; + case IOBASE_SPARSE_MEM: + return hose->sparse_mem_base; + case IOBASE_DENSE_MEM: + return hose->dense_mem_base; + case IOBASE_SPARSE_IO: + return hose->sparse_io_base; + case IOBASE_DENSE_IO: + return hose->dense_io_base; + case IOBASE_ROOT_BUS: + return hose->bus->number; + } + + return -EOPNOTSUPP; +} + +void pci_iounmap(struct pci_dev *dev, void __iomem *addr) +{ +} +EXPORT_SYMBOL(pci_iounmap); + +void __init reserve_mem_for_pci(void) +{ + int ret; + unsigned long base = PCI_32BIT_MEMIO; + + ret = add_memmap_region(base, PCI_32BIT_MEMIO_SIZE, memmap_pci); + if (ret) { + pr_err("reserved pages for pcie memory space failed\n"); + return; + } + + pr_info("reserved pages for pcie memory space %lx:%lx\n", base >> PAGE_SHIFT, + (base + PCI_32BIT_MEMIO_SIZE) >> PAGE_SHIFT); +} + +const struct dma_map_ops *dma_ops; +EXPORT_SYMBOL(dma_ops); + +/* Quirks */ +static void quirk_isa_bridge(struct pci_dev *dev) +{ + dev->class = PCI_CLASS_BRIDGE_ISA << 8; +} +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge); + +/* + * Early fix up the Root Complex settings + */ +static void fixup_root_complex(struct pci_dev *dev) +{ + int i; + struct pci_bus *bus = dev->bus; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + hose->self_busno = hose->busn_space->start; + + if (likely(bus->number == hose->self_busno)) { + if (IS_ENABLED(CONFIG_HOTPLUG_PCI_PCIE)) { + /* Check Root Complex port again */ + dev->is_hotplug_bridge = 0; + dev->current_state = PCI_D0; + } + + dev->class &= 0xff; + dev->class |= PCI_CLASS_BRIDGE_PCI << 8; + for (i = 0; i < PCI_NUM_RESOURCES; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; + dev->resource[i].flags = IORESOURCE_PCI_FIXED; + } + } + atomic_inc(&dev->enable_cnt); + + dev->no_msi = 1; 
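+
+	/*
+	 * no_msi disables MSI for the root port device itself only;
+	 * endpoint devices below it still negotiate MSI/MSI-X as usual.
+	 */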
+}
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_JN, PCI_DEVICE_ID_SW64_ROOT_BRIDGE, fixup_root_complex);
+
+static int setup_bus_dma_cb(struct pci_dev *pdev, void *data)
+{
+	pdev->dev.bus_dma_limit = DMA_BIT_MASK(32);
+	return 0;
+}
+
+static void fix_bus_dma_limit(struct pci_dev *dev)
+{
+	pci_walk_bus(dev->subordinate, setup_bus_dma_cb, NULL);
+	pr_info("Set zx200 bus_dma_limit to 32-bit\n");
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ZHAOXIN, 0x071f, fix_bus_dma_limit);
+
+#ifdef CONFIG_DCA
+static void enable_sw_dca(struct pci_dev *dev)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus);
+	unsigned long node, rc_index, dca_ctl, dca_conf;
+	int i;
+
+	if (dev->class >> 8 != PCI_CLASS_NETWORK_ETHERNET)
+		return;
+
+	node = hose->node;
+	rc_index = hose->index;
+
+	for (i = 0; i < 256; i++) {
+		dca_conf = read_piu_ior1(node, rc_index, DEVICEID0 + (i << 7));
+		if (dca_conf >> 63)
+			continue;
+		else {
+			dca_conf = (1UL << 63) | (dev->bus->number << 8) | dev->devfn;
+			pr_info("dca device index %d, dca_conf = %#lx\n", i, dca_conf);
+			write_piu_ior1(node, rc_index, DEVICEID0 + (i << 7), dca_conf);
+			break;
+		}
+	}
+
+	dca_ctl = read_piu_ior1(node, rc_index, DCACONTROL);
+	if (dca_ctl & 0x1) {
+		dca_ctl = 0x2;
+		write_piu_ior1(node, rc_index, DCACONTROL, dca_ctl);
+		pr_info("Node %ld RC %ld enable DCA 1.0\n", node, rc_index);
+	}
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, enable_sw_dca);
+#endif
+
+/**
+ * There are some special aspects to the Root Complex of Sunway:
+ * 1. The Root Complex config space base address is different
+ *    from the EP config space base address.
+ * 2. In the case of multiple Root Complexes, different Root
+ *    Complexes have different config space base addresses.
+ *
+ * This means that even if multiple Root Complexes share the
+ * same segment group number, their bus numbers can still
+ * overlap.
+ *
+ * However, due to a Xorg-related issue, the bus numbers of
+ * multiple Root Complexes must not overlap. So, after scanning
+ * a Root Complex, "last_bus" records the bus number following
+ * the highest bus number in use, and it is used as the starting
+ * bus number for the next Root Complex to be scanned.
+ *
+ * An open question: when there are too many RCs, 256 bus
+ * numbers may be insufficient.
+ */
+static unsigned char last_bus;
+
+void sw64_pci_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct resource_entry *entry = NULL;
+	struct pci_bus *bus = bridge->bus;
+	unsigned long flags = 0;
+	unsigned int init_busnr = 0;
+
+	hose = pci_bus_to_pci_controller(bus);
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		flags = entry->res->flags;
+		if (flags & IORESOURCE_IO) {
+			entry->offset = entry->res->start;
+			hose->io_space = entry->res;
+		} else if (flags & IORESOURCE_BUS) {
+			entry->res->start = last_bus;
+			hose->busn_space = entry->res;
+		} else if (flags & IORESOURCE_MEM) {
+			if (!(flags & IORESOURCE_PREFETCH)) {
+				entry->offset = entry->res->start - PCI_32BIT_MEMIO;
+				hose->mem_space = entry->res;
+			} else
+				hose->pre_mem_space = entry->res;
+		}
+	}
+
+	/**
+	 * The Root Complex is scanned and bus numbers are updated by the
+	 * kernel, not by the firmware; the firmware just passes 0x0-0xff
+	 * via _CRS.
+	 *
+	 * So the bus number of the PCI host bridge needs to be updated here.
+	 */
+	bridge->busnr = last_bus;
+	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus), last_bus);
+
+	/**
+	 * At this point, the pci_bus has already been created with the old
+	 * bridge->busnr, so bus->number needs to be updated here as well.
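+	 *
+	 * For example, if the previous Root Complex ended up with buses
+	 * 0x00-0x0f, this bridge starts at last_bus == 0x10 and both
+	 * bridge->busnr and bus->number are set to 0x10 (the bus numbers
+	 * here are illustrative).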
+ */
+	bus->number = last_bus;
+
+	bridge->swizzle_irq = pci_common_swizzle;
+	bridge->map_irq = sw64_pci_map_irq;
+
+	init_busnr = (0xff << 16) + ((last_bus + 1) << 8) + (last_bus);
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	hose->first_busno = last_bus + (is_in_host() ? 1 : 0);
+
+	pci_add_flags(PCI_REASSIGN_ALL_BUS);
+}
+
+static void sw64_pci_root_bridge_reserve_legacy_io(struct pci_host_bridge *bridge)
+{
+	struct pci_bus *bus = bridge->bus;
+	struct resource_entry *entry = NULL;
+	struct resource *res = NULL;
+
+	resource_list_for_each_entry(entry, &bridge->windows) {
+		if (!(entry->res->flags & IORESOURCE_IO))
+			continue;
+
+		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+		if (res == NULL) {
+			pr_err("alloc resource for legacy io out of mem\n");
+			return;
+		}
+
+		res->name = "legacy io";
+		res->flags = IORESOURCE_IO;
+		res->start = entry->res->start;
+		res->end = (res->start + 0xFFF) & 0xFFFFFFFFFFFFFFFFUL;
+
+		pr_info("reserving legacy io %pR for domain %04x\n",
+				res, pci_domain_nr(bus));
+		if (request_resource(entry->res, res)) {
+			pr_err("pci %04x:%02x reserve legacy io %pR failed\n",
+					pci_domain_nr(bus), bus->number, res);
+			kfree(res);
+		}
+	}
+}
+
+void sw64_pci_root_bridge_scan_finish_up(struct pci_host_bridge *bridge)
+{
+	struct pci_controller *hose = NULL;
+	struct pci_bus *bus = NULL;
+	unsigned int init_busnr = 0;
+
+	bus = bridge->bus;
+
+	hose = pci_bus_to_pci_controller(bus);
+	hose->bus = bus;
+
+	if (is_in_host())
+		last_bus = chip_pcie_configure(hose);
+	else {
+		while (pci_find_bus(pci_domain_nr(bus), last_bus))
+			last_bus++;
+	}
+
+	hose->last_busno = last_bus;
+	hose->busn_space->end = last_bus;
+
+	init_busnr = read_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS);
+	init_busnr &= ~(0xff << 16);
+	init_busnr |= last_bus << 16;
+	write_rc_conf(hose->node, hose->index, RC_PRIMARY_BUS, init_busnr);
+
+	pci_bus_update_busn_res_end(bus, last_bus);
+	last_bus++;
+
+	pr_info("bus number updated to %u\n", last_bus);
+
+	if (is_in_host())
+		sw64_pci_root_bridge_reserve_legacy_io(bridge);
+
+	/**
+	 * The Root Complex of SW64 does not support ASPM, so the control
+	 * field (_OSC) cannot be updated.
+	 *
+	 * Related logic can be found in "negotiate_os_control".
+	 */
+	bridge->native_aer = 1;
+	bridge->native_pme = 1;
+
+	/**
+	 * Since some buggy firmwares may configure invalid bridge bus numbers,
+	 * the kernel re-assigns all PCI bus numbers when scanning the Root
+	 * Complex.
+	 *
+	 * However, users may trigger a PCI bus rescan from userspace with the
+	 * command below:
+	 *
+	 *     > echo 1 > /sys/bus/pci/rescan
+	 *
+	 * Unexpected errors may occur on endpoint devices due to re-assigned
+	 * bus numbers of their upstream bridges.
+	 *
+	 * To work around this problem, the flag PCI_REASSIGN_ALL_BUS is set
+	 * before scanning the Root Complex and cleared again afterwards.
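+	 *
+	 * With the flag cleared again here, a later user-triggered rescan
+	 * keeps the bus numbers assigned at boot instead of renumbering,
+	 * so drivers already bound to endpoint devices are not disturbed.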
+ */ + pci_clear_flags(PCI_REASSIGN_ALL_BUS); +} diff --git a/arch/sw_64/platform/Makefile b/arch/sw_64/platform/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..4c0edceb4a2c1f4f7c8a5ee16617e80161b771a1 --- /dev/null +++ b/arch/sw_64/platform/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += cpufreq_xuelang.o diff --git a/arch/sw_64/platform/cpufreq_xuelang.c b/arch/sw_64/platform/cpufreq_xuelang.c new file mode 100644 index 0000000000000000000000000000000000000000..1259e58dc874ffa691d189dfd46d4e120cdb2cef --- /dev/null +++ b/arch/sw_64/platform/cpufreq_xuelang.c @@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +#include +#include +#include + +/* Minimum CLK support */ +enum { + DC_0, DC_1, DC_2, DC_3, DC_4, DC_5, DC_6, DC_7, DC_8, + DC_9, DC_10, DC_11, DC_12, DC_13, DC_14, DC_15, DC_RESV +}; + +struct cpufreq_frequency_table freq_table[] = { + {0, 200, CPUFREQ_ENTRY_INVALID}, + {0, DC_1, CPUFREQ_ENTRY_INVALID}, + {0, DC_2, 0}, + {0, DC_3, 0}, + {0, DC_4, 0}, + {0, DC_5, 0}, + {0, DC_6, 0}, + {0, DC_7, 0}, + {0, DC_8, 0}, + {0, DC_9, 0}, + {0, DC_10, 0}, + {0, DC_11, 0}, + {0, DC_12, 0}, + {0, DC_13, 0}, + {0, DC_14, 0}, + {0, DC_15, 0}, + {-1, DC_RESV, CPUFREQ_TABLE_END}, +}; + + +static struct platform_device sw64_cpufreq_device = { + .name = "sw64_cpufreq", + .id = -1, +}; + +static int __init sw64_cpufreq_init(void) +{ + int i; + unsigned char external_clk; + unsigned long max_rate, freq_off; + + max_rate = get_cpu_freq() / 1000; + + external_clk = *((unsigned char *)__va(MB_EXTCLK)); + + if (external_clk == 240) + freq_off = 60000; + else + freq_off = 50000; + + /* clock table init */ + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (i == 1) + freq_table[i].driver_data = freq_off * 24; + if (i == 2) + freq_table[i].frequency = freq_off * 36; + if (i > 2) + freq_table[i].frequency = freq_off * 38 + ((i - 3) * freq_off); + + if (freq_table[i].frequency == max_rate) + freq_table[i + 1].frequency = CPUFREQ_TABLE_END; + } + + return platform_device_register(&sw64_cpufreq_device); +} +arch_initcall(sw64_cpufreq_init); + +char curruent_policy[CPUFREQ_NAME_LEN]; + +static struct clk cpu_clk = { + .name = "cpu_clk", + .flags = CLK_ALWAYS_ENABLED | CLK_RATE_PROPAGATES, + .rate = 2400000000, +}; + +struct clk *sw64_clk_get(struct device *dev, const char *id) +{ + return &cpu_clk; +} +EXPORT_SYMBOL(sw64_clk_get); + +unsigned int __sw64_cpufreq_get(struct cpufreq_policy *policy) +{ + int i; + u64 val; + struct cpufreq_frequency_table *ft = policy->freq_table; + + val = sw64_io_read(0, CLK_CTL) >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; ft[i].frequency != CPUFREQ_TABLE_END; i++) { + if (val == i) + return ft[i].frequency; + } + return 0; +} +EXPORT_SYMBOL(__sw64_cpufreq_get); + +void sw64_set_rate(unsigned int index) +{ + unsigned int i, val; + int cpu_num; + + cpu_num = sw64_chip->get_cpu_num(); + + for (i = 0; i < cpu_num; i++) { + sw64_io_write(i, CLK_CTL, CORE_CLK2_R | CORE_CLK2_V | CLK_PRT); + val = sw64_io_read(i, CLK_CTL); + + sw64_io_write(i, CLK_CTL, val | index << CORE_PLL2_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, CORE_CLK2_V | CLK_PRT + | index << CORE_PLL2_CFG_SHIFT); + val = sw64_io_read(i, CLK_CTL); + + /* LV1 select PLL1/PLL2 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXA | CLK_LV1_SEL_PRT); + + /* Set CLK_CTL PLL0 */ + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_R | CORE_CLK0_V + | index 
<< CORE_PLL0_CFG_SHIFT); + + udelay(1); + + sw64_io_write(i, CLK_CTL, val | CORE_CLK0_V + | index << CORE_PLL0_CFG_SHIFT); + + /* LV1 select PLL0/PLL1 */ + sw64_io_write(i, CLU_LV1_SEL, CLK_LV1_SEL_MUXB | CLK_LV1_SEL_PRT); + } +} +EXPORT_SYMBOL_GPL(sw64_set_rate); diff --git a/arch/sw_64/tools/.gitignore b/arch/sw_64/tools/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..f73e86272b7616f0c2ce1d704e1966da94aed182 --- /dev/null +++ b/arch/sw_64/tools/.gitignore @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +relocs diff --git a/arch/sw_64/tools/Makefile b/arch/sw_64/tools/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..66f55b035e223cc3f9073c6fbd252385293e4475 --- /dev/null +++ b/arch/sw_64/tools/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0 + +hostprogs += relocs +relocs-objs += relocs.o +relocs-objs += relocs_main.o +PHONY += relocs +relocs: $(obj)/relocs + @: diff --git a/arch/sw_64/tools/relocs.c b/arch/sw_64/tools/relocs.c new file mode 100644 index 0000000000000000000000000000000000000000..ec0ed422a8369172d2db92550dfed98619419961 --- /dev/null +++ b/arch/sw_64/tools/relocs.c @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "relocs.h" + +#define ELF_BITS 64 + +#define ELF_MACHINE EM_SW64 +#define ELF_MACHINE_NAME "SW64" +#define SHT_REL_TYPE SHT_RELA +#define Elf_Rel Elf64_Rela + +#define ELF_CLASS ELFCLASS64 +#define ELF_R_SYM(val) ELF64_R_SYM(val) +#define ELF_R_TYPE(val) ELF64_R_TYPE(val) +#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o) +#define ELF_ST_BIND(o) ELF64_ST_BIND(o) +#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o) + +#define ElfW(type) _ElfW(ELF_BITS, type) +#define _ElfW(bits, type) __ElfW(bits, type) +#define __ElfW(bits, type) Elf##bits##_##type + +#define Elf_Addr ElfW(Addr) +#define Elf_Ehdr ElfW(Ehdr) +#define Elf_Phdr ElfW(Phdr) +#define Elf_Shdr ElfW(Shdr) +#define Elf_Sym ElfW(Sym) + +static Elf_Ehdr ehdr; + +struct relocs { + uint32_t *offset; + unsigned long count; + unsigned long size; +}; + +static struct relocs relocs; + +struct section { + Elf_Shdr shdr; + struct section *link; + Elf_Sym *symtab; + Elf_Rel *reltab; + char *strtab; + long shdr_offset; +}; +static struct section *secs; + +static const char * const regex_sym_kernel = { +/* Symbols matching these regex's should never be relocated */ + "^(__crc_)", +}; + +static regex_t sym_regex_c; + +static int regex_skip_reloc(const char *sym_name) +{ + return !regexec(&sym_regex_c, sym_name, 0, NULL, 0); +} + +static void regex_init(void) +{ + char errbuf[128]; + int err; + + err = regcomp(&sym_regex_c, regex_sym_kernel, + REG_EXTENDED|REG_NOSUB); + + if (err) { + regerror(err, &sym_regex_c, errbuf, sizeof(errbuf)); + die("%s", errbuf); + } +} + +static const char *rel_type(unsigned int type) +{ + static const char * const type_name[] = { +#define REL_TYPE(X)[X] = #X + REL_TYPE(R_SW64_NONE), + REL_TYPE(R_SW64_REFQUAD), + REL_TYPE(R_SW64_LITERAL), + REL_TYPE(R_SW64_LITUSE), + REL_TYPE(R_SW64_GPDISP), + REL_TYPE(R_SW64_BRADDR), + REL_TYPE(R_SW64_HINT), + REL_TYPE(R_SW64_SREL32), + REL_TYPE(R_SW64_GPRELHIGH), + REL_TYPE(R_SW64_GPRELLOW), +#undef REL_TYPE + }; + const char *name = "unknown type rel type name"; + + if (type < ARRAY_SIZE(type_name) && type_name[type]) + name = type_name[type]; + return name; +} + +static const char *sec_name(unsigned int shndx) +{ + const char *sec_strtab; + const char *name; + + sec_strtab = secs[ehdr.e_shstrndx].strtab; + if (shndx < ehdr.e_shnum) + name = sec_strtab + 
secs[shndx].shdr.sh_name; + else if (shndx == SHN_ABS) + name = "ABSOLUTE"; + else if (shndx == SHN_COMMON) + name = "COMMON"; + else + name = ""; + return name; +} + +static struct section *sec_lookup(const char *secname) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) + if (strcmp(secname, sec_name(i)) == 0) + return &secs[i]; + + return NULL; +} + +static const char *sym_name(const char *sym_strtab, Elf_Sym *sym) +{ + const char *name; + + if (sym->st_name) + name = sym_strtab + sym->st_name; + else + name = sec_name(sym->st_shndx); + return name; +} + +#define le16_to_cpu(val) (val) +#define le32_to_cpu(val) (val) +#define le64_to_cpu(val) (val) + +#define cpu_to_le16(val) (val) +#define cpu_to_le32(val) (val) +#define cpu_to_le64(val) (val) + +static uint16_t elf16_to_cpu(uint16_t val) +{ + return le16_to_cpu(val); +} + +static uint32_t elf32_to_cpu(uint32_t val) +{ + return le32_to_cpu(val); +} + +static uint32_t cpu_to_elf32(uint32_t val) +{ + return cpu_to_le32(val); +} + +#define elf_half_to_cpu(x) elf16_to_cpu(x) +#define elf_word_to_cpu(x) elf32_to_cpu(x) + +#if ELF_BITS == 64 +static uint64_t elf64_to_cpu(uint64_t val) +{ + return le64_to_cpu(val); +} +#define elf_addr_to_cpu(x) elf64_to_cpu(x) +#define elf_off_to_cpu(x) elf64_to_cpu(x) +#define elf_xword_to_cpu(x) elf64_to_cpu(x) +#else +#define elf_addr_to_cpu(x) elf32_to_cpu(x) +#define elf_off_to_cpu(x) elf32_to_cpu(x) +#define elf_xword_to_cpu(x) elf32_to_cpu(x) +#endif + +static void read_ehdr(FILE *fp) +{ + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + die("Cannot read ELF header: %s\n", strerror(errno)); + + if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0) + die("No ELF magic\n"); + + if (ehdr.e_ident[EI_CLASS] != ELF_CLASS) + die("Not a %d bit executable\n", ELF_BITS); + + if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) && + (ehdr.e_ident[EI_DATA] != ELFDATA2MSB)) + die("Unknown ELF Endianness\n"); + + if (ehdr.e_ident[EI_VERSION] != EV_CURRENT) + die("Unknown ELF version\n"); + + /* Convert the fields to native endian */ + ehdr.e_type = elf_half_to_cpu(ehdr.e_type); + ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine); + ehdr.e_version = elf_word_to_cpu(ehdr.e_version); + ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry); + ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff); + ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff); + ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags); + ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize); + ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize); + ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum); + ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize); + ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum); + ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx); + + if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN)) + die("Unsupported ELF header type\n"); + + if (ehdr.e_machine != ELF_MACHINE) + die("Not for %s\n", ELF_MACHINE_NAME); + + if (ehdr.e_version != EV_CURRENT) + die("Unknown ELF version\n"); + + if (ehdr.e_ehsize != sizeof(Elf_Ehdr)) + die("Bad Elf header size\n"); + + if (ehdr.e_phentsize != sizeof(Elf_Phdr)) + die("Bad program header entry\n"); + + if (ehdr.e_shentsize != sizeof(Elf_Shdr)) + die("Bad section header entry\n"); + + if (ehdr.e_shstrndx >= ehdr.e_shnum) + die("String table index out of bounds\n"); +} + +static void read_shdrs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); + if (!secs) + die("Unable to allocate %d section headers\n", ehdr.e_shnum); + + if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", ehdr.e_shoff, 
strerror(errno)); + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + sec->shdr_offset = ftell(fp); + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name); + sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type); + sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags); + sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr); + sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset); + sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size); + sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link); + sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info); + sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign); + sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize); + if (sec->shdr.sh_link < ehdr.e_shnum) + sec->link = &secs[sec->shdr.sh_link]; + } +} + +static void read_strtabs(FILE *fp) +{ + int i; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_STRTAB) + continue; + + sec->strtab = malloc(sec->shdr.sh_size); + if (!sec->strtab) + die("malloc of %d bytes for strtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + } +} + +static void read_symtabs(FILE *fp) +{ + int i, j; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_SYMTAB) + continue; + + sec->symtab = malloc(sec->shdr.sh_size); + if (!sec->symtab) + die("malloc of %d bytes for symtab failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) { + Elf_Sym *sym = &sec->symtab[j]; + + sym->st_name = elf_word_to_cpu(sym->st_name); + sym->st_value = elf_addr_to_cpu(sym->st_value); + sym->st_size = elf_xword_to_cpu(sym->st_size); + sym->st_shndx = elf_half_to_cpu(sym->st_shndx); + } + } +} + +static void read_relocs(FILE *fp) +{ + static unsigned long base; + int i, j; + + if (!base) { + struct section *sec = sec_lookup(".text"); + + if (!sec) + die("Could not find .text section\n"); + + base = sec->shdr.sh_addr; + } + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + sec->reltab = malloc(sec->shdr.sh_size); + if (!sec->reltab) + die("malloc of %d bytes for relocs failed\n", + sec->shdr.sh_size); + + if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr.sh_offset, strerror(errno)); + + if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) != + sec->shdr.sh_size) + die("Cannot read symbol table: %s\n", strerror(errno)); + + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + + rel->r_offset = elf_addr_to_cpu(rel->r_offset); + /* Set offset into kernel image */ + rel->r_offset -= base; + /* Convert SW64 RELA format - only the symbol + * index needs converting to native endianness + */ + rel->r_info = elf_xword_to_cpu(rel->r_info); +#if (SHT_REL_TYPE == SHT_RELA) + rel->r_addend = 
elf_xword_to_cpu(rel->r_addend); +#endif + } + } +} + +static void remove_relocs(FILE *fp) +{ + int i; + Elf_Shdr shdr; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fread(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot read ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + + /* Set relocation section size to 0, effectively removing it. + * This is necessary due to lack of support for relocations + * in objcopy when creating 32bit elf from 64bit elf. + */ + shdr.sh_size = 0; + + if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0) + die("Seek to %d failed: %s\n", + sec->shdr_offset, strerror(errno)); + + if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1) + die("Cannot write ELF section headers %d/%d: %s\n", + i, ehdr.e_shnum, strerror(errno)); + } +} + +static void add_reloc(struct relocs *r, uint32_t offset, unsigned int type) +{ + /* Relocation representation in binary table: + * |76543210|76543210|76543210|76543210| + * | Type | offset from _text >> 2 | + */ + offset >>= 2; + if (offset > 0x00FFFFFF) + die("Kernel image exceeds maximum size for relocation!\n"); + + offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24); + + if (r->count == r->size) { + unsigned long newsize = r->size + 50000; + void *mem = realloc(r->offset, newsize * sizeof(r->offset[0])); + + if (!mem) + die("realloc failed\n"); + + r->offset = mem; + r->size = newsize; + } + r->offset[r->count++] = offset; +} + +static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel, + Elf_Sym *sym, const char *symname)) +{ + int i; + + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; + int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL_TYPE) + continue; + sec_symtab = sec->link; + sec_applies = &secs[sec->shdr.sh_info]; + if (!(sec_applies->shdr.sh_flags & SHF_ALLOC)) + continue; + + sh_symtab = sec_symtab->symtab; + sym_strtab = sec_symtab->link->strtab; + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) { + Elf_Rel *rel = &sec->reltab[j]; + Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)]; + const char *symname = sym_name(sym_strtab, sym); + + process(sec, rel, sym, symname); + } + } +} + +static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym, + const char *symname) +{ + unsigned int r_type = ELF_R_TYPE(rel->r_info); + unsigned int bind = ELF_ST_BIND(sym->st_info); + + if ((bind == STB_WEAK) && (sym->st_value == 0)) { + /* Don't relocate weak symbols without a target */ + return 0; + } + + if (regex_skip_reloc(symname)) + return 0; + + switch (r_type) { + case R_SW64_NONE: + case R_SW64_LITERAL: /* relocated by GOT */ + case R_SW64_LITUSE: + case R_SW64_GPDISP: + case R_SW64_BRADDR: + case R_SW64_HINT: + case R_SW64_SREL32: + case R_SW64_GPRELHIGH: + case R_SW64_GPRELLOW: + case R_SW64_LITERAL_GOT: + /* + * NONE can be ignored and PC relative relocations don't + * need to be adjusted. 
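+		 *
+		 * The GP-relative types (LITERAL, GPDISP, GPRELHIGH/LOW,
+		 * LITERAL_GOT) resolve through the GOT or the GP register,
+		 * so they are likewise invariant when the whole image moves
+		 * by a constant offset; only absolute 64-bit references
+		 * (R_SW64_REFQUAD) need a runtime fixup below.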
+ */ + break; + + case R_SW64_REFQUAD: + add_reloc(&relocs, rel->r_offset, r_type); + break; + + default: + die("Unsupported relocation type: %s (%d)\n", + rel_type(r_type), r_type); + break; + } + + return 0; +} + +static int write_reloc_as_bin(uint32_t v, FILE *f) +{ + unsigned char buf[4]; + + v = cpu_to_elf32(v); + + memcpy(buf, &v, sizeof(uint32_t)); + return fwrite(buf, 1, 4, f); +} + +static int write_reloc_as_text(uint32_t v, FILE *f) +{ + int res; + + res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v); + if (res < 0) + return res; + else + return sizeof(uint32_t); +} + +static void emit_relocs(int as_text, int as_bin, FILE *outf) +{ + int i; + int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin; + int size = 0; + int size_reserved; + struct section *sec_reloc; + + sec_reloc = sec_lookup(".data.reloc"); + if (!sec_reloc) + die("Could not find relocation section\n"); + + size_reserved = sec_reloc->shdr.sh_size; + /* Collect up the relocations */ + walk_relocs(do_reloc); + + /* Print the relocations */ + if (as_text) { + /* Print the relocations in a form suitable that + * gas will like. + */ + printf(".section \".data.reloc\",\"a\"\n"); + printf(".balign 8\n"); + /* Output text to stdout */ + write_reloc = write_reloc_as_text; + outf = stdout; + } else if (as_bin) { + /* Output raw binary to stdout */ + outf = stdout; + } else { + /* + * Seek to offset of the relocation section. + * Each relocation is then written into the + * vmlinux kernel image. + */ + if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) { + die("Seek to %d failed: %s\n", + sec_reloc->shdr.sh_offset, strerror(errno)); + } + } + + for (i = 0; i < relocs.count; i++) + size += write_reloc(relocs.offset[i], outf); + + /* Print a stop, but only if we've actually written some relocs */ + if (size) + size += write_reloc(0, outf); + + if (size > size_reserved) + /* + * Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE + * which will fix this problem and allow a bit of headroom + * if more kernel features are enabled + */ + die("Relocations overflow available space!\n" + "Please adjust CONFIG_RELOCATION_TABLE_SIZE " + "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF); +} + +/* + * As an aid to debugging problems with different linkers + * print summary information about the relocs. + * Since different linkers tend to emit the sections in + * different orders we use the section names in the output. 
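+ *
+ * Example invocation (the exact path to vmlinux is illustrative):
+ *
+ *	arch/sw_64/tools/relocs --reloc-info vmlinux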
+ */ +static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) * sym, + const char *symname) +{ + printf("%16s 0x%x %16s %40s %16s\n", + sec_name(sec->shdr.sh_info), + (unsigned int)rel->r_offset, + rel_type(ELF_R_TYPE(rel->r_info)), + symname, + sec_name(sym->st_shndx)); + return 0; +} + +static void print_reloc_info(void) +{ + printf("%16s %10s %16s %40s %16s\n", + "reloc section", + "offset", + "reloc type", + "symbol", + "symbol section"); + walk_relocs(do_reloc_info); +} + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs) +{ + regex_init(); + read_ehdr(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); + read_relocs(fp); + if (show_reloc_info) { + print_reloc_info(); + return; + } + emit_relocs(as_text, as_bin, fp); + if (!keep_relocs) + remove_relocs(fp); +} diff --git a/arch/sw_64/tools/relocs.h b/arch/sw_64/tools/relocs.h new file mode 100644 index 0000000000000000000000000000000000000000..17c7e31113a0e5f93ac2b596d54e31bc9de7fe58 --- /dev/null +++ b/arch/sw_64/tools/relocs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _SW64_TOOLS_RELOCS_H +#define _SW64_TOOLS_RELOCS_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#define USE_BSD +#include +#include + +#define EM_SW64 0x9916 +/* + * SW64 ELF relocation types + */ +#define R_SW64_NONE 0 /* No reloc */ +#define R_SW64_REFLONG 1 /* Direct 32 bit */ +#define R_SW64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW64_GPDISP 6 /* Add displacement to GP */ +#define R_SW64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW64_SREL16 9 /* PC relative 16 bit */ +#define R_SW64_SREL32 10 /* PC relative 32 bit */ +#define R_SW64_SREL64 11 /* PC relative 64 bit */ +#define R_SW64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW64_COPY 24 /* Copy symbol at runtime */ +#define R_SW64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW64_RELATIVE 27 /* Adjust by program base */ +#define R_SW64_BRSGP 28 +#define R_SW64_TLSGD 29 +#define R_SW64_TLS_LDM 30 +#define R_SW64_DTPMOD64 31 +#define R_SW64_GOTDTPREL 32 +#define R_SW64_DTPREL64 33 +#define R_SW64_DTPRELHI 34 +#define R_SW64_DTPRELLO 35 +#define R_SW64_DTPREL16 36 +#define R_SW64_GOTTPREL 37 +#define R_SW64_TPREL64 38 +#define R_SW64_TPRELHI 39 +#define R_SW64_TPRELLO 40 +#define R_SW64_TPREL16 41 +#define R_SW64_LITERAL_GOT 43 /* GP relative */ + +void die(char *fmt, ...); + +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + +enum symtype { + S_ABS, + S_REL, + S_SEG, + S_LIN, + S_NSYMTYPES +}; + +void process(FILE *fp, int as_text, int as_bin, + int show_reloc_info, int keep_relocs); +#endif /* _SW64_TOOLS_RELOCS_H */ diff --git a/arch/sw_64/tools/relocs_main.c b/arch/sw_64/tools/relocs_main.c new file mode 100644 index 0000000000000000000000000000000000000000..30a830a070dbe98b4b0b4770b9fc3ca1d3406941 --- /dev/null +++ b/arch/sw_64/tools/relocs_main.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "relocs.h" + +void die(char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + vfprintf(stderr, fmt, ap); + va_end(ap); + exit(1); +} + +static void usage(void) +{ + die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n"); +} + +int main(int argc, char **argv) +{ + int show_reloc_info, as_text, as_bin, keep_relocs; + const char *fname; + FILE *fp; + int i; + unsigned char e_ident[EI_NIDENT]; + + show_reloc_info = 0; + as_text = 0; + as_bin = 0; + keep_relocs = 0; + fname = NULL; + for (i = 1; i < argc; i++) { + char *arg = argv[i]; + + if (*arg == '-') { + if (strcmp(arg, "--reloc-info") == 0) { + show_reloc_info = 1; + continue; + } + if (strcmp(arg, "--text") == 0) { + as_text = 1; + continue; + } + if (strcmp(arg, "--bin") == 0) { + as_bin = 1; + continue; + } + if (strcmp(arg, "--keep") == 0) { + keep_relocs = 1; + continue; + } + } else if (!fname) { + fname = arg; + continue; + } + usage(); + } + if (!fname) + usage(); + + fp = fopen(fname, "r+"); + if (!fp) + die("Cannot open %s: %s\n", fname, strerror(errno)); + + if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT) + die("Cannot read %s: %s", fname, strerror(errno)); + + rewind(fp); + if (e_ident[EI_CLASS] == ELFCLASS64) + process(fp, as_text, as_bin, show_reloc_info, keep_relocs); + else + die("Unsupport ELF class on SW64: %s", fname); + //process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs); + fclose(fp); + return 0; +} diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 66bfabae8814919c61902da3cdc77a45d9efa775..62d59a9e8e88ab274972992c035217b0dcc14244 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -99,6 +99,7 @@ config X86 select ARCH_HAS_SYNC_CORE_BEFORE_USERMODE select ARCH_HAS_SYSCALL_WRAPPER select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_USE_MEMREMAP_PROT select ARCH_HAS_DEBUG_WX select ARCH_HAS_ZONE_DMA_SET if EXPERT select ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -478,8 +479,11 @@ config GOLDFISH config X86_CPU_RESCTRL bool "x86 CPU resource control support" depends on X86 && (CPU_SUP_INTEL || CPU_SUP_AMD) + depends on MISC_FILESYSTEMS select KERNFS - select PROC_CPU_RESCTRL if PROC_FS + select ARCH_HAS_CPU_RESCTRL + select RESCTRL_FS + select RESCTRL_FS_PSEUDO_LOCK help Enable x86 CPU resource control support. @@ -1311,18 +1315,43 @@ config X86_REBOOTFIXUPS config MICROCODE def_bool y - depends on CPU_SUP_AMD || CPU_SUP_INTEL + depends on CPU_SUP_AMD || CPU_SUP_INTEL || CPU_SUP_HYGON + +config MICROCODE_INITRD32 + def_bool y + depends on MICROCODE && X86_32 && BLK_DEV_INITRD config MICROCODE_LATE_LOADING bool "Late microcode loading (DANGEROUS)" default n - depends on MICROCODE + depends on MICROCODE && SMP help Loading microcode late, when the system is up and executing instructions is a tricky business and should be avoided if possible. Just the sequence of synchronizing all cores and SMT threads is one fragile dance which does not guarantee that cores might not softlock after the loading. Therefore, - use this at your own risk. Late loading taints the kernel too. + use this at your own risk. Late loading taints the kernel unless the + microcode header indicates that it is safe for late loading via the + minimal revision check. This minimal revision check can be enforced on + the kernel command line with "microcode.minrev=Y". 
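+
+	  If unsure, say N.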
+
+config MICROCODE_LATE_FORCE_MINREV
+	bool "Enforce late microcode loading minimal revision check"
+	default n
+	depends on MICROCODE_LATE_LOADING
+	help
+	  To prevent users from late loading microcode that modifies features
+	  which are already in use, newer microcode patches carry a minimum
+	  revision field in the microcode header. It tells the kernel which
+	  minimum revision must be active in the CPU before the new microcode
+	  can safely be loaded late into the running system. If this option is
+	  disabled, the check is not enforced, but the kernel is tainted when
+	  the minimal revision check fails.
+
+	  This minimal revision check can also be controlled via the
+	  "microcode.minrev" parameter on the kernel command line.
+
+	  If unsure, say Y.
 
 config X86_MSR
 	tristate "/dev/cpu/*/msr - Model-specific register support"
@@ -2028,6 +2057,29 @@ config EFI_RUNTIME_MAP
 
 	  See also Documentation/ABI/testing/sysfs-firmware-efi-runtime-map.
 
+config HYGON_CSV
+	bool "Hygon secure virtualization CSV support"
+	default y
+	depends on CPU_SUP_HYGON && AMD_MEM_ENCRYPT && CMA
+	help
+	  Hygon CSV integrates a secure processor, memory encryption and
+	  memory isolation to provide the ability to protect a guest's
+	  private data. It has evolved from CSV through CSV2 to CSV3.
+
+	  For CSV, the guest's memory is encrypted.
+
+	  For CSV2, not only the guest's memory but also the guest's vCPU
+	  registers are encrypted; neither other guests nor the host can
+	  tamper with the vCPU registers.
+
+	  For CSV3, the guest's context, such as vCPU registers, control
+	  block and nested page table, is accessed only by the guest itself
+	  and the secure processor. Neither other guests nor the host can
+	  tamper with the guest's context.
+
+	  Say Y here to enable support for the full capabilities of Hygon
+	  secure virtualization on Hygon processors.
+
 source "kernel/Kconfig.hz"
 
 config ARCH_SUPPORTS_KEXEC
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 71fc531b95b4eede6890911b73af3a9995d8b14a..a0c5dab165d9682f3698772018910a08a9504823 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -108,6 +108,7 @@ ifdef CONFIG_X86_64
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/mem_encrypt.o
 	vmlinux-objs-y += $(obj)/pgtable_64.o
 	vmlinux-objs-$(CONFIG_AMD_MEM_ENCRYPT) += $(obj)/sev.o
+	vmlinux-objs-$(CONFIG_HYGON_CSV) += $(obj)/csv.o
 endif
 
 vmlinux-objs-$(CONFIG_ACPI) += $(obj)/acpi.o
diff --git a/arch/x86/boot/compressed/csv.c b/arch/x86/boot/compressed/csv.c
new file mode 100644
index 0000000000000000000000000000000000000000..ae6ca14840487c5bdfc77558a8e33bc9a8449f0d
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hygon CSV Support
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#include "misc.h"
+
+#undef __init
+#undef __initdata
+#undef __pa
+#define __init
+#define __initdata
+#define __pa(x)	((unsigned long)(x))
+
+#include "../../kernel/csv-shared.c"
+
+static unsigned int csv3_enabled __section(".data");
+static unsigned int csv3_secure_call_init;
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr)
+{
+	if (!csv3_enabled)
+		return;
+
+	if ((set | clr) & _PAGE_ENC) {
+		if (set & _PAGE_ENC)
+			csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_ENC);
+
+		if (clr & _PAGE_ENC)
+			csv3_early_secure_call(__pa(address), 1, CSV3_SECURE_CMD_DEC);
+	}
+}
+
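csv_update_page_attr() above only acts when the guest runs at CSV3 level; csv_set_status() below decides that by probing MSR 0xc0010131. A minimal sketch of that classification, assuming a hypothetical read_sev_status_msr() helper and hypothetical bit positions (only MSR_CSV3_ENABLED is used by this patch; the SEV and SEV-ES bit names are taken from the head_64.S comment further down)::

	/* Hedged sketch: classify the CSV protection level from MSR 0xc0010131. */
	#include <stdint.h>

	#define SEV_ENABLED_BIT		(1ULL << 0)	/* CSV:  guest memory encrypted */
	#define SEV_ES_ENABLED_BIT	(1ULL << 1)	/* CSV2: vCPU state also encrypted */
	#define CSV3_ENABLED_BIT	(1ULL << 30)	/* CSV3: assumed bit position */

	extern uint64_t read_sev_status_msr(void);	/* hypothetical rdmsr wrapper */

	static int csv_level(void)
	{
		uint64_t status = read_sev_status_msr();

		if (status & CSV3_ENABLED_BIT)
			return 3;	/* fully isolated guest context */
		if (status & SEV_ES_ENABLED_BIT)
			return 2;	/* encrypted memory and vCPU registers */
		if (status & SEV_ENABLED_BIT)
			return 1;	/* encrypted memory only */
		return 0;		/* not a CSV guest */
	}

The ordering matters: each CSV level builds on the previous one, so the strongest level is tested first.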
+/*
+ * Invoke this before jumping to the real kernel, in case the secure call
+ * pages are not mapped in the identity page table.
+ *
+ * If no #VC exception happens, there is no identity mapping for the secure
+ * call pages in the page table, and page faults are not supported at the
+ * early stage when the real kernel starts running. As a result, a CSV3
+ * guest would shut down when it then accessed the secure call pages.
+ */
+void csv_init_secure_call_pages(void *boot_params)
+{
+	if (!csv3_enabled || csv3_secure_call_init)
+		return;
+
+	/*
+	 * boot_params may not be sanitized yet, but it is OK to access its
+	 * e820_table field.
+	 */
+	csv3_scan_secure_call_pages(boot_params);
+	csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET);
+	csv3_secure_call_init = 1;
+}
+
+void csv_set_status(void)
+{
+	unsigned int eax;
+	unsigned int ebx;
+	unsigned int ecx;
+	unsigned int edx;
+
+	eax = 0;
+	native_cpuid(&eax, &ebx, &ecx, &edx);
+
+	/* HygonGenuine */
+	if (ebx == CPUID_VENDOR_HygonGenuine_ebx &&
+	    ecx == CPUID_VENDOR_HygonGenuine_ecx &&
+	    edx == CPUID_VENDOR_HygonGenuine_edx &&
+	    sme_me_mask) {
+		unsigned long low, high;
+
+		asm volatile("rdmsr\n" : "=a" (low), "=d" (high) :
+			     "c" (MSR_AMD64_SEV));
+
+		if (low & MSR_CSV3_ENABLED)
+			csv3_enabled = 1;
+	}
+}
diff --git a/arch/x86/boot/compressed/csv.h b/arch/x86/boot/compressed/csv.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b8a33551895c2c87281ac993a7deb92e5ead653
--- /dev/null
+++ b/arch/x86/boot/compressed/csv.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Hygon CSV header for early boot related functions.
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ *
+ * Author: Liyang Han
+ */
+
+#ifndef BOOT_COMPRESSED_CSV_H
+#define BOOT_COMPRESSED_CSV_H
+
+#ifdef CONFIG_HYGON_CSV
+
+void csv_set_status(void);
+void csv_init_secure_call_pages(void *boot_params);
+
+void csv_update_page_attr(unsigned long address, pteval_t set, pteval_t clr);
+
+#else
+
+static inline void csv_set_status(void) { }
+static inline void csv_init_secure_call_pages(void *boot_params) { }
+
+static inline void csv_update_page_attr(unsigned long address,
+					pteval_t set, pteval_t clr) { }
+
+#endif
+
+#endif /* BOOT_COMPRESSED_CSV_H */
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index bf4a10a5794f1c84d793f03e00ecd430fbead8c2..d01f9431cee14bce2403919ea27e28fc9ac28898 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -397,6 +397,16 @@ SYM_CODE_START(startup_64)
 	movq	%r15, %rdi
 	call	sev_enable
 #endif
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * Check the CSV active status. CSV and CSV2 guests are indicated by
+	 * MSR_AMD64_SEV_ENABLED_BIT and MSR_AMD64_SEV_ES_ENABLED_BIT in MSR
+	 * register 0xc0010131, respectively.
+	 * A CSV3 guest is indicated by MSR_CSV3_ENABLED in MSR register
+	 * 0xc0010131.
+	 */
+	call	csv_set_status
+#endif
 
 	/*
 	 * configure_5level_paging() updates the number of paging levels using
@@ -463,6 +473,16 @@ SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)
 	movq	%r15, %rdi
 	call	initialize_identity_maps
+#ifdef CONFIG_HYGON_CSV
+	/*
+	 * If running as a CSV3 guest, the secure call pages must be mapped in
+	 * the identity page table before jumping to the decompressed kernel.
+	 * It is safe to scan the secure call pages here.
+	 */
+	movq	%r15, %rdi
+	call	csv_init_secure_call_pages
+#endif
+
 	/*
 	 * Do the extraction, and jump to the new kernel..
*/ diff --git a/arch/x86/boot/compressed/ident_map_64.c b/arch/x86/boot/compressed/ident_map_64.c index 08f93b0401bbd3498b0754e20780e2d585e064f1..ad791d6ebfccc01013f838b706191b220b854750 100644 --- a/arch/x86/boot/compressed/ident_map_64.c +++ b/arch/x86/boot/compressed/ident_map_64.c @@ -298,6 +298,9 @@ static int set_clr_page_flags(struct x86_mapping_info *info, if ((set | clr) & _PAGE_ENC) { clflush_page(address); + /* On CSV3, notify secure processor to manage page attr changes */ + csv_update_page_attr(address, set, clr); + /* * If the encryption attribute is being cleared, change the page state * to shared in the RMP table. diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index cc70d3fb90497ed82cadd61546cd215c2933173b..ae170867a55a6f9f0136f6a05d0b2e130921dbdb 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -37,6 +37,7 @@ #include #include "tdx.h" +#include "csv.h" #define BOOT_CTYPE_H #include diff --git a/arch/x86/configs/anolis-debug_defconfig b/arch/x86/configs/anolis-debug_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..2619e84b4914eaf3283dc0798887b89b38a86b41 --- /dev/null +++ b/arch/x86/configs/anolis-debug_defconfig @@ -0,0 +1,8125 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_CONSTRUCTORS=y +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_KERNEL_ZSTD=y +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +CONFIG_GENERIC_IRQ_DEBUGFS=y +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y 
+CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y +CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +CONFIG_CGROUP_DEBUG=y +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y 
+CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_CSUM=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_KASAN_SHADOW_OFFSET=0xdffffc0000000000 +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set 
+CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set +CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=1024 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK=y +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y 
+CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa +# CONFIG_ADDRESS_MASKING is not set +CONFIG_HOTPLUG_CPU=y +# CONFIG_COMPAT_VDSO is not set +CONFIG_LEGACY_VSYSCALL_XONLY=y +# CONFIG_LEGACY_VSYSCALL_NONE is not set +# CONFIG_CMDLINE_BOOL is not set +CONFIG_MODIFY_LDT_SYSCALL=y +# CONFIG_STRICT_SIGALTSTACK_SIZE is not set +CONFIG_HAVE_LIVEPATCH=y +CONFIG_LIVEPATCH=y +# end of Processor type and features + +CONFIG_CC_HAS_SLS=y +CONFIG_CC_HAS_RETURN_THUNK=y +CONFIG_CC_HAS_ENTRY_PADDING=y +CONFIG_FUNCTION_PADDING_CFI=11 +CONFIG_FUNCTION_PADDING_BYTES=16 +CONFIG_CALL_PADDING=y +CONFIG_HAVE_CALL_THUNKS=y +CONFIG_CALL_THUNKS=y +CONFIG_PREFIX_SYMBOLS=y +CONFIG_SPECULATION_MITIGATIONS=y +CONFIG_PAGE_TABLE_ISOLATION=y +CONFIG_RETPOLINE=y +CONFIG_RETHUNK=y +CONFIG_CPU_UNRET_ENTRY=y +CONFIG_CALL_DEPTH_TRACKING=y +# CONFIG_CALL_THUNKS_DEBUG is not set +CONFIG_CPU_IBPB_ENTRY=y +CONFIG_CPU_IBRS_ENTRY=y +CONFIG_CPU_SRSO=y +# CONFIG_SLS is not set +# CONFIG_GDS_FORCE_MITIGATION is not set +CONFIG_ARCH_HAS_ADD_PAGES=y + +# +# Power management and ACPI options +# +CONFIG_ARCH_HIBERNATION_HEADER=y +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +CONFIG_HIBERNATE_CALLBACKS=y +CONFIG_HIBERNATION=y +CONFIG_HIBERNATION_SNAPSHOT_DEV=y +CONFIG_PM_STD_PARTITION="" +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_USERSPACE_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +CONFIG_PM_DEBUG=y +CONFIG_PM_ADVANCED_DEBUG=y +# CONFIG_PM_TEST_SUSPEND is not set +CONFIG_PM_SLEEP_DEBUG=y +CONFIG_PM_TRACE=y +CONFIG_PM_TRACE_RTC=y +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +# CONFIG_ENERGY_MODEL is not set +CONFIG_ARCH_SUPPORTS_ACPI=y +CONFIG_ACPI=y +CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y +CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y +CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y +CONFIG_ACPI_TABLE_LIB=y +CONFIG_ACPI_DEBUGGER=y +CONFIG_ACPI_DEBUGGER_USER=m +CONFIG_ACPI_SPCR_TABLE=y +# CONFIG_ACPI_FPDT is not set +CONFIG_ACPI_LPIT=y +CONFIG_ACPI_SLEEP=y +CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y +CONFIG_ACPI_EC_DEBUGFS=m +CONFIG_ACPI_AC=y +CONFIG_ACPI_BATTERY=y +CONFIG_ACPI_BUTTON=y +CONFIG_ACPI_VIDEO=m +CONFIG_ACPI_FAN=y +CONFIG_ACPI_TAD=m +CONFIG_ACPI_DOCK=y +CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y +CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y +CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=m +CONFIG_ACPI_HOTPLUG_CPU=y +CONFIG_ACPI_PROCESSOR_AGGREGATOR=m +CONFIG_ACPI_THERMAL=y +CONFIG_ACPI_PLATFORM_PROFILE=m +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_TABLE_UPGRADE=y +CONFIG_ACPI_DEBUG=y +CONFIG_ACPI_PCI_SLOT=y +CONFIG_ACPI_CONTAINER=y +CONFIG_ACPI_HOTPLUG_MEMORY=y +CONFIG_ACPI_HOTPLUG_IOAPIC=y +CONFIG_ACPI_SBS=m +CONFIG_ACPI_HED=y +CONFIG_ACPI_CUSTOM_METHOD=m +CONFIG_ACPI_BGRT=y +CONFIG_ACPI_NFIT=m +# CONFIG_NFIT_SECURITY_DEBUG is not set +CONFIG_ACPI_NUMA=y +CONFIG_ACPI_HMAT=y +CONFIG_HAVE_ACPI_APEI=y +CONFIG_HAVE_ACPI_APEI_NMI=y +CONFIG_ACPI_APEI=y +CONFIG_ACPI_APEI_GHES=y +CONFIG_ACPI_APEI_PCIEAER=y +CONFIG_ACPI_APEI_MEMORY_FAILURE=y +CONFIG_ACPI_APEI_EINJ=m +CONFIG_ACPI_APEI_ERST_DEBUG=m +# CONFIG_ACPI_DPTF is not set +CONFIG_ACPI_WATCHDOG=y +CONFIG_ACPI_EXTLOG=m +CONFIG_ACPI_ADXL=y +CONFIG_ACPI_CONFIGFS=m +# CONFIG_ACPI_PFRUT is not set +CONFIG_ACPI_PCC=y +# CONFIG_ACPI_FFH is not set +CONFIG_PMIC_OPREGION=y 
+CONFIG_ACPI_PRMT=y +CONFIG_X86_PM_TIMER=y + +# +# CPU Frequency scaling +# +CONFIG_CPU_FREQ=y +CONFIG_CPU_FREQ_GOV_ATTR_SET=y +CONFIG_CPU_FREQ_GOV_COMMON=y +CONFIG_CPU_FREQ_STAT=y +CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y +# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set +# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set +CONFIG_CPU_FREQ_GOV_PERFORMANCE=y +CONFIG_CPU_FREQ_GOV_POWERSAVE=y +CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_GOV_ONDEMAND=y +CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y +CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y + +# +# CPU frequency scaling drivers +# +CONFIG_X86_INTEL_PSTATE=y +# CONFIG_X86_PCC_CPUFREQ is not set +CONFIG_X86_AMD_PSTATE=y +CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3 +# CONFIG_X86_AMD_PSTATE_UT is not set +CONFIG_X86_ACPI_CPUFREQ=m +CONFIG_X86_ACPI_CPUFREQ_CPB=y +CONFIG_X86_POWERNOW_K8=m +CONFIG_X86_AMD_FREQ_SENSITIVITY=m +# CONFIG_X86_SPEEDSTEP_CENTRINO is not set +CONFIG_X86_P4_CLOCKMOD=m + +# +# shared options +# +CONFIG_X86_SPEEDSTEP_LIB=m +# end of CPU Frequency scaling + +# +# CPU Idle +# +CONFIG_CPU_IDLE=y +# CONFIG_CPU_IDLE_GOV_LADDER is not set +CONFIG_CPU_IDLE_GOV_MENU=y +# CONFIG_CPU_IDLE_GOV_TEO is not set +CONFIG_CPU_IDLE_GOV_HALTPOLL=y +CONFIG_HALTPOLL_CPUIDLE=y +# end of CPU Idle + +CONFIG_INTEL_IDLE=y +# end of Power management and ACPI options + +# +# Bus options (PCI etc.) +# +CONFIG_PCI_DIRECT=y +CONFIG_PCI_MMCONFIG=y +CONFIG_PCI_XEN=y +CONFIG_MMCONF_FAM10H=y +CONFIG_ISA_DMA_API=y +CONFIG_AMD_NB=y +# end of Bus options (PCI etc.) + +# +# Binary Emulations +# +CONFIG_IA32_EMULATION=y +# CONFIG_X86_X32_ABI is not set +CONFIG_COMPAT_32=y +CONFIG_COMPAT=y +CONFIG_COMPAT_FOR_U64_ALIGNMENT=y +# end of Binary Emulations + +CONFIG_HAVE_KVM=y +CONFIG_HAVE_KVM_PFNCACHE=y +CONFIG_HAVE_KVM_IRQCHIP=y +CONFIG_HAVE_KVM_IRQFD=y +CONFIG_HAVE_KVM_IRQ_ROUTING=y +CONFIG_HAVE_KVM_DIRTY_RING=y +CONFIG_HAVE_KVM_DIRTY_RING_TSO=y +CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y +CONFIG_HAVE_KVM_EVENTFD=y +CONFIG_KVM_MMIO=y +CONFIG_KVM_ASYNC_PF=y +CONFIG_HAVE_KVM_MSI=y +CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y +CONFIG_KVM_VFIO=y +CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y +CONFIG_KVM_COMPAT=y +CONFIG_HAVE_KVM_IRQ_BYPASS=y +CONFIG_HAVE_KVM_NO_POLL=y +CONFIG_KVM_XFER_TO_GUEST_WORK=y +CONFIG_HAVE_KVM_PM_NOTIFIER=y +CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_INTEL=m +CONFIG_X86_SGX_KVM=y +CONFIG_KVM_AMD=m +CONFIG_KVM_AMD_SEV=y +CONFIG_KVM_SMM=y +# CONFIG_KVM_XEN is not set +CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y +CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y +CONFIG_AS_AVX512=y +CONFIG_AS_SHA1_NI=y +CONFIG_AS_SHA256_NI=y +CONFIG_AS_TPAUSE=y +CONFIG_AS_GFNI=y +CONFIG_AS_WRUSS=y + +# +# General architecture-dependent options +# +CONFIG_HOTPLUG_SMT=y +CONFIG_HOTPLUG_CORE_SYNC=y +CONFIG_HOTPLUG_CORE_SYNC_DEAD=y +CONFIG_HOTPLUG_CORE_SYNC_FULL=y +CONFIG_HOTPLUG_SPLIT_STARTUP=y +CONFIG_HOTPLUG_PARALLEL=y +CONFIG_GENERIC_ENTRY=y +CONFIG_KPROBES=y +CONFIG_JUMP_LABEL=y +# CONFIG_STATIC_KEYS_SELFTEST is not set +# CONFIG_STATIC_CALL_SELFTEST is not set +CONFIG_OPTPROBES=y +CONFIG_KPROBES_ON_FTRACE=y +CONFIG_UPROBES=y +CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_KRETPROBES=y +CONFIG_KRETPROBE_ON_RETHOOK=y +CONFIG_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_IOREMAP_PROT=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y +CONFIG_HAVE_KPROBES_ON_FTRACE=y +CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y +CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y +CONFIG_HAVE_NMI=y 
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y +CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y +CONFIG_HAVE_ARCH_TRACEHOOK=y +CONFIG_HAVE_DMA_CONTIGUOUS=y +CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_ARCH_HAS_FORTIFY_SOURCE=y +CONFIG_ARCH_HAS_SET_MEMORY=y +CONFIG_ARCH_HAS_SET_DIRECT_MAP=y +CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y +CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y +CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y +CONFIG_ARCH_WANTS_NO_INSTR=y +CONFIG_HAVE_ASM_MODVERSIONS=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y +CONFIG_HAVE_RSEQ=y +CONFIG_HAVE_RUST=y +CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y +CONFIG_HAVE_HW_BREAKPOINT=y +CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y +CONFIG_HAVE_USER_RETURN_NOTIFIER=y +CONFIG_HAVE_PERF_EVENTS_NMI=y +CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y +CONFIG_HAVE_PERF_REGS=y +CONFIG_HAVE_PERF_USER_STACK_DUMP=y +CONFIG_HAVE_ARCH_JUMP_LABEL=y +CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y +CONFIG_MMU_GATHER_TABLE_FREE=y +CONFIG_MMU_GATHER_RCU_TABLE_FREE=y +CONFIG_MMU_GATHER_MERGE_VMAS=y +CONFIG_MMU_LAZY_TLB_REFCOUNT=y +CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y +CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y +CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y +CONFIG_HAVE_CMPXCHG_LOCAL=y +CONFIG_HAVE_CMPXCHG_DOUBLE=y +CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y +CONFIG_HAVE_ARCH_SECCOMP=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y +CONFIG_SECCOMP=y +CONFIG_SECCOMP_FILTER=y +# CONFIG_SECCOMP_CACHE_DEBUG is not set +CONFIG_HAVE_ARCH_STACKLEAK=y +CONFIG_HAVE_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR=y +CONFIG_STACKPROTECTOR_STRONG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG=y +CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y +CONFIG_LTO_NONE=y +CONFIG_ARCH_SUPPORTS_CFI_CLANG=y +# CONFIG_CFI_CLANG is not set +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y +CONFIG_HAVE_CONTEXT_TRACKING_USER=y +CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y +CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOVE_PUD=y +CONFIG_HAVE_MOVE_PMD=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y +CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y +CONFIG_HAVE_ARCH_HUGE_VMAP=y +CONFIG_HAVE_ARCH_HUGE_VMALLOC=y +CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y +CONFIG_ARCH_WANT_PMD_MKWRITE=y +CONFIG_HAVE_ARCH_SOFT_DIRTY=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y +CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y +CONFIG_SOFTIRQ_ON_OWN_STACK=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y +CONFIG_HAVE_ARCH_MMAP_RND_BITS=y +CONFIG_HAVE_EXIT_THREAD=y +CONFIG_ARCH_MMAP_RND_BITS=28 +CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y +CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8 +CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y +CONFIG_PAGE_SIZE_LESS_THAN_64KB=y +CONFIG_PAGE_SIZE_LESS_THAN_256KB=y +CONFIG_HAVE_OBJTOOL=y +CONFIG_HAVE_JUMP_LABEL_HACK=y +CONFIG_HAVE_NOINSTR_HACK=y +CONFIG_HAVE_NOINSTR_VALIDATION=y +CONFIG_HAVE_UACCESS_VALIDATION=y +CONFIG_HAVE_STACK_VALIDATION=y +CONFIG_HAVE_RELIABLE_STACKTRACE=y +CONFIG_OLD_SIGSUSPEND3=y +CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_COMPAT_32BIT_TIME=y +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y +CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y +CONFIG_RANDOMIZE_KSTACK_OFFSET=y +# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set +CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y +CONFIG_STRICT_KERNEL_RWX=y +CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y +CONFIG_STRICT_MODULE_RWX=y +CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y +CONFIG_ARCH_USE_MEMREMAP_PROT=y +CONFIG_LOCK_EVENT_COUNTS=y +CONFIG_ARCH_HAS_MEM_ENCRYPT=y +CONFIG_ARCH_HAS_CC_PLATFORM=y +CONFIG_HAVE_STATIC_CALL=y +CONFIG_HAVE_STATIC_CALL_INLINE=y +CONFIG_HAVE_PREEMPT_DYNAMIC=y +CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y 
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y +CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y +CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y +CONFIG_ARCH_HAS_ELFCORE_COMPAT=y +CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y +CONFIG_DYNAMIC_SIGFRAME=y +CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y +CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y + +# +# GCOV-based kernel profiling +# +# CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y +# end of GCOV-based kernel profiling + +CONFIG_HAVE_GCC_PLUGINS=y +CONFIG_GCC_PLUGINS=y +# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set +CONFIG_FUNCTION_ALIGNMENT_4B=y +CONFIG_FUNCTION_ALIGNMENT_16B=y +CONFIG_FUNCTION_ALIGNMENT=16 +# end of General architecture-dependent options + +CONFIG_RT_MUTEXES=y +CONFIG_BASE_SMALL=0 +CONFIG_MODULE_SIG_FORMAT=y +CONFIG_MODULES=y +# CONFIG_MODULE_DEBUG is not set +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_MODULE_FORCE_UNLOAD is not set +# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set +CONFIG_MODVERSIONS=y +CONFIG_ASM_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_MODULE_SIG=y +# CONFIG_MODULE_SIG_FORCE is not set +# CONFIG_MODULE_SIG_ALL is not set +# CONFIG_MODULE_SIG_SHA1 is not set +# CONFIG_MODULE_SIG_SHA224 is not set +CONFIG_MODULE_SIG_SHA256=y +# CONFIG_MODULE_SIG_SHA384 is not set +# CONFIG_MODULE_SIG_SHA512 is not set +CONFIG_MODULE_SIG_HASH="sha256" +CONFIG_MODULE_COMPRESS_NONE=y +# CONFIG_MODULE_COMPRESS_GZIP is not set +# CONFIG_MODULE_COMPRESS_XZ is not set +# CONFIG_MODULE_COMPRESS_ZSTD is not set +# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set +CONFIG_MODPROBE_PATH="/sbin/modprobe" +CONFIG_MODULES_TREE_LOOKUP=y +CONFIG_BLOCK=y +CONFIG_BLOCK_LEGACY_AUTOLOAD=y +CONFIG_BLK_RQ_ALLOC_TIME=y +CONFIG_BLK_CGROUP_RWSTAT=y +CONFIG_BLK_CGROUP_PUNT_BIO=y +CONFIG_BLK_DEV_BSG_COMMON=y +CONFIG_BLK_ICQ=y +CONFIG_BLK_DEV_BSGLIB=y +CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_INTEGRITY_T10=m +CONFIG_BLK_DEV_ZONED=y +CONFIG_BLK_DEV_THROTTLING=y +# CONFIG_BLK_DEV_THROTTLING_LOW is not set +# CONFIG_BLK_WBT is not set +CONFIG_BLK_CGROUP_IOLATENCY=y +# CONFIG_BLK_CGROUP_FC_APPID is not set +CONFIG_BLK_CGROUP_IOCOST=y +# CONFIG_BLK_CGROUP_IOPRIO is not set +CONFIG_BLK_DEBUG_FS=y +CONFIG_BLK_DEBUG_FS_ZONED=y +# CONFIG_BLK_SED_OPAL is not set +# CONFIG_BLK_INLINE_ENCRYPTION is not set + +# +# Partition Types +# +CONFIG_PARTITION_ADVANCED=y +# CONFIG_ACORN_PARTITION is not set +# CONFIG_AIX_PARTITION is not set +CONFIG_OSF_PARTITION=y +CONFIG_AMIGA_PARTITION=y +# CONFIG_ATARI_PARTITION is not set +CONFIG_MAC_PARTITION=y +CONFIG_MSDOS_PARTITION=y +CONFIG_BSD_DISKLABEL=y +CONFIG_MINIX_SUBPARTITION=y +CONFIG_SOLARIS_X86_PARTITION=y +CONFIG_UNIXWARE_DISKLABEL=y +# CONFIG_LDM_PARTITION is not set +CONFIG_SGI_PARTITION=y +# CONFIG_ULTRIX_PARTITION is not set +CONFIG_SUN_PARTITION=y +CONFIG_KARMA_PARTITION=y +CONFIG_EFI_PARTITION=y +# CONFIG_SYSV68_PARTITION is not set +# CONFIG_CMDLINE_PARTITION is not set +# end of Partition Types + +CONFIG_BLK_MQ_PCI=y +CONFIG_BLK_MQ_VIRTIO=y +CONFIG_BLK_PM=y +CONFIG_BLOCK_HOLDER_DEPRECATED=y +CONFIG_BLK_MQ_STACKING=y + +# +# IO Schedulers +# +CONFIG_MQ_IOSCHED_DEADLINE=y +CONFIG_MQ_IOSCHED_KYBER=y +CONFIG_IOSCHED_BFQ=y +CONFIG_BFQ_GROUP_IOSCHED=y +CONFIG_BFQ_CGROUP_DEBUG=y +# end of IO Schedulers + +CONFIG_PREEMPT_NOTIFIERS=y +CONFIG_PADATA=y +CONFIG_ASN1=y +CONFIG_UNINLINE_SPIN_UNLOCK=y +CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y +CONFIG_MUTEX_SPIN_ON_OWNER=y +CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y +CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y +CONFIG_QUEUED_SPINLOCKS=y +CONFIG_ARCH_USE_QUEUED_RWLOCKS=y 
+CONFIG_QUEUED_RWLOCKS=y +CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y +CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y +CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y +CONFIG_CK_KABI_RESERVE=y +CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y +CONFIG_FREEZER=y + +# +# Executable file formats +# +CONFIG_BINFMT_ELF=y +CONFIG_COMPAT_BINFMT_ELF=y +CONFIG_ELFCORE=y +CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y +CONFIG_BINFMT_SCRIPT=y +CONFIG_BINFMT_MISC=m +CONFIG_COREDUMP=y +# end of Executable file formats + +# +# Memory Management options +# +CONFIG_ZPOOL=y +CONFIG_SWAP=y +CONFIG_ZSWAP=y +# CONFIG_ZSWAP_DEFAULT_ON is not set +# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set +# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set +CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo" +CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y +# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set +# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set +CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud" +CONFIG_ZBUD=y +# CONFIG_Z3FOLD is not set +CONFIG_ZSMALLOC=y +CONFIG_ZSMALLOC_STAT=y +CONFIG_ZSMALLOC_CHAIN_SIZE=8 + +# +# SLAB allocator options +# +# CONFIG_SLAB_DEPRECATED is not set +CONFIG_SLUB=y +# CONFIG_SLAB_MERGE_DEFAULT is not set +CONFIG_SLAB_FREELIST_RANDOM=y +# CONFIG_SLAB_FREELIST_HARDENED is not set +# CONFIG_SLUB_STATS is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_RANDOM_KMALLOC_CACHES is not set +# end of SLAB allocator options + +CONFIG_SHUFFLE_PAGE_ALLOCATOR=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SPARSEMEM=y +CONFIG_SPARSEMEM_EXTREME=y +CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y +CONFIG_SPARSEMEM_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y +CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y +CONFIG_HAVE_FAST_GUP=y +CONFIG_NUMA_KEEP_MEMINFO=y +CONFIG_MEMORY_ISOLATION=y +CONFIG_EXCLUSIVE_SYSTEM_RAM=y +CONFIG_HAVE_BOOTMEM_INFO_NODE=y +CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y +CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y +CONFIG_MEMORY_HOTPLUG=y +CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y +CONFIG_MEMORY_HOTREMOVE=y +CONFIG_MHP_MEMMAP_ON_MEMORY=y +CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y +CONFIG_SPLIT_PTLOCK_CPUS=4 +CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y +CONFIG_MEMORY_BALLOON=y +CONFIG_BALLOON_COMPACTION=y +CONFIG_COMPACTION=y +CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1 +CONFIG_PAGE_REPORTING=y +CONFIG_MIGRATION=y +CONFIG_DEVICE_MIGRATION=y +CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y +CONFIG_ARCH_ENABLE_THP_MIGRATION=y +CONFIG_CONTIG_ALLOC=y +CONFIG_PHYS_ADDR_T_64BIT=y +CONFIG_MMU_NOTIFIER=y +CONFIG_KSM=y +CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 +CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y +CONFIG_MEMORY_FAILURE=y +CONFIG_HWPOISON_INJECT=m +CONFIG_ARCH_WANT_GENERAL_HUGETLB=y +CONFIG_ARCH_WANTS_THP_SWAP=y +CONFIG_TRANSPARENT_HUGEPAGE=y +CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y +# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_THP_SWAP=y +CONFIG_READ_ONLY_THP_FOR_FS=y +CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y +CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y +CONFIG_USE_PERCPU_NUMA_NODE_ID=y +CONFIG_HAVE_SETUP_PER_CPU_AREA=y +CONFIG_CMA=y +# CONFIG_CMA_DEBUG is not set +# CONFIG_CMA_DEBUGFS is not set +# CONFIG_CMA_SYSFS is not set +CONFIG_CMA_AREAS=19 +CONFIG_MEM_SOFT_DIRTY=y +CONFIG_GENERIC_EARLY_IOREMAP=y +CONFIG_DEFERRED_STRUCT_PAGE_INIT=y +CONFIG_PAGE_IDLE_FLAG=y +CONFIG_IDLE_PAGE_TRACKING=y +CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y +CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y 
+CONFIG_ARCH_HAS_PTE_DEVMAP=y +CONFIG_ZONE_DMA=y +CONFIG_ZONE_DMA32=y +CONFIG_ZONE_DEVICE=y +CONFIG_HMM_MIRROR=y +CONFIG_GET_FREE_REGION=y +CONFIG_DEVICE_PRIVATE=y +CONFIG_VMAP_PFN=y +CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y +CONFIG_ARCH_HAS_PKEYS=y +CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_PERCPU_STATS is not set +# CONFIG_GUP_TEST is not set +# CONFIG_DMAPOOL_TEST is not set +CONFIG_ARCH_HAS_PTE_SPECIAL=y +CONFIG_MAPPING_DIRTY_HELPERS=y +CONFIG_MEMFD_CREATE=y +CONFIG_SECRETMEM=y +# CONFIG_ANON_VMA_NAME is not set +CONFIG_USERFAULTFD=y +CONFIG_HAVE_ARCH_USERFAULTFD_WP=y +CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y +CONFIG_PTE_MARKER_UFFD_WP=y +CONFIG_LRU_GEN=y +# CONFIG_LRU_GEN_ENABLED is not set +# CONFIG_LRU_GEN_STATS is not set +CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y +CONFIG_PER_VMA_LOCK=y +CONFIG_LOCK_MM_AND_FIND_VMA=y + +# +# Data Access Monitoring +# +CONFIG_DAMON=y +CONFIG_DAMON_VADDR=y +CONFIG_DAMON_PADDR=y +# CONFIG_DAMON_SYSFS is not set +CONFIG_DAMON_DBGFS=y +# CONFIG_DAMON_RECLAIM is not set +# CONFIG_DAMON_LRU_SORT is not set +# end of Data Access Monitoring +# end of Memory Management options + +CONFIG_NET=y +CONFIG_NET_INGRESS=y +CONFIG_NET_EGRESS=y +CONFIG_NET_XGRESS=y +CONFIG_NET_REDIRECT=y +CONFIG_SKB_EXTENSIONS=y + +# +# Networking options +# +CONFIG_PACKET=y +CONFIG_PACKET_DIAG=m +CONFIG_UNIX=y +CONFIG_UNIX_SCM=y +CONFIG_AF_UNIX_OOB=y +CONFIG_UNIX_DIAG=m +CONFIG_TLS=m +CONFIG_TLS_DEVICE=y +# CONFIG_TLS_TOE is not set +CONFIG_XFRM=y +CONFIG_XFRM_OFFLOAD=y +CONFIG_XFRM_ALGO=y +CONFIG_XFRM_USER=y +# CONFIG_XFRM_USER_COMPAT is not set +CONFIG_XFRM_INTERFACE=m +CONFIG_XFRM_SUB_POLICY=y +CONFIG_XFRM_MIGRATE=y +CONFIG_XFRM_STATISTICS=y +CONFIG_XFRM_AH=m +CONFIG_XFRM_ESP=m +CONFIG_XFRM_IPCOMP=m +CONFIG_NET_KEY=m +CONFIG_NET_KEY_MIGRATE=y +CONFIG_SMC=m +CONFIG_SMC_DIAG=m +CONFIG_XDP_SOCKETS=y +CONFIG_XDP_SOCKETS_DIAG=m +CONFIG_NET_HANDSHAKE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_FIB_TRIE_STATS=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y +# CONFIG_IP_PNP is not set +CONFIG_NET_IPIP=m +CONFIG_NET_IPGRE_DEMUX=m +CONFIG_NET_IP_TUNNEL=m +CONFIG_NET_IPGRE=m +CONFIG_NET_IPGRE_BROADCAST=y +CONFIG_IP_MROUTE_COMMON=y +CONFIG_IP_MROUTE=y +CONFIG_IP_MROUTE_MULTIPLE_TABLES=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_NET_IPVTI=m +CONFIG_NET_UDP_TUNNEL=m +# CONFIG_NET_FOU is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m +# CONFIG_INET_ESPINTCP is not set +CONFIG_INET_IPCOMP=m +CONFIG_INET_TABLE_PERTURB_ORDER=16 +CONFIG_INET_XFRM_TUNNEL=m +CONFIG_INET_TUNNEL=m +CONFIG_INET_DIAG=m +CONFIG_INET_TCP_DIAG=m +CONFIG_INET_UDP_DIAG=m +CONFIG_INET_RAW_DIAG=m +# CONFIG_INET_DIAG_DESTROY is not set +CONFIG_TCP_CONG_ADVANCED=y +CONFIG_TCP_CONG_BIC=m +CONFIG_TCP_CONG_CUBIC=y +CONFIG_TCP_CONG_WESTWOOD=m +CONFIG_TCP_CONG_HTCP=m +CONFIG_TCP_CONG_HSTCP=m +CONFIG_TCP_CONG_HYBLA=m +CONFIG_TCP_CONG_VEGAS=m +CONFIG_TCP_CONG_NV=m +CONFIG_TCP_CONG_SCALABLE=m +CONFIG_TCP_CONG_LP=m +CONFIG_TCP_CONG_VENO=m +CONFIG_TCP_CONG_YEAH=m +CONFIG_TCP_CONG_ILLINOIS=m +CONFIG_TCP_CONG_DCTCP=m +CONFIG_TCP_CONG_CDG=m +CONFIG_TCP_CONG_BBR=m +CONFIG_DEFAULT_CUBIC=y +# CONFIG_DEFAULT_RENO is not set +CONFIG_DEFAULT_TCP_CONG="cubic" +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m +# CONFIG_INET6_ESPINTCP is not set 
+CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_MIP6=m +# CONFIG_IPV6_ILA is not set +CONFIG_INET6_XFRM_TUNNEL=m +CONFIG_INET6_TUNNEL=m +CONFIG_IPV6_VTI=m +CONFIG_IPV6_SIT=m +CONFIG_IPV6_SIT_6RD=y +CONFIG_IPV6_NDISC_NODETYPE=y +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_GRE=m +CONFIG_IPV6_MULTIPLE_TABLES=y +CONFIG_IPV6_SUBTREES=y +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y +CONFIG_IPV6_PIMSM_V2=y +# CONFIG_IPV6_SEG6_LWTUNNEL is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_IPV6_RPL_LWTUNNEL is not set +# CONFIG_IPV6_IOAM6_LWTUNNEL is not set +CONFIG_NETLABEL=y +CONFIG_MPTCP=y +CONFIG_INET_MPTCP_DIAG=m +CONFIG_MPTCP_IPV6=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NET_PTP_CLASSIFY=y +CONFIG_NETWORK_PHY_TIMESTAMPING=y +CONFIG_NETFILTER=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=m + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_EGRESS=y +CONFIG_NETFILTER_SKIP_EGRESS=y +CONFIG_NETFILTER_NETLINK=m +CONFIG_NETFILTER_FAMILY_BRIDGE=y +CONFIG_NETFILTER_FAMILY_ARP=y +CONFIG_NETFILTER_BPF_LINK=y +# CONFIG_NETFILTER_NETLINK_HOOK is not set +CONFIG_NETFILTER_NETLINK_ACCT=m +CONFIG_NETFILTER_NETLINK_QUEUE=m +CONFIG_NETFILTER_NETLINK_LOG=m +CONFIG_NETFILTER_NETLINK_OSF=m +CONFIG_NF_CONNTRACK=m +CONFIG_NF_LOG_SYSLOG=m +CONFIG_NETFILTER_CONNCOUNT=m +CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_ZONES=y +CONFIG_NF_CONNTRACK_PROCFS=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y +CONFIG_NF_CONNTRACK_OVS=y +CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y +CONFIG_NF_CT_PROTO_SCTP=y +CONFIG_NF_CT_PROTO_UDPLITE=y +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_BROADCAST=m +CONFIG_NF_CONNTRACK_NETBIOS_NS=m +CONFIG_NF_CONNTRACK_SNMP=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NF_CT_NETLINK_TIMEOUT=m +CONFIG_NF_CT_NETLINK_HELPER=m +CONFIG_NETFILTER_NETLINK_GLUE_CT=y +CONFIG_NF_NAT=m +CONFIG_NF_NAT_AMANDA=m +CONFIG_NF_NAT_FTP=m +CONFIG_NF_NAT_IRC=m +CONFIG_NF_NAT_SIP=m +CONFIG_NF_NAT_TFTP=m +CONFIG_NF_NAT_REDIRECT=y +CONFIG_NF_NAT_MASQUERADE=y +CONFIG_NF_NAT_OVS=y +CONFIG_NETFILTER_SYNPROXY=m +CONFIG_NF_TABLES=m +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NFT_NUMGEN=m +CONFIG_NFT_CT=m +CONFIG_NFT_FLOW_OFFLOAD=m +CONFIG_NFT_CONNLIMIT=m +CONFIG_NFT_LOG=m +CONFIG_NFT_LIMIT=m +CONFIG_NFT_MASQ=m +CONFIG_NFT_REDIR=m +CONFIG_NFT_NAT=m +CONFIG_NFT_TUNNEL=m +CONFIG_NFT_QUEUE=m +CONFIG_NFT_QUOTA=m +CONFIG_NFT_REJECT=m +CONFIG_NFT_REJECT_INET=m +CONFIG_NFT_COMPAT=m +CONFIG_NFT_HASH=m +CONFIG_NFT_FIB=m +CONFIG_NFT_FIB_INET=m +CONFIG_NFT_XFRM=m +CONFIG_NFT_SOCKET=m +CONFIG_NFT_OSF=m +CONFIG_NFT_TPROXY=m +# CONFIG_NFT_SYNPROXY is not set +CONFIG_NF_DUP_NETDEV=m +CONFIG_NFT_DUP_NETDEV=m +CONFIG_NFT_FWD_NETDEV=m +CONFIG_NFT_FIB_NETDEV=m +# CONFIG_NFT_REJECT_NETDEV is not set +CONFIG_NF_FLOW_TABLE_INET=m +CONFIG_NF_FLOW_TABLE=m +# CONFIG_NF_FLOW_TABLE_PROCFS is not set +CONFIG_NETFILTER_XTABLES=y +# CONFIG_NETFILTER_XTABLES_COMPAT is not set + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=m +CONFIG_NETFILTER_XT_CONNMARK=m +CONFIG_NETFILTER_XT_SET=m + +# +# Xtables targets +# +CONFIG_NETFILTER_XT_TARGET_AUDIT=m +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m 
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m +CONFIG_NETFILTER_XT_TARGET_CT=m +CONFIG_NETFILTER_XT_TARGET_DSCP=m +CONFIG_NETFILTER_XT_TARGET_HL=m +CONFIG_NETFILTER_XT_TARGET_HMARK=m +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m +CONFIG_NETFILTER_XT_TARGET_LED=m +CONFIG_NETFILTER_XT_TARGET_LOG=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_NAT=m +CONFIG_NETFILTER_XT_TARGET_NETMAP=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_NOTRACK=m +CONFIG_NETFILTER_XT_TARGET_RATEEST=m +CONFIG_NETFILTER_XT_TARGET_REDIRECT=m +CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m +CONFIG_NETFILTER_XT_TARGET_TEE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m + +# +# Xtables matches +# +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m +CONFIG_NETFILTER_XT_MATCH_BPF=m +CONFIG_NETFILTER_XT_MATCH_CGROUP=m +CONFIG_NETFILTER_XT_MATCH_CLUSTER=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_CPU=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m +CONFIG_NETFILTER_XT_MATCH_DSCP=m +CONFIG_NETFILTER_XT_MATCH_ECN=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_HL=m +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_IPVS=m +CONFIG_NETFILTER_XT_MATCH_L2TP=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_NFACCT=m +CONFIG_NETFILTER_XT_MATCH_OSF=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SCTP=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +# end of Core Netfilter Configuration + +CONFIG_IP_SET=m +CONFIG_IP_SET_MAX=256 +CONFIG_IP_SET_BITMAP_IP=m +CONFIG_IP_SET_BITMAP_IPMAC=m +CONFIG_IP_SET_BITMAP_PORT=m +CONFIG_IP_SET_HASH_IP=m +CONFIG_IP_SET_HASH_IPMARK=m +CONFIG_IP_SET_HASH_IPPORT=m +CONFIG_IP_SET_HASH_IPPORTIP=m +CONFIG_IP_SET_HASH_IPPORTNET=m +CONFIG_IP_SET_HASH_IPMAC=m +CONFIG_IP_SET_HASH_MAC=m +CONFIG_IP_SET_HASH_NETPORTNET=m +CONFIG_IP_SET_HASH_NET=m +CONFIG_IP_SET_HASH_NETNET=m +CONFIG_IP_SET_HASH_NETPORT=m +CONFIG_IP_SET_HASH_NETIFACE=m +CONFIG_IP_SET_LIST_SET=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_DEBUG=y +CONFIG_IP_VS_TAB_BITS=12 + +# +# IPVS transport protocol load balancing support +# +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_AH_ESP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_PROTO_SCTP=y + +# +# IPVS scheduler +# +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_FO=m +CONFIG_IP_VS_OVF=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m 
+CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_MH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +# CONFIG_IP_VS_TWOS is not set + +# +# IPVS SH scheduler +# +CONFIG_IP_VS_SH_TAB_BITS=8 + +# +# IPVS MH scheduler +# +CONFIG_IP_VS_MH_TAB_INDEX=12 + +# +# IPVS application helper +# +CONFIG_IP_VS_FTP=m +CONFIG_IP_VS_NFCT=y +CONFIG_IP_VS_PE_SIP=m + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=m +CONFIG_NF_SOCKET_IPV4=m +CONFIG_NF_TPROXY_IPV4=m +CONFIG_NF_TABLES_IPV4=y +CONFIG_NFT_REJECT_IPV4=m +CONFIG_NFT_DUP_IPV4=m +CONFIG_NFT_FIB_IPV4=m +CONFIG_NF_TABLES_ARP=y +CONFIG_NF_DUP_IPV4=m +CONFIG_NF_LOG_ARP=m +CONFIG_NF_LOG_IPV4=m +CONFIG_NF_REJECT_IPV4=m +CONFIG_NF_NAT_SNMP_BASIC=m +CONFIG_NF_NAT_PPTP=m +CONFIG_NF_NAT_H323=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_RPFILTER=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_SYNPROXY=m +CONFIG_IP_NF_NAT=m +CONFIG_IP_NF_TARGET_MASQUERADE=m +CONFIG_IP_NF_TARGET_NETMAP=m +CONFIG_IP_NF_TARGET_REDIRECT=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_SECURITY=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +# end of IP: Netfilter Configuration + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_SOCKET_IPV6=m +CONFIG_NF_TPROXY_IPV6=m +CONFIG_NF_TABLES_IPV6=y +CONFIG_NFT_REJECT_IPV6=m +CONFIG_NFT_DUP_IPV6=m +CONFIG_NFT_FIB_IPV6=m +CONFIG_NF_DUP_IPV6=m +CONFIG_NF_REJECT_IPV6=m +CONFIG_NF_LOG_IPV6=m +CONFIG_IP6_NF_IPTABLES=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RPFILTER=m +CONFIG_IP6_NF_MATCH_RT=m +# CONFIG_IP6_NF_MATCH_SRH is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_TARGET_SYNPROXY=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_IP6_NF_SECURITY=m +CONFIG_IP6_NF_NAT=m +CONFIG_IP6_NF_TARGET_MASQUERADE=m +CONFIG_IP6_NF_TARGET_NPT=m +# end of IPv6: Netfilter Configuration + +CONFIG_NF_DEFRAG_IPV6=m +CONFIG_NF_TABLES_BRIDGE=m +# CONFIG_NFT_BRIDGE_META is not set +CONFIG_NFT_BRIDGE_REJECT=m +# CONFIG_NF_CONNTRACK_BRIDGE is not set +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +# CONFIG_BPFILTER is not set +# CONFIG_IP_DCCP is not set +CONFIG_IP_SCTP=m +# CONFIG_SCTP_DBG_OBJCNT is not set +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set +CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y +# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set +CONFIG_SCTP_COOKIE_HMAC_MD5=y +CONFIG_SCTP_COOKIE_HMAC_SHA1=y +CONFIG_INET_SCTP_DIAG=m +# CONFIG_RDS is not set +CONFIG_TIPC=m +CONFIG_TIPC_MEDIA_IB=y +CONFIG_TIPC_MEDIA_UDP=y +CONFIG_TIPC_CRYPTO=y +CONFIG_TIPC_DIAG=m +CONFIG_ATM=m +CONFIG_ATM_CLIP=m +# CONFIG_ATM_CLIP_NO_ICMP is not set +CONFIG_ATM_LANE=m +# CONFIG_ATM_MPOA is not set +CONFIG_ATM_BR2684=m +# 
CONFIG_ATM_BR2684_IPFILTER is not set +CONFIG_L2TP=m +CONFIG_L2TP_DEBUGFS=m +CONFIG_L2TP_V3=y +CONFIG_L2TP_IP=m +CONFIG_L2TP_ETH=m +CONFIG_STP=m +CONFIG_GARP=m +CONFIG_MRP=m +CONFIG_BRIDGE=m +CONFIG_BRIDGE_IGMP_SNOOPING=y +CONFIG_BRIDGE_VLAN_FILTERING=y +# CONFIG_BRIDGE_MRP is not set +# CONFIG_BRIDGE_CFM is not set +# CONFIG_NET_DSA is not set +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_VLAN_8021Q_MVRP=y +CONFIG_LLC=m +# CONFIG_LLC2 is not set +# CONFIG_ATALK is not set +# CONFIG_X25 is not set +# CONFIG_LAPB is not set +# CONFIG_PHONET is not set +CONFIG_6LOWPAN=m +# CONFIG_6LOWPAN_DEBUGFS is not set +# CONFIG_6LOWPAN_NHC is not set +CONFIG_IEEE802154=m +# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set +CONFIG_IEEE802154_SOCKET=m +CONFIG_IEEE802154_6LOWPAN=m +CONFIG_MAC802154=m +CONFIG_NET_SCHED=y + +# +# Queueing/Scheduling +# +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_MULTIQ=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFB=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +# CONFIG_NET_SCH_CBS is not set +# CONFIG_NET_SCH_ETF is not set +CONFIG_NET_SCH_MQPRIO_LIB=m +# CONFIG_NET_SCH_TAPRIO is not set +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_DRR=m +CONFIG_NET_SCH_MQPRIO=m +# CONFIG_NET_SCH_SKBPRIO is not set +CONFIG_NET_SCH_CHOKE=m +CONFIG_NET_SCH_QFQ=m +CONFIG_NET_SCH_CODEL=m +CONFIG_NET_SCH_FQ_CODEL=y +# CONFIG_NET_SCH_CAKE is not set +CONFIG_NET_SCH_FQ=m +CONFIG_NET_SCH_HHF=m +CONFIG_NET_SCH_PIE=m +# CONFIG_NET_SCH_FQ_PIE is not set +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_SCH_PLUG=m +# CONFIG_NET_SCH_ETS is not set +CONFIG_NET_SCH_DEFAULT=y +# CONFIG_DEFAULT_FQ is not set +# CONFIG_DEFAULT_CODEL is not set +CONFIG_DEFAULT_FQ_CODEL=y +# CONFIG_DEFAULT_SFQ is not set +# CONFIG_DEFAULT_PFIFO_FAST is not set +CONFIG_DEFAULT_NET_SCH="fq_codel" + +# +# Classification +# +CONFIG_NET_CLS=y +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_CLS_U32_PERF=y +CONFIG_CLS_U32_MARK=y +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_CGROUP=y +CONFIG_NET_CLS_BPF=m +CONFIG_NET_CLS_FLOWER=m +CONFIG_NET_CLS_MATCHALL=m +CONFIG_NET_EMATCH=y +CONFIG_NET_EMATCH_STACK=32 +CONFIG_NET_EMATCH_CMP=m +CONFIG_NET_EMATCH_NBYTE=m +CONFIG_NET_EMATCH_U32=m +CONFIG_NET_EMATCH_META=m +CONFIG_NET_EMATCH_TEXT=m +CONFIG_NET_EMATCH_IPSET=m +# CONFIG_NET_EMATCH_IPT is not set +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=m +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_SAMPLE=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_ACT_CSUM=m +# CONFIG_NET_ACT_MPLS is not set +CONFIG_NET_ACT_VLAN=m +CONFIG_NET_ACT_BPF=m +# CONFIG_NET_ACT_CONNMARK is not set +# CONFIG_NET_ACT_CTINFO is not set +CONFIG_NET_ACT_SKBMOD=m +# CONFIG_NET_ACT_IFE is not set +CONFIG_NET_ACT_TUNNEL_KEY=m +CONFIG_NET_ACT_CT=m +# CONFIG_NET_ACT_GATE is not set +CONFIG_NET_TC_SKB_EXT=y +CONFIG_NET_SCH_FIFO=y +CONFIG_DCB=y +CONFIG_DNS_RESOLVER=m +# CONFIG_BATMAN_ADV is not set +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_VSOCKETS=m +CONFIG_VSOCKETS_DIAG=m +CONFIG_VSOCKETS_LOOPBACK=m +CONFIG_VMWARE_VMCI_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS=m +CONFIG_VIRTIO_VSOCKETS_COMMON=m +CONFIG_HYPERV_VSOCKETS=m +CONFIG_NETLINK_DIAG=m +CONFIG_MPLS=y +CONFIG_NET_MPLS_GSO=y +CONFIG_MPLS_ROUTING=m +CONFIG_MPLS_IPTUNNEL=m +CONFIG_NET_NSH=y +# CONFIG_HSR is not set 
+CONFIG_NET_SWITCHDEV=y +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_QRTR is not set +# CONFIG_NET_NCSI is not set +CONFIG_PCPU_DEV_REFCNT=y +CONFIG_MAX_SKB_FRAGS=17 +CONFIG_RPS=y +CONFIG_RFS_ACCEL=y +CONFIG_SOCK_RX_QUEUE_MAPPING=y +CONFIG_XPS=y +CONFIG_CGROUP_NET_PRIO=y +CONFIG_CGROUP_NET_CLASSID=y +CONFIG_NET_RX_BUSY_POLL=y +CONFIG_BQL=y +CONFIG_BPF_STREAM_PARSER=y +CONFIG_NET_FLOW_LIMIT=y + +# +# Network testing +# +CONFIG_NET_PKTGEN=m +CONFIG_NET_DROP_MONITOR=y +# end of Network testing +# end of Networking options + +# CONFIG_HAMRADIO is not set +# CONFIG_CAN is not set +CONFIG_BT=m +CONFIG_BT_BREDR=y +CONFIG_BT_RFCOMM=m +CONFIG_BT_RFCOMM_TTY=y +CONFIG_BT_BNEP=m +CONFIG_BT_BNEP_MC_FILTER=y +CONFIG_BT_BNEP_PROTO_FILTER=y +CONFIG_BT_CMTP=m +CONFIG_BT_HIDP=m +CONFIG_BT_HS=y +CONFIG_BT_LE=y +CONFIG_BT_LE_L2CAP_ECRED=y +# CONFIG_BT_6LOWPAN is not set +# CONFIG_BT_LEDS is not set +# CONFIG_BT_MSFTEXT is not set +# CONFIG_BT_AOSPEXT is not set +CONFIG_BT_DEBUGFS=y +# CONFIG_BT_SELFTEST is not set + +# +# Bluetooth device drivers +# +CONFIG_BT_INTEL=m +CONFIG_BT_BCM=m +CONFIG_BT_RTL=m +CONFIG_BT_HCIBTUSB=m +CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y +CONFIG_BT_HCIBTUSB_POLL_SYNC=y +CONFIG_BT_HCIBTUSB_BCM=y +# CONFIG_BT_HCIBTUSB_MTK is not set +CONFIG_BT_HCIBTUSB_RTL=y +CONFIG_BT_HCIBTSDIO=m +CONFIG_BT_HCIUART=m +CONFIG_BT_HCIUART_H4=y +CONFIG_BT_HCIUART_BCSP=y +CONFIG_BT_HCIUART_ATH3K=y +# CONFIG_BT_HCIUART_INTEL is not set +# CONFIG_BT_HCIUART_AG6XX is not set +CONFIG_BT_HCIBCM203X=m +# CONFIG_BT_HCIBCM4377 is not set +CONFIG_BT_HCIBPA10X=m +CONFIG_BT_HCIBFUSB=m +CONFIG_BT_HCIVHCI=m +CONFIG_BT_MRVL=m +CONFIG_BT_MRVL_SDIO=m +CONFIG_BT_ATH3K=m +# CONFIG_BT_MTKSDIO is not set +# CONFIG_BT_VIRTIO is not set +# end of Bluetooth device drivers + +# CONFIG_AF_RXRPC is not set +# CONFIG_AF_KCM is not set +CONFIG_STREAM_PARSER=y +# CONFIG_MCTP is not set +CONFIG_FIB_RULES=y +CONFIG_WIRELESS=y +CONFIG_CFG80211=m +# CONFIG_NL80211_TESTMODE is not set +# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set +CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y +CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y +CONFIG_CFG80211_DEFAULT_PS=y +CONFIG_CFG80211_DEBUGFS=y +CONFIG_CFG80211_CRDA_SUPPORT=y +# CONFIG_CFG80211_WEXT is not set +CONFIG_MAC80211=m +CONFIG_MAC80211_HAS_RC=y +CONFIG_MAC80211_RC_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y +CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" +# CONFIG_MAC80211_MESH is not set +CONFIG_MAC80211_LEDS=y +CONFIG_MAC80211_DEBUGFS=y +CONFIG_MAC80211_MESSAGE_TRACING=y +# CONFIG_MAC80211_DEBUG_MENU is not set +CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 +CONFIG_RFKILL=m +CONFIG_RFKILL_LEDS=y +CONFIG_RFKILL_INPUT=y +# CONFIG_RFKILL_GPIO is not set +# CONFIG_NET_9P is not set +# CONFIG_CAIF is not set +CONFIG_CEPH_LIB=m +CONFIG_CEPH_LIB_PRETTYDEBUG=y +CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y +# CONFIG_NFC is not set +CONFIG_PSAMPLE=m +# CONFIG_NET_IFE is not set +CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y +CONFIG_DST_CACHE=y +CONFIG_GRO_CELLS=y +CONFIG_SOCK_VALIDATE_XMIT=y +CONFIG_NET_SELFTESTS=y +CONFIG_NET_SOCK_MSG=y +CONFIG_NET_DEVLINK=y +CONFIG_PAGE_POOL=y +# CONFIG_PAGE_POOL_STATS is not set +CONFIG_FAILOVER=m +CONFIG_ETHTOOL_NETLINK=y + +# +# Device Drivers +# +CONFIG_HAVE_EISA=y +# CONFIG_EISA is not set +CONFIG_HAVE_PCI=y +CONFIG_PCI=y +CONFIG_PCI_DOMAINS=y +CONFIG_PCIEPORTBUS=y +CONFIG_HOTPLUG_PCI_PCIE=y +CONFIG_PCIEAER=y +CONFIG_PCIEAER_INJECT=m +CONFIG_PCIE_ECRC=y +CONFIG_PCIEASPM=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set +# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set +# 
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
+CONFIG_PCIE_DPC=y
+# CONFIG_PCIE_PTM is not set
+CONFIG_PCIE_EDR=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+CONFIG_PCI_STUB=y
+CONFIG_PCI_PF_STUB=y
+CONFIG_PCI_ATS=y
+CONFIG_PCI_DOE=y
+CONFIG_PCI_LOCKLESS_CONFIG=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_PRI=y
+CONFIG_PCI_PASID=y
+# CONFIG_PCI_P2PDMA is not set
+CONFIG_PCI_LABEL=y
+CONFIG_PCI_HYPERV=m
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=64
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_SHPC=y
+
+#
+# PCI controller drivers
+#
+CONFIG_VMD=y
+CONFIG_PCI_HYPERV_INTERFACE=m
+
+#
+# Cadence-based PCIe controllers
+#
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+CONFIG_CXL_BUS=m
+CONFIG_CXL_PCI=m
+# CONFIG_CXL_MEM_RAW_COMMANDS is not set
+CONFIG_CXL_ACPI=m
+CONFIG_CXL_PMEM=m
+CONFIG_CXL_MEM=m
+CONFIG_CXL_PORT=m
+CONFIG_CXL_SUSPEND=y
+CONFIG_CXL_REGION=y
+# CONFIG_CXL_REGION_INVALIDATION_TEST is not set
+CONFIG_CXL_PMU=m
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_AUXILIARY_BUS=y
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_DEBUG=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+# CONFIG_FW_LOADER_COMPRESS is not set
+CONFIG_FW_CACHE=y
+CONFIG_FW_UPLOAD=y
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+CONFIG_HMEM_REPORTING=y
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=m
+CONFIG_REGMAP_SPI=m
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MHI_BUS is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# end of ARM System Control and Management Interface Protocol
+
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+CONFIG_FW_CFG_SYSFS=y
+# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
+CONFIG_SYSFB=y
+# CONFIG_SYSFB_SIMPLEFB is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_EFI_SOFT_RESERVE=y
+CONFIG_EFI_DXE_MEM_ATTRIBUTES=y
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
+# CONFIG_EFI_CAPSULE_LOADER is not set
+# CONFIG_EFI_TEST is not set
+# CONFIG_APPLE_PROPERTIES is not set
+# CONFIG_RESET_ATTACK_MITIGATION is not set
+CONFIG_EFI_RCI2_TABLE=y
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
+CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+CONFIG_EFI_COCO_SECRET=y
+CONFIG_UNACCEPTED_MEMORY=y
+# end of EFI (Extensible Firmware Interface) Support
+
+CONFIG_UEFI_CPER=y
+CONFIG_UEFI_CPER_X86=y
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
+CONFIG_MTD=m
+# CONFIG_MTD_TESTS is not set
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
+# CONFIG_MTD_SPI_NOR is not set
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
+# CONFIG_MTD_UBI_GLUEBI is not set
+# CONFIG_MTD_UBI_BLOCK is not set
+# CONFIG_MTD_HYPERBUS is not set
+# CONFIG_OF is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_NULL_BLK=m
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_CDROM=m
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_LZORLE=y
+# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
+CONFIG_ZRAM_DEF_COMP="lzo-rle"
+CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
+# CONFIG_ZRAM_MULTI_COMP is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_UBLK=m
+CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
+CONFIG_BLK_DEV_NVME=m
+CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
+CONFIG_NVME_TARGET=m
+# CONFIG_NVME_TARGET_PASSTHRU is not set
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_SGI_XP=m
+CONFIG_HP_ILO=m
+CONFIG_SGI_GRU=m
+# CONFIG_SGI_GRU_DEBUG is not set
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+CONFIG_VMWARE_BALLOON=m
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
+CONFIG_SENSORS_LIS3_I2C=m
+
+#
+# Altera FPGA firmware download module (requires I2C)
+#
+CONFIG_ALTERA_STAPL=m
+CONFIG_INTEL_MEI=m
+CONFIG_INTEL_MEI_ME=m
+# CONFIG_INTEL_MEI_TXE is not set
+# CONFIG_INTEL_MEI_GSC is not set
+# CONFIG_INTEL_MEI_HDCP is not set
+# CONFIG_INTEL_MEI_PXP is not set
+# CONFIG_INTEL_MEI_GSC_PROXY is not set
+CONFIG_VMWARE_VMCI=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+CONFIG_UACCE=m
+CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_MPI3MR=m
+CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
+CONFIG_VMWARE_PVSCSI=m
+# CONFIG_XEN_SCSI_FRONTEND is not set
+CONFIG_HYPERV_STORAGE=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_FCOE_FNIC=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+CONFIG_SCSI_ISCI=m
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA_FC=m
+# CONFIG_TCM_QLA2XXX is not set
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_QEDI=m
+CONFIG_QEDF=m
+CONFIG_SCSI_LPFC=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+CONFIG_SCSI_DEBUG=m
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
+CONFIG_ATA=m
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=0
+CONFIG_SATA_AHCI_PLATFORM=m
+# CONFIG_AHCI_DWC is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+CONFIG_SATA_ZHAOXIN=m
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+# CONFIG_MD_MULTIPATH is not set
+CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+# CONFIG_FUSION_CTL is not set
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+# CONFIG_ATM_DRIVERS is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+# CONFIG_NET_VENDOR_AMD is not set
+CONFIG_NET_VENDOR_AQUANTIA=y
+CONFIG_AQTION=m
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_ATL2=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+# CONFIG_CX_ECAT is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+# CONFIG_NET_VENDOR_CADENCE is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+# CONFIG_THUNDER_NIC_PF is not set
+# CONFIG_THUNDER_NIC_VF is not set
+# CONFIG_THUNDER_NIC_BGX is not set
+# CONFIG_THUNDER_NIC_RGX is not set
+CONFIG_CAVIUM_PTP=y
+CONFIG_LIQUIDIO_CORE=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+CONFIG_NET_VENDOR_CISCO=y
+CONFIG_ENIC=m
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
+CONFIG_DNET=m
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+CONFIG_NET_VENDOR_EMULEX=y
+CONFIG_BE2NET=m
+CONFIG_BE2NET_HWMON=y
+# CONFIG_BE2NET_BE2 is not set
+# CONFIG_BE2NET_BE3 is not set
+CONFIG_BE2NET_LANCER=y
+CONFIG_BE2NET_SKYHAWK=y
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_GVE=m
+CONFIG_NET_VENDOR_HUAWEI=y
+CONFIG_HINIC=m
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_E1000E_HWTS=y
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGB_DCA=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCA=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_IAVF=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
+CONFIG_ICE_HWTS=y
+CONFIG_FM10K=m
+CONFIG_IGC=m
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_ADI=y
+# CONFIG_ADIN1110 is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
+# CONFIG_MLX5_CORE_IPOIB is not set
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
+# CONFIG_MICROSOFT_MANA is not set
+CONFIG_NET_VENDOR_MYRI=y
+CONFIG_MYRI10GE=m
+CONFIG_MYRI10GE_DCA=y
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+CONFIG_NET_VENDOR_NETRONOME=y
+CONFIG_NFP=m
+CONFIG_NFP_APP_FLOWER=y
+CONFIG_NFP_APP_ABM_NIC=y
+CONFIG_NFP_NET_IPSEC=y
+CONFIG_NFP_DEBUG=y
+# CONFIG_NET_VENDOR_NVIDIA is not set
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_ETHOC=m
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_QLA3XXX=m
+# CONFIG_QLCNIC is not set
+CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QED_LL2=y
+CONFIG_QED_SRIOV=y
+CONFIG_QEDE=m
+CONFIG_QED_RDMA=y
+CONFIG_QED_ISCSI=y
+CONFIG_QED_FCOE=y
+CONFIG_QED_OOO=y
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_ATP is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_ROCKER=m
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+CONFIG_NET_VENDOR_SOLARFLARE=y
+CONFIG_SFC=m
+CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
+CONFIG_SFC_MCDI_LOGGING=y
+# CONFIG_SFC_FALCON is not set
+# CONFIG_SFC_SIENA is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y
+# CONFIG_MSE102X is not set
+# CONFIG_NET_VENDOR_VIA is not set
+CONFIG_NET_VENDOR_WANGXUN=y
+CONFIG_LIBWX=m
+CONFIG_NGBE=m
+CONFIG_TXGBE=m
+# CONFIG_NET_VENDOR_WIZNET is not set
+CONFIG_NET_VENDOR_XILINX=y
+# CONFIG_XILINX_EMACLITE is not set
+# CONFIG_XILINX_AXI_EMAC is not set
+# CONFIG_XILINX_LL_TEMAC is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+# CONFIG_NET_SB1000 is not set
+CONFIG_PHYLINK=m
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_FIXED_PHY=y
+CONFIG_SFP=m
+
+#
+# MII PHY device drivers
+#
+CONFIG_AMD_PHY=m
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=m
+CONFIG_AX88796B_PHY=m
+CONFIG_BROADCOM_PHY=m
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=m
+# CONFIG_BCM84881_PHY is not set
+CONFIG_BCM87XX_PHY=m
+CONFIG_BCM_NET_PHYLIB=m
+CONFIG_BCM_NET_PHYPTP=m
+CONFIG_CICADA_PHY=m
+CONFIG_CORTINA_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_INTEL_XWAY_PHY=m
+CONFIG_LSI_ET1011C_PHY=m
+CONFIG_MARVELL_PHY=m
+CONFIG_MARVELL_10G_PHY=m
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
+CONFIG_MICREL_PHY=m
+# CONFIG_MICROCHIP_T1S_PHY is not set
+CONFIG_MICROCHIP_PHY=m
+CONFIG_MICROCHIP_T1_PHY=m
+CONFIG_MICROSEMI_PHY=m
+# CONFIG_MOTORCOMM_PHY is not set
+CONFIG_NATIONAL_PHY=m
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
+CONFIG_QSEMI_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_RENESAS_PHY=m
+CONFIG_ROCKCHIP_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_STE10XP=m
+CONFIG_TERANETICS_PHY=m
+CONFIG_DP83822_PHY=m
+CONFIG_DP83TC811_PHY=m
+CONFIG_DP83848_PHY=m
+CONFIG_DP83867_PHY=m
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
+CONFIG_VITESSE_PHY=m
+CONFIG_XILINX_GMII2RGMII=m
+# CONFIG_MICREL_KS8995MA is not set
+# CONFIG_PSE_CONTROLLER is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_ACPI_MDIO=y
+CONFIG_MDIO_DEVRES=y
+CONFIG_MDIO_BITBANG=m
+CONFIG_MDIO_BCM_UNIMAC=m
+CONFIG_MDIO_CAVIUM=m
+# CONFIG_MDIO_GPIO is not set
+CONFIG_MDIO_I2C=m
+# CONFIG_MDIO_MVUSB is not set
+CONFIG_MDIO_THUNDER=m
+
+#
+# MDIO Multiplexers
+#
+
+#
+# PCS device drivers
+#
+CONFIG_PCS_XPCS=m
+# end of PCS device drivers
+
+# CONFIG_PLIP is not set
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOATM=m
+CONFIG_PPPOE=m
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_PPP_ASYNC=m
+CONFIG_PPP_SYNC_TTY=m
+CONFIG_SLIP=m
+CONFIG_SLHC=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+# CONFIG_SLIP_MODE_SLIP6 is not set
+CONFIG_USB_NET_DRIVERS=y
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_LAN78XX=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_AX8817X=m
+CONFIG_USB_NET_AX88179_178A=m
+CONFIG_USB_NET_CDCETHER=m
+CONFIG_USB_NET_CDC_EEM=m
+CONFIG_USB_NET_CDC_NCM=m
+CONFIG_USB_NET_HUAWEI_CDC_NCM=m
+CONFIG_USB_NET_CDC_MBIM=m
+CONFIG_USB_NET_DM9601=m
+# CONFIG_USB_NET_SR9700 is not set
+# CONFIG_USB_NET_SR9800 is not set
+CONFIG_USB_NET_SMSC75XX=m
+CONFIG_USB_NET_SMSC95XX=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_NET1080=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+CONFIG_WLAN=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH_DEBUG=y
+# CONFIG_ATH_TRACEPOINTS is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_COMMON_DEBUG=y
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_DEBUGFS=y
+# CONFIG_ATH9K_STATION_STATISTICS is not set
+# CONFIG_ATH9K_DYNACK is not set
+CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+CONFIG_ATH9K_PCOEM=y
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=m
+CONFIG_ATH10K_CE=y
+CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+CONFIG_ATH10K_DEBUG=y
+# CONFIG_ATH10K_DEBUGFS is not set
+CONFIG_ATH10K_TRACING=y
+# CONFIG_WCN36XX is not set
+# CONFIG_ATH11K is not set
+# CONFIG_ATH12K is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_WLAN_VENDOR_BROADCOM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMSMAC_LEDS=y
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+# CONFIG_BRCMDBG is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+CONFIG_WLAN_VENDOR_INTEL=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+
+#
+# Debugging Options
+#
+CONFIG_IWLWIFI_DEBUG=y
+CONFIG_IWLWIFI_DEBUGFS=y
+CONFIG_IWLWIFI_DEVICE_TRACING=y
+# end of Debugging Options
+
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_WLAN_VENDOR_MARVELL=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_MWIFIEX_USB=m
+# CONFIG_MWL8K is not set
+CONFIG_WLAN_VENDOR_MEDIATEK=y
+CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76_LEDS=y
+CONFIG_MT76_USB=m
+CONFIG_MT76x02_LIB=m
+CONFIG_MT76x02_USB=m
+CONFIG_MT76x0_COMMON=m
+CONFIG_MT76x0U=m
+# CONFIG_MT76x0E is not set
+CONFIG_MT76x2_COMMON=m
+# CONFIG_MT76x2E is not set
+CONFIG_MT76x2U=m
+# CONFIG_MT7603E is not set
+# CONFIG_MT7615E is not set
+# CONFIG_MT7663U is not set
+# CONFIG_MT7663S is not set
+# CONFIG_MT7915E is not set
+# CONFIG_MT7921E is not set
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
+# CONFIG_MT7996E is not set
+CONFIG_WLAN_VENDOR_MICROCHIP=y
+# CONFIG_WILC1000_SDIO is not set
+# CONFIG_WILC1000_SPI is not set
+CONFIG_WLAN_VENDOR_PURELIFI=y
+# CONFIG_PLFXLC is not set
+CONFIG_WLAN_VENDOR_RALINK=y
+CONFIG_RT2X00=m
+# CONFIG_RT2400PCI is not set
+# CONFIG_RT2500PCI is not set
+# CONFIG_RT61PCI is not set
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+# CONFIG_RT2500USB is not set
+# CONFIG_RT73USB is not set
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT3573=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2800_LIB_MMIO=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WLAN_VENDOR_REALTEK=y
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+CONFIG_RTL_CARDS=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTLWIFI_PCI=m
+CONFIG_RTLWIFI_USB=m
+CONFIG_RTLWIFI_DEBUG=y
+CONFIG_RTL8192C_COMMON=m
+CONFIG_RTL8723_COMMON=m
+CONFIG_RTLBTCOEXIST=m
+CONFIG_RTL8XXXU=m
+# CONFIG_RTL8XXXU_UNTESTED is not set
+CONFIG_RTW88=m
+CONFIG_RTW88_CORE=m
+CONFIG_RTW88_PCI=m
+CONFIG_RTW88_8822B=m
+CONFIG_RTW88_8822C=m
+CONFIG_RTW88_8822BE=m
+# CONFIG_RTW88_8822BS is not set
+# CONFIG_RTW88_8822BU is not set
+CONFIG_RTW88_8822CE=m
+# CONFIG_RTW88_8822CS is not set
+# CONFIG_RTW88_8822CU is not set
+# CONFIG_RTW88_8723DE is not set
+# CONFIG_RTW88_8723DS is not set
+# CONFIG_RTW88_8723DU is not set
+# CONFIG_RTW88_8821CE is not set
+# CONFIG_RTW88_8821CS is not set
+# CONFIG_RTW88_8821CU is not set
+CONFIG_RTW88_DEBUG=y
+CONFIG_RTW88_DEBUGFS=y
+# CONFIG_RTW89 is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+CONFIG_WLAN_VENDOR_SILABS=y
+# CONFIG_WFX is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_WLAN_VENDOR_QUANTENNA=y
+# CONFIG_QTNFMAC_PCIE is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+CONFIG_MAC80211_HWSIM=m
+# CONFIG_VIRT_WIFI is not set
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+# CONFIG_HDLC_RAW_ETH is not set
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+
+#
+# X.25/LAPB support is disabled
+#
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_FARSYNC is not set
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IEEE802154_FAKELB=m
+# CONFIG_IEEE802154_AT86RF230 is not set
+# CONFIG_IEEE802154_MRF24J40 is not set
+# CONFIG_IEEE802154_CC2520 is not set
+# CONFIG_IEEE802154_ATUSB is not set
+# CONFIG_IEEE802154_ADF7242 is not set
+# CONFIG_IEEE802154_CA8210 is not set
+# CONFIG_IEEE802154_MCR20A is not set
+# CONFIG_IEEE802154_HWSIM is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_VMXNET3=m
+CONFIG_FUJITSU_ES=m
+CONFIG_HYPERV_NET=m
+CONFIG_NETDEVSIM=m
+CONFIG_NET_FAILOVER=m
+CONFIG_ISDN=y
+CONFIG_ISDN_CAPI=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+CONFIG_MISDN_SPEEDFAX=m
+CONFIG_MISDN_INFINEON=m
+CONFIG_MISDN_W6692=m
+CONFIG_MISDN_NETJET=m
+CONFIG_MISDN_HDLC=m
+CONFIG_MISDN_IPAC=m
+CONFIG_MISDN_ISAR=m
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_SPARSEKMAP=m
+# CONFIG_INPUT_MATRIXKMAP is not set
+CONFIG_INPUT_VIVALDIFMAP=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADC is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_APPLESPI is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_BYD=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_FOCALTECH=y
+CONFIG_MOUSE_PS2_VMMOUSE=y
+CONFIG_MOUSE_PS2_SMBUS=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+# CONFIG_INPUT_JOYSTICK is not set
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+# CONFIG_TABLET_USB_HANWANG is not set
+CONFIG_TABLET_USB_KBTAB=m
+# CONFIG_TABLET_USB_PEGASUS is not set
+CONFIG_TABLET_SERIAL_WACOM4=m
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ADC is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_BU21029 is not set
+# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set
+# CONFIG_TOUCHSCREEN_EXC3000 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GOODIX is not set
+# CONFIG_TOUCHSCREEN_HIDEEP is not set
+# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set
+# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_ILITEK is not set
+# CONFIG_TOUCHSCREEN_S6SY761 is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_EKTF2127 is not set
+# CONFIG_TOUCHSCREEN_ELAN is not set
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+CONFIG_TOUCHSCREEN_WACOM_I2C=m
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
+# CONFIG_TOUCHSCREEN_MSG2638 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set
+# CONFIG_TOUCHSCREEN_IMAGIS is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_RM_TS is not set
+# CONFIG_TOUCHSCREEN_SILEAD is not set
+# CONFIG_TOUCHSCREEN_SIS_I2C is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_STMFTS is not set
+# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_ZET6223 is not set
+# CONFIG_TOUCHSCREEN_ZFORCE is not set
+# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+# CONFIG_TOUCHSCREEN_IQS5XX is not set
+# CONFIG_TOUCHSCREEN_IQS7211 is not set
+# CONFIG_TOUCHSCREEN_ZINITIX is not set
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_MMA8450 is not set
+CONFIG_INPUT_APANEL=m
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_PWM_VIBRA is not set
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+# CONFIG_RMI4_SPI is not set
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+# CONFIG_RMI4_F34 is not set
+# CONFIG_RMI4_F3A is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+CONFIG_HYPERV_KEYBOARD=m
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+# CONFIG_SERIAL_8250_16550A_VARIANTS is not set
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_LPSS=y
+CONFIG_SERIAL_8250_MID=y
+CONFIG_SERIAL_8250_PERICOM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_KGDB_NMI is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_LANTIQ is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+# CONFIG_SERIAL_DEV_BUS is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+# CONFIG_HW_RANDOM_BA431 is not set
+CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_ZHAOXIN=m
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_MWAVE is not set
+CONFIG_DEVMEM=y
+CONFIG_NVRAM=y
+CONFIG_DEVPORT=y
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+# CONFIG_HPET_MMAP_DEFAULT is not set
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_UV_MMTIMER=m
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=y
+CONFIG_TCG_TIS=y
+# CONFIG_TCG_TIS_SPI is not set
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+# CONFIG_TCG_XEN is not set
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+CONFIG_TCG_TIS_ST33ZP24=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+# CONFIG_TCG_TIS_ST33ZP24_SPI is not set
+CONFIG_TELCLOCK=m
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_GPIO is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_REG is not set
+CONFIG_I2C_MUX_MLXCPLD=m
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_AMD_MP2 is not set
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_ZHAOXIN=m
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+CONFIG_I2C_ZHAOXIN_SMBUS=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE=m
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_AMDPSP is not set
+CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+# CONFIG_I2C_CP2615 is not set
+CONFIG_I2C_PARPORT=m
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_MLXCPLD=m
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+CONFIG_I2C_STUB=m
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+# CONFIG_SPI_MEM is not set
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_BUTTERFLY is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_LANTIQ_SSC is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_DP83640_PHY=m
+# CONFIG_PTP_1588_CLOCK_INES is not set
+CONFIG_PTP_1588_CLOCK_KVM=m
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# CONFIG_PTP_1588_CLOCK_MOCK is not set
+# CONFIG_PTP_1588_CLOCK_VMW is not set
+# CONFIG_PTP_1588_CLOCK_OCP is not set
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+
+#
+# Intel pinctrl drivers
+#
+CONFIG_PINCTRL_BAYTRAIL=y
+# CONFIG_PINCTRL_CHERRYVIEW is not set
+# CONFIG_PINCTRL_LYNXPOINT is not set
+CONFIG_PINCTRL_INTEL=y
+# CONFIG_PINCTRL_ALDERLAKE is not set
+CONFIG_PINCTRL_BROXTON=m
+CONFIG_PINCTRL_CANNONLAKE=m
+CONFIG_PINCTRL_CEDARFORK=m
+CONFIG_PINCTRL_DENVERTON=m
+# CONFIG_PINCTRL_ELKHARTLAKE is not set
+# CONFIG_PINCTRL_EMMITSBURG is not set
+CONFIG_PINCTRL_GEMINILAKE=m
+CONFIG_PINCTRL_ICELAKE=m
+# CONFIG_PINCTRL_JASPERLAKE is not set
+# CONFIG_PINCTRL_LAKEFIELD is not set
+CONFIG_PINCTRL_LEWISBURG=m
+# CONFIG_PINCTRL_METEORLAKE is not set
+CONFIG_PINCTRL_SUNRISEPOINT=m
+# CONFIG_PINCTRL_TIGERLAKE is not set
+# end of Intel pinctrl drivers
+
+CONFIG_PINCTRL_ZHAOXIN=m
+CONFIG_PINCTRL_KX7000=m
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=m
+
+#
+# Memory mapped GPIO drivers
+#
+CONFIG_GPIO_AMDPT=m
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_EXAR is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+CONFIG_GPIO_ICH=m
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# Port-mapped I/O GPIO drivers
+#
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_F7188X is not set
+# CONFIG_GPIO_IT87 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_SCH311X is not set
+# CONFIG_GPIO_WINBOND is not set
+# CONFIG_GPIO_WS16C48 is not set
+# end of Port-mapped I/O GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# CONFIG_GPIO_ELKHARTLAKE is not set
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_AMD8111 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+CONFIG_GPIO_VIPERBOARD=m
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_RESTART is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_ABITUGURU=m
+CONFIG_SENSORS_ABITUGURU3=m
+# CONFIG_SENSORS_AD7314 is not set
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+# CONFIG_SENSORS_ADM1177 is not set
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+# CONFIG_SENSORS_ADT7310 is not set
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set
+# CONFIG_SENSORS_AS370 is not set
+CONFIG_SENSORS_ASC7621=m
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_K8TEMP=m
+CONFIG_SENSORS_K10TEMP=m
+CONFIG_SENSORS_FAM15H_POWER=m
+CONFIG_SENSORS_APPLESMC=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_ATXP1=m
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_DELL_SMM=m
+# CONFIG_I8K is not set
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_FSCHMD=m
+# CONFIG_SENSORS_FTSTEUTATES is not set
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
+CONFIG_SENSORS_I5500=m
+CONFIG_SENSORS_CORETEMP=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_POWR1220 is not set
+CONFIG_SENSORS_LINEAGE=m
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+# CONFIG_SENSORS_LTC4222 is not set
+CONFIG_SENSORS_LTC4245=m
+# CONFIG_SENSORS_LTC4260 is not set
+CONFIG_SENSORS_LTC4261=m
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX127 is not set
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+CONFIG_SENSORS_MCP3021=m
+# CONFIG_SENSORS_MLXREG_FAN is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+CONFIG_SENSORS_LM63=m
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+# CONFIG_SENSORS_NCT6683 is not set
+CONFIG_SENSORS_NCT6775_CORE=m
+CONFIG_SENSORS_NCT6775=m
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_OXP is not set
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+# CONFIG_SENSORS_ACBEL_FSG032 is not set
+# CONFIG_SENSORS_ADM1266 is not set
+CONFIG_SENSORS_ADM1275=m
+# CONFIG_SENSORS_BEL_PFE is not set
+# CONFIG_SENSORS_BPA_RS600 is not set
+# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set
+# CONFIG_SENSORS_FSP_3Y is not set
+# CONFIG_SENSORS_IBM_CFFPS is not set
+# CONFIG_SENSORS_DPS920AB is not set
+# CONFIG_SENSORS_INSPUR_IPSPS is not set
+# CONFIG_SENSORS_IR35221 is not set
+# CONFIG_SENSORS_IR36021 is not set
+# CONFIG_SENSORS_IR38064 is not set
+# CONFIG_SENSORS_IRPS5401 is not set
+# CONFIG_SENSORS_ISL68137 is not set
+CONFIG_SENSORS_LM25066=m
+# CONFIG_SENSORS_LT7182S is not set
+CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC3815 is not set
+# CONFIG_SENSORS_MAX15301 is not set
+CONFIG_SENSORS_MAX16064=m
+# CONFIG_SENSORS_MAX16601 is not set
+# CONFIG_SENSORS_MAX20730 is not set
+# CONFIG_SENSORS_MAX20751 is not set
+# CONFIG_SENSORS_MAX31785 is not set
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+# CONFIG_SENSORS_MP2888 is not set
+# CONFIG_SENSORS_MP2975 is not set
+# CONFIG_SENSORS_MP5023 is not set
+# CONFIG_SENSORS_MPQ7932 is not set
+# CONFIG_SENSORS_PIM4328 is not set
CONFIG_SENSORS_PLI1209BC is not set +# CONFIG_SENSORS_PM6764TR is not set +# CONFIG_SENSORS_PXE1610 is not set +# CONFIG_SENSORS_Q54SJ108A2 is not set +# CONFIG_SENSORS_STPDDC60 is not set +# CONFIG_SENSORS_TDA38640 is not set +# CONFIG_SENSORS_TPS40422 is not set +# CONFIG_SENSORS_TPS53679 is not set +# CONFIG_SENSORS_TPS546D24 is not set +CONFIG_SENSORS_UCD9000=m +CONFIG_SENSORS_UCD9200=m +# CONFIG_SENSORS_XDPE152 is not set +# CONFIG_SENSORS_XDPE122 is not set +CONFIG_SENSORS_ZL6100=m +# CONFIG_SENSORS_SBTSI is not set +# CONFIG_SENSORS_SBRMI is not set +CONFIG_SENSORS_SHT15=m +CONFIG_SENSORS_SHT21=m +# CONFIG_SENSORS_SHT3x is not set +# CONFIG_SENSORS_SHT4x is not set +# CONFIG_SENSORS_SHTC1 is not set +CONFIG_SENSORS_SIS5595=m +CONFIG_SENSORS_DME1737=m +CONFIG_SENSORS_EMC1403=m +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC2305 is not set +CONFIG_SENSORS_EMC6W201=m +CONFIG_SENSORS_SMSC47M1=m +CONFIG_SENSORS_SMSC47M192=m +CONFIG_SENSORS_SMSC47B397=m +CONFIG_SENSORS_SCH56XX_COMMON=m +CONFIG_SENSORS_SCH5627=m +CONFIG_SENSORS_SCH5636=m +# CONFIG_SENSORS_STTS751 is not set +# CONFIG_SENSORS_ADC128D818 is not set +CONFIG_SENSORS_ADS7828=m +# CONFIG_SENSORS_ADS7871 is not set +CONFIG_SENSORS_AMC6821=m +CONFIG_SENSORS_INA209=m +CONFIG_SENSORS_INA2XX=m +# CONFIG_SENSORS_INA238 is not set +# CONFIG_SENSORS_INA3221 is not set +# CONFIG_SENSORS_TC74 is not set +CONFIG_SENSORS_THMC50=m +CONFIG_SENSORS_TMP102=m +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set +CONFIG_SENSORS_TMP401=m +CONFIG_SENSORS_TMP421=m +# CONFIG_SENSORS_TMP464 is not set +# CONFIG_SENSORS_TMP513 is not set +CONFIG_SENSORS_VIA_CPUTEMP=m +CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m +CONFIG_SENSORS_VIA686A=m +CONFIG_SENSORS_VT1211=m +CONFIG_SENSORS_VT8231=m +# CONFIG_SENSORS_W83773G is not set +CONFIG_SENSORS_W83781D=m +CONFIG_SENSORS_W83791D=m +CONFIG_SENSORS_W83792D=m +CONFIG_SENSORS_W83793=m +CONFIG_SENSORS_W83795=m +# CONFIG_SENSORS_W83795_FANCTRL is not set +CONFIG_SENSORS_W83L785TS=m +CONFIG_SENSORS_W83L786NG=m +CONFIG_SENSORS_W83627HF=m +CONFIG_SENSORS_W83627EHF=m +# CONFIG_SENSORS_XGENE is not set + +# +# ACPI drivers +# +CONFIG_SENSORS_ACPI_POWER=m +CONFIG_SENSORS_ATK0110=m +# CONFIG_SENSORS_ASUS_WMI is not set +# CONFIG_SENSORS_ASUS_EC is not set +# CONFIG_SENSORS_HP_WMI is not set +CONFIG_THERMAL=y +# CONFIG_THERMAL_NETLINK is not set +# CONFIG_THERMAL_STATISTICS is not set +CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0 +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_ACPI=y +CONFIG_THERMAL_WRITABLE_TRIPS=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set +# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set +# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set +CONFIG_THERMAL_GOV_FAIR_SHARE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_BANG_BANG=y +CONFIG_THERMAL_GOV_USER_SPACE=y +# CONFIG_THERMAL_EMULATION is not set + +# +# Intel thermal drivers +# +CONFIG_INTEL_POWERCLAMP=m +CONFIG_X86_THERMAL_VECTOR=y +CONFIG_INTEL_TCC=y +CONFIG_X86_PKG_TEMP_THERMAL=m +CONFIG_INTEL_SOC_DTS_IOSF_CORE=m +# CONFIG_INTEL_SOC_DTS_THERMAL is not set + +# +# ACPI INT340X thermal drivers +# +CONFIG_INT340X_THERMAL=m +CONFIG_ACPI_THERMAL_REL=m +# CONFIG_INT3406_THERMAL is not set +CONFIG_PROC_THERMAL_MMIO_RAPL=m +# end of ACPI INT340X thermal drivers + +CONFIG_INTEL_PCH_THERMAL=m +# CONFIG_INTEL_TCC_COOLING is not set +# CONFIG_INTEL_HFI_THERMAL is not set +# end of Intel thermal drivers + +# CONFIG_GENERIC_ADC_THERMAL is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# 
CONFIG_WATCHDOG_NOWAYOUT is not set +CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y +CONFIG_WATCHDOG_OPEN_TIMEOUT=0 +CONFIG_WATCHDOG_SYSFS=y +# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set + +# +# Watchdog Device Drivers +# +CONFIG_SOFT_WATCHDOG=m +CONFIG_WDAT_WDT=m +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ZIIRAVE_WATCHDOG is not set +# CONFIG_MLX_WDT is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +# CONFIG_ACQUIRE_WDT is not set +# CONFIG_ADVANTECH_WDT is not set +# CONFIG_ADVANTECH_EC_WDT is not set +CONFIG_ALIM1535_WDT=m +CONFIG_ALIM7101_WDT=m +# CONFIG_EBC_C384_WDT is not set +# CONFIG_EXAR_WDT is not set +CONFIG_F71808E_WDT=m +CONFIG_SP5100_TCO=m +CONFIG_SBC_FITPC2_WATCHDOG=m +# CONFIG_EUROTECH_WDT is not set +CONFIG_IB700_WDT=m +CONFIG_IBMASR=m +# CONFIG_WAFER_WDT is not set +CONFIG_I6300ESB_WDT=m +CONFIG_IE6XX_WDT=m +CONFIG_ITCO_WDT=m +CONFIG_ITCO_VENDOR_SUPPORT=y +CONFIG_IT8712F_WDT=m +CONFIG_IT87_WDT=m +CONFIG_HP_WATCHDOG=m +CONFIG_HPWDT_NMI_DECODING=y +# CONFIG_SC1200_WDT is not set +# CONFIG_PC87413_WDT is not set +CONFIG_NV_TCO=m +# CONFIG_60XX_WDT is not set +# CONFIG_CPU5_WDT is not set +CONFIG_SMSC_SCH311X_WDT=m +# CONFIG_SMSC37B787_WDT is not set +# CONFIG_TQMX86_WDT is not set +CONFIG_VIA_WDT=m +CONFIG_W83627HF_WDT=m +CONFIG_W83877F_WDT=m +CONFIG_W83977F_WDT=m +CONFIG_MACHZ_WDT=m +# CONFIG_SBC_EPX_C3_WATCHDOG is not set +CONFIG_INTEL_MEI_WDT=m +# CONFIG_NI903X_WDT is not set +# CONFIG_NIC7018_WDT is not set +# CONFIG_MEN_A21_WDT is not set +CONFIG_XEN_WDT=m + +# +# PCI-based Watchdog Cards +# +CONFIG_PCIPCWATCHDOG=m +CONFIG_WDTPCI=m + +# +# USB-based Watchdog Cards +# +CONFIG_USBPCWATCHDOG=m +CONFIG_SSB_POSSIBLE=y +# CONFIG_SSB is not set +CONFIG_BCMA_POSSIBLE=y +CONFIG_BCMA=m +CONFIG_BCMA_HOST_PCI_POSSIBLE=y +CONFIG_BCMA_HOST_PCI=y +# CONFIG_BCMA_HOST_SOC is not set +CONFIG_BCMA_DRIVER_PCI=y +CONFIG_BCMA_DRIVER_GMAC_CMN=y +CONFIG_BCMA_DRIVER_GPIO=y +# CONFIG_BCMA_DEBUG is not set + +# +# Multifunction device drivers +# +CONFIG_MFD_CORE=y +# CONFIG_MFD_SMPRO is not set +# CONFIG_MFD_BCM590XX is not set +# CONFIG_MFD_BD9571MWV is not set +# CONFIG_MFD_AXP20X_I2C is not set +# CONFIG_MFD_CS42L43_I2C is not set +# CONFIG_MFD_MADERA is not set +# CONFIG_MFD_DA9052_SPI is not set +# CONFIG_MFD_DA9062 is not set +# CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set +# CONFIG_MFD_MC13XXX_SPI is not set +# CONFIG_MFD_MC13XXX_I2C is not set +# CONFIG_MFD_MP2629 is not set +# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set +CONFIG_LPC_ICH=m +CONFIG_LPC_SCH=m +CONFIG_MFD_INTEL_LPSS=m +CONFIG_MFD_INTEL_LPSS_ACPI=m +CONFIG_MFD_INTEL_LPSS_PCI=m +# CONFIG_MFD_INTEL_PMC_BXT is not set +# CONFIG_MFD_IQS62X is not set +# CONFIG_MFD_JANZ_CMODIO is not set +# CONFIG_MFD_KEMPLD is not set +# CONFIG_MFD_88PM800 is not set +# CONFIG_MFD_88PM805 is not set +# CONFIG_MFD_MAX14577 is not set +# CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX8907 is not set +# CONFIG_MFD_MT6360 is not set +# CONFIG_MFD_MT6370 is not set +# CONFIG_MFD_MT6397 is not set +# CONFIG_MFD_MENF21BMC is not set +# CONFIG_MFD_OCELOT is not set +# CONFIG_EZX_PCAP is not set +CONFIG_MFD_VIPERBOARD=m +# CONFIG_MFD_RETU is not set +# CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_SY7636A is not set +# CONFIG_MFD_RDC321X is not set +# CONFIG_MFD_RT4831 is not set +# CONFIG_MFD_RT5033 is not set +# CONFIG_MFD_RT5120 is not set +# 
CONFIG_MFD_SI476X_CORE is not set +CONFIG_MFD_SM501=m +CONFIG_MFD_SM501_GPIO=y +# CONFIG_MFD_SKY81452 is not set +# CONFIG_MFD_SYSCON is not set +# CONFIG_MFD_TI_AM335X_TSCADC is not set +# CONFIG_MFD_LP3943 is not set +# CONFIG_MFD_TI_LMU is not set +# CONFIG_TPS6105X is not set +# CONFIG_TPS65010 is not set +# CONFIG_TPS6507X is not set +# CONFIG_MFD_TPS65086 is not set +# CONFIG_MFD_TI_LP873X is not set +# CONFIG_MFD_TPS65912_I2C is not set +# CONFIG_MFD_TPS65912_SPI is not set +# CONFIG_MFD_TPS6594_I2C is not set +# CONFIG_MFD_TPS6594_SPI is not set +# CONFIG_MFD_WL1273_CORE is not set +# CONFIG_MFD_LM3533 is not set +# CONFIG_MFD_TQMX86 is not set +CONFIG_MFD_VX855=m +# CONFIG_MFD_ARIZONA_I2C is not set +# CONFIG_MFD_ARIZONA_SPI is not set +# CONFIG_MFD_WM831X_SPI is not set +# CONFIG_MFD_WM8994 is not set +# CONFIG_MFD_ATC260X_I2C is not set +# CONFIG_MFD_INTEL_M10_BMC_SPI is not set +# end of Multifunction device drivers + +# CONFIG_REGULATOR is not set +CONFIG_RC_CORE=m +# CONFIG_LIRC is not set +CONFIG_RC_MAP=m +CONFIG_RC_DECODERS=y +CONFIG_IR_IMON_DECODER=m +CONFIG_IR_JVC_DECODER=m +CONFIG_IR_MCE_KBD_DECODER=m +CONFIG_IR_NEC_DECODER=m +CONFIG_IR_RC5_DECODER=m +CONFIG_IR_RC6_DECODER=m +# CONFIG_IR_RCMM_DECODER is not set +CONFIG_IR_SANYO_DECODER=m +# CONFIG_IR_SHARP_DECODER is not set +CONFIG_IR_SONY_DECODER=m +# CONFIG_IR_XMP_DECODER is not set +CONFIG_RC_DEVICES=y +CONFIG_IR_ENE=m +CONFIG_IR_FINTEK=m +# CONFIG_IR_IGORPLUGUSB is not set +CONFIG_IR_IGUANA=m +CONFIG_IR_IMON=m +CONFIG_IR_IMON_RAW=m +CONFIG_IR_ITE_CIR=m +CONFIG_IR_MCEUSB=m +CONFIG_IR_NUVOTON=m +CONFIG_IR_REDRAT3=m +CONFIG_IR_SERIAL=m +# CONFIG_IR_SERIAL_TRANSMITTER is not set +CONFIG_IR_STREAMZAP=m +# CONFIG_IR_TOY is not set +CONFIG_IR_TTUSBIR=m +CONFIG_IR_WINBOND_CIR=m +CONFIG_RC_ATI_REMOTE=m +CONFIG_RC_LOOPBACK=m +# CONFIG_RC_XBOX_DVD is not set +CONFIG_CEC_CORE=m + +# +# CEC support +# +# CONFIG_MEDIA_CEC_RC is not set +CONFIG_MEDIA_CEC_SUPPORT=y +# CONFIG_CEC_CH7322 is not set +# CONFIG_CEC_GPIO is not set +# CONFIG_CEC_SECO is not set +CONFIG_USB_PULSE8_CEC=m +CONFIG_USB_RAINSHADOW_CEC=m +# end of CEC support + +CONFIG_MEDIA_SUPPORT=m +CONFIG_MEDIA_SUPPORT_FILTER=y +CONFIG_MEDIA_SUBDRV_AUTOSELECT=y + +# +# Media device types +# +# CONFIG_MEDIA_CAMERA_SUPPORT is not set +# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set +# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set +# CONFIG_MEDIA_RADIO_SUPPORT is not set +# CONFIG_MEDIA_SDR_SUPPORT is not set +# CONFIG_MEDIA_PLATFORM_SUPPORT is not set +# CONFIG_MEDIA_TEST_SUPPORT is not set +# end of Media device types + +# +# Media drivers +# + +# +# Drivers filtered as selected at 'Filter media drivers' +# + +# +# Media drivers +# +CONFIG_MEDIA_USB_SUPPORT=y +CONFIG_MEDIA_PCI_SUPPORT=y +# CONFIG_IPU_BRIDGE is not set +# end of Media drivers + +CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y + +# +# Media ancillary drivers +# +# end of Media ancillary drivers + +# +# Graphics support +# +CONFIG_APERTURE_HELPERS=y +CONFIG_VIDEO_CMDLINE=y +CONFIG_VIDEO_NOMODESET=y +# CONFIG_AUXDISPLAY is not set +# CONFIG_PANEL is not set +# CONFIG_AGP is not set +CONFIG_INTEL_GTT=m +CONFIG_VGA_SWITCHEROO=y +CONFIG_DRM=m +CONFIG_DRM_MIPI_DSI=y +CONFIG_DRM_KMS_HELPER=m +CONFIG_DRM_FBDEV_EMULATION=y +CONFIG_DRM_FBDEV_OVERALLOC=100 +CONFIG_DRM_LOAD_EDID_FIRMWARE=y +CONFIG_DRM_DISPLAY_HELPER=m +CONFIG_DRM_DISPLAY_DP_HELPER=y +CONFIG_DRM_DISPLAY_HDCP_HELPER=y +CONFIG_DRM_DISPLAY_HDMI_HELPER=y +# CONFIG_DRM_DP_AUX_CHARDEV is not set +# CONFIG_DRM_DP_CEC is not set +CONFIG_DRM_TTM=m +CONFIG_DRM_EXEC=m 
+CONFIG_DRM_BUDDY=m +CONFIG_DRM_VRAM_HELPER=m +CONFIG_DRM_TTM_HELPER=m +CONFIG_DRM_GEM_SHMEM_HELPER=m +CONFIG_DRM_SUBALLOC_HELPER=m +CONFIG_DRM_SCHED=m + +# +# I2C encoder or helper chips +# +CONFIG_DRM_I2C_CH7006=m +CONFIG_DRM_I2C_SIL164=m +# CONFIG_DRM_I2C_NXP_TDA998X is not set +# CONFIG_DRM_I2C_NXP_TDA9950 is not set +# end of I2C encoder or helper chips + +# +# ARM devices +# +# end of ARM devices + +CONFIG_DRM_RADEON=m +CONFIG_DRM_RADEON_USERPTR=y +CONFIG_DRM_AMDGPU=m +# CONFIG_DRM_AMDGPU_SI is not set +# CONFIG_DRM_AMDGPU_CIK is not set +# CONFIG_DRM_AMDGPU_USERPTR is not set + +# +# ACP (Audio CoProcessor) Configuration +# +# CONFIG_DRM_AMD_ACP is not set +# end of ACP (Audio CoProcessor) Configuration + +# +# Display Engine Configuration +# +CONFIG_DRM_AMD_DC=y +CONFIG_DRM_AMD_DC_FP=y +# CONFIG_DEBUG_KERNEL_DC is not set +# CONFIG_DRM_AMD_SECURE_DISPLAY is not set +# end of Display Engine Configuration + +# CONFIG_HSA_AMD is not set +CONFIG_DRM_NOUVEAU=m +CONFIG_NOUVEAU_DEBUG=5 +CONFIG_NOUVEAU_DEBUG_DEFAULT=3 +CONFIG_NOUVEAU_DEBUG_MMU=y +# CONFIG_NOUVEAU_DEBUG_PUSH is not set +CONFIG_DRM_NOUVEAU_BACKLIGHT=y +CONFIG_DRM_I915=m +CONFIG_DRM_I915_FORCE_PROBE="" +CONFIG_DRM_I915_CAPTURE_ERROR=y +CONFIG_DRM_I915_COMPRESS_ERROR=y +CONFIG_DRM_I915_USERPTR=y +CONFIG_DRM_I915_GVT_KVMGT=m +CONFIG_DRM_I915_REQUEST_TIMEOUT=20000 +CONFIG_DRM_I915_FENCE_TIMEOUT=10000 +CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250 +CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500 +CONFIG_DRM_I915_PREEMPT_TIMEOUT=640 +CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500 +CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000 +CONFIG_DRM_I915_STOP_TIMEOUT=100 +CONFIG_DRM_I915_TIMESLICE_DURATION=1 +CONFIG_DRM_I915_GVT=y +# CONFIG_DRM_VGEM is not set +CONFIG_DRM_VKMS=m +CONFIG_DRM_VMWGFX=m +# CONFIG_DRM_VMWGFX_MKSSTATS is not set +CONFIG_DRM_GMA500=m +CONFIG_DRM_UDL=m +CONFIG_DRM_AST=m +CONFIG_DRM_MGAG200=m +CONFIG_DRM_QXL=m +CONFIG_DRM_VIRTIO_GPU=m +CONFIG_DRM_VIRTIO_GPU_KMS=y +CONFIG_DRM_PANEL=y + +# +# Display Panels +# +# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set +# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set +# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set +# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set +# end of Display Panels + +CONFIG_DRM_BRIDGE=y +CONFIG_DRM_PANEL_BRIDGE=y + +# +# Display Interface Bridges +# +# CONFIG_DRM_ANALOGIX_ANX78XX is not set +# end of Display Interface Bridges + +# CONFIG_DRM_LOONGSON is not set +# CONFIG_DRM_ETNAVIV is not set +CONFIG_DRM_BOCHS=m +CONFIG_DRM_CIRRUS_QEMU=m +# CONFIG_DRM_GM12U320 is not set +# CONFIG_DRM_PANEL_MIPI_DBI is not set +# CONFIG_DRM_SIMPLEDRM is not set +# CONFIG_TINYDRM_HX8357D is not set +# CONFIG_TINYDRM_ILI9163 is not set +# CONFIG_TINYDRM_ILI9225 is not set +# CONFIG_TINYDRM_ILI9341 is not set +# CONFIG_TINYDRM_ILI9486 is not set +# CONFIG_TINYDRM_MI0283QT is not set +# CONFIG_TINYDRM_REPAPER is not set +# CONFIG_TINYDRM_ST7586 is not set +# CONFIG_TINYDRM_ST7735R is not set +# CONFIG_DRM_XEN_FRONTEND is not set +# CONFIG_DRM_VBOXVIDEO is not set +# CONFIG_DRM_GUD is not set +# CONFIG_DRM_SSD130X is not set +# CONFIG_DRM_HYPERV is not set +# CONFIG_DRM_LEGACY is not set +CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y +CONFIG_DRM_PRIVACY_SCREEN=y + +# +# Frame buffer Devices +# +CONFIG_FB=y +# CONFIG_FB_CIRRUS is not set +# CONFIG_FB_PM2 is not set +# CONFIG_FB_CYBER2000 is not set +# CONFIG_FB_ARC is not set +# CONFIG_FB_ASILIANT is not set +# CONFIG_FB_IMSTT is not set +# CONFIG_FB_VGA16 is not set +# CONFIG_FB_UVESA is not set +CONFIG_FB_VESA=y +CONFIG_FB_EFI=y +# CONFIG_FB_N411 is 
not set +# CONFIG_FB_HGA is not set +# CONFIG_FB_OPENCORES is not set +# CONFIG_FB_S1D13XXX is not set +# CONFIG_FB_NVIDIA is not set +# CONFIG_FB_RIVA is not set +# CONFIG_FB_I740 is not set +# CONFIG_FB_LE80578 is not set +# CONFIG_FB_MATROX is not set +# CONFIG_FB_RADEON is not set +# CONFIG_FB_ATY128 is not set +# CONFIG_FB_ATY is not set +# CONFIG_FB_S3 is not set +# CONFIG_FB_SAVAGE is not set +# CONFIG_FB_SIS is not set +# CONFIG_FB_VIA is not set +# CONFIG_FB_NEOMAGIC is not set +# CONFIG_FB_KYRO is not set +# CONFIG_FB_3DFX is not set +# CONFIG_FB_VOODOO1 is not set +# CONFIG_FB_VT8623 is not set +# CONFIG_FB_TRIDENT is not set +# CONFIG_FB_ARK is not set +# CONFIG_FB_PM3 is not set +# CONFIG_FB_CARMINE is not set +# CONFIG_FB_SM501 is not set +# CONFIG_FB_SMSCUFX is not set +# CONFIG_FB_UDL is not set +# CONFIG_FB_IBM_GXT4500 is not set +# CONFIG_FB_VIRTUAL is not set +# CONFIG_XEN_FBDEV_FRONTEND is not set +# CONFIG_FB_METRONOME is not set +# CONFIG_FB_MB862XX is not set +CONFIG_FB_HYPERV=m +# CONFIG_FB_SIMPLE is not set +# CONFIG_FB_SSD1307 is not set +# CONFIG_FB_SM712 is not set +# CONFIG_FB_LS2K500 is not set +CONFIG_FB_CORE=y +CONFIG_FB_NOTIFY=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_DEVICE=y +CONFIG_FB_CFB_FILLRECT=y +CONFIG_FB_CFB_COPYAREA=y +CONFIG_FB_CFB_IMAGEBLIT=y +CONFIG_FB_SYS_FILLRECT=y +CONFIG_FB_SYS_COPYAREA=y +CONFIG_FB_SYS_IMAGEBLIT=y +# CONFIG_FB_FOREIGN_ENDIAN is not set +CONFIG_FB_SYS_FOPS=y +CONFIG_FB_DEFERRED_IO=y +CONFIG_FB_IOMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS=y +CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y +# CONFIG_FB_MODE_HELPERS is not set +CONFIG_FB_TILEBLITTING=y +# end of Frame buffer Devices + +# +# Backlight & LCD device support +# +CONFIG_LCD_CLASS_DEVICE=m +# CONFIG_LCD_L4F00242T03 is not set +# CONFIG_LCD_LMS283GF05 is not set +# CONFIG_LCD_LTV350QV is not set +# CONFIG_LCD_ILI922X is not set +# CONFIG_LCD_ILI9320 is not set +# CONFIG_LCD_TDO24M is not set +# CONFIG_LCD_VGG2432A4 is not set +CONFIG_LCD_PLATFORM=m +# CONFIG_LCD_AMS369FG06 is not set +# CONFIG_LCD_LMS501KF03 is not set +# CONFIG_LCD_HX8357 is not set +# CONFIG_LCD_OTM3225A is not set +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_BACKLIGHT_KTD253 is not set +# CONFIG_BACKLIGHT_KTZ8866 is not set +# CONFIG_BACKLIGHT_PWM is not set +CONFIG_BACKLIGHT_APPLE=m +# CONFIG_BACKLIGHT_QCOM_WLED is not set +# CONFIG_BACKLIGHT_SAHARA is not set +# CONFIG_BACKLIGHT_ADP8860 is not set +# CONFIG_BACKLIGHT_ADP8870 is not set +# CONFIG_BACKLIGHT_LM3630A is not set +# CONFIG_BACKLIGHT_LM3639 is not set +CONFIG_BACKLIGHT_LP855X=m +# CONFIG_BACKLIGHT_GPIO is not set +# CONFIG_BACKLIGHT_LV5207LP is not set +# CONFIG_BACKLIGHT_BD6107 is not set +# CONFIG_BACKLIGHT_ARCXCNN is not set +# end of Backlight & LCD device support + +CONFIG_HDMI=y + +# +# Console display driver support +# +CONFIG_VGA_CONSOLE=y +CONFIG_DUMMY_CONSOLE=y +CONFIG_DUMMY_CONSOLE_COLUMNS=80 +CONFIG_DUMMY_CONSOLE_ROWS=25 +CONFIG_FRAMEBUFFER_CONSOLE=y +# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set +CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y +CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y +# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set +# end of Console display driver support + +CONFIG_LOGO=y +# CONFIG_LOGO_LINUX_MONO is not set +# CONFIG_LOGO_LINUX_VGA16 is not set +CONFIG_LOGO_LINUX_CLUT224=y +# end of Graphics support + +# CONFIG_DRM_ACCEL is not set +CONFIG_SOUND=m +# CONFIG_SND is not set +CONFIG_HID_SUPPORT=y +CONFIG_HID=y +CONFIG_HID_BATTERY_STRENGTH=y +CONFIG_HIDRAW=y +CONFIG_UHID=m +CONFIG_HID_GENERIC=y + +# +# Special 
HID drivers +# +CONFIG_HID_A4TECH=m +# CONFIG_HID_ACCUTOUCH is not set +CONFIG_HID_ACRUX=m +# CONFIG_HID_ACRUX_FF is not set +CONFIG_HID_APPLE=m +CONFIG_HID_APPLEIR=m +CONFIG_HID_ASUS=m +CONFIG_HID_AUREAL=m +CONFIG_HID_BELKIN=m +CONFIG_HID_BETOP_FF=m +# CONFIG_HID_BIGBEN_FF is not set +CONFIG_HID_CHERRY=m +CONFIG_HID_CHICONY=m +CONFIG_HID_CORSAIR=m +# CONFIG_HID_COUGAR is not set +# CONFIG_HID_MACALLY is not set +CONFIG_HID_CMEDIA=m +# CONFIG_HID_CP2112 is not set +# CONFIG_HID_CREATIVE_SB0540 is not set +CONFIG_HID_CYPRESS=m +CONFIG_HID_DRAGONRISE=m +# CONFIG_DRAGONRISE_FF is not set +# CONFIG_HID_EMS_FF is not set +CONFIG_HID_ELAN=m +CONFIG_HID_ELECOM=m +CONFIG_HID_ELO=m +# CONFIG_HID_EVISION is not set +CONFIG_HID_EZKEY=m +# CONFIG_HID_FT260 is not set +CONFIG_HID_GEMBIRD=m +CONFIG_HID_GFRM=m +# CONFIG_HID_GLORIOUS is not set +CONFIG_HID_HOLTEK=m +# CONFIG_HOLTEK_FF is not set +# CONFIG_HID_GOOGLE_STADIA_FF is not set +# CONFIG_HID_VIVALDI is not set +CONFIG_HID_GT683R=m +CONFIG_HID_KEYTOUCH=m +CONFIG_HID_KYE=m +CONFIG_HID_UCLOGIC=m +CONFIG_HID_WALTOP=m +# CONFIG_HID_VIEWSONIC is not set +# CONFIG_HID_VRC2 is not set +# CONFIG_HID_XIAOMI is not set +CONFIG_HID_GYRATION=m +CONFIG_HID_ICADE=m +CONFIG_HID_ITE=m +CONFIG_HID_JABRA=m +CONFIG_HID_TWINHAN=m +CONFIG_HID_KENSINGTON=m +CONFIG_HID_LCPOWER=m +CONFIG_HID_LED=m +CONFIG_HID_LENOVO=m +# CONFIG_HID_LETSKETCH is not set +CONFIG_HID_LOGITECH=m +CONFIG_HID_LOGITECH_DJ=m +CONFIG_HID_LOGITECH_HIDPP=m +# CONFIG_LOGITECH_FF is not set +# CONFIG_LOGIRUMBLEPAD2_FF is not set +# CONFIG_LOGIG940_FF is not set +# CONFIG_LOGIWHEELS_FF is not set +CONFIG_HID_MAGICMOUSE=y +# CONFIG_HID_MALTRON is not set +# CONFIG_HID_MAYFLASH is not set +# CONFIG_HID_MEGAWORLD_FF is not set +# CONFIG_HID_REDRAGON is not set +CONFIG_HID_MICROSOFT=m +CONFIG_HID_MONTEREY=m +CONFIG_HID_MULTITOUCH=m +# CONFIG_HID_NINTENDO is not set +CONFIG_HID_NTI=m +CONFIG_HID_NTRIG=y +# CONFIG_HID_NVIDIA_SHIELD is not set +CONFIG_HID_ORTEK=m +CONFIG_HID_PANTHERLORD=m +# CONFIG_PANTHERLORD_FF is not set +CONFIG_HID_PENMOUNT=m +CONFIG_HID_PETALYNX=m +CONFIG_HID_PICOLCD=m +CONFIG_HID_PICOLCD_FB=y +CONFIG_HID_PICOLCD_BACKLIGHT=y +CONFIG_HID_PICOLCD_LCD=y +CONFIG_HID_PICOLCD_LEDS=y +CONFIG_HID_PICOLCD_CIR=y +CONFIG_HID_PLANTRONICS=m +# CONFIG_HID_PXRC is not set +# CONFIG_HID_RAZER is not set +CONFIG_HID_PRIMAX=m +# CONFIG_HID_RETRODE is not set +CONFIG_HID_ROCCAT=m +CONFIG_HID_SAITEK=m +CONFIG_HID_SAMSUNG=m +# CONFIG_HID_SEMITEK is not set +# CONFIG_HID_SIGMAMICRO is not set +CONFIG_HID_SONY=m +CONFIG_SONY_FF=y +CONFIG_HID_SPEEDLINK=m +# CONFIG_HID_STEAM is not set +CONFIG_HID_STEELSERIES=m +CONFIG_HID_SUNPLUS=m +CONFIG_HID_RMI=m +CONFIG_HID_GREENASIA=m +# CONFIG_GREENASIA_FF is not set +CONFIG_HID_HYPERV_MOUSE=m +CONFIG_HID_SMARTJOYPLUS=m +# CONFIG_SMARTJOYPLUS_FF is not set +CONFIG_HID_TIVO=m +CONFIG_HID_TOPSEED=m +# CONFIG_HID_TOPRE is not set +CONFIG_HID_THINGM=m +CONFIG_HID_THRUSTMASTER=m +# CONFIG_THRUSTMASTER_FF is not set +# CONFIG_HID_UDRAW_PS3 is not set +# CONFIG_HID_U2FZERO is not set +CONFIG_HID_WACOM=m +CONFIG_HID_WIIMOTE=m +CONFIG_HID_XINMO=m +CONFIG_HID_ZEROPLUS=m +# CONFIG_ZEROPLUS_FF is not set +CONFIG_HID_ZYDACRON=m +CONFIG_HID_SENSOR_HUB=y +CONFIG_HID_SENSOR_CUSTOM_SENSOR=m +CONFIG_HID_ALPS=m +# CONFIG_HID_MCP2221 is not set +# end of Special HID drivers + +# +# HID-BPF support +# +# CONFIG_HID_BPF is not set +# end of HID-BPF support + +# +# USB HID support +# +CONFIG_USB_HID=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y +# end of USB HID support + +CONFIG_I2C_HID=m +# 
CONFIG_I2C_HID_ACPI is not set +# CONFIG_I2C_HID_OF is not set + +# +# Intel ISH HID support +# +CONFIG_INTEL_ISH_HID=m +# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set +# end of Intel ISH HID support + +# +# AMD SFH HID Support +# +# CONFIG_AMD_SFH_HID is not set +# end of AMD SFH HID Support + +CONFIG_USB_OHCI_LITTLE_ENDIAN=y +CONFIG_USB_SUPPORT=y +CONFIG_USB_COMMON=y +CONFIG_USB_LED_TRIG=y +# CONFIG_USB_ULPI_BUS is not set +# CONFIG_USB_CONN_GPIO is not set +CONFIG_USB_ARCH_HAS_HCD=y +CONFIG_USB=y +CONFIG_USB_PCI=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y + +# +# Miscellaneous USB options +# +CONFIG_USB_DEFAULT_PERSIST=y +# CONFIG_USB_FEW_INIT_RETRIES is not set +# CONFIG_USB_DYNAMIC_MINORS is not set +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_PRODUCTLIST is not set +CONFIG_USB_LEDS_TRIGGER_USBPORT=m +CONFIG_USB_AUTOSUSPEND_DELAY=2 +CONFIG_USB_MON=y + +# +# USB Host Controller Drivers +# +# CONFIG_USB_C67X00_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_DBGCAP=y +CONFIG_USB_XHCI_PCI=y +# CONFIG_USB_XHCI_PCI_RENESAS is not set +# CONFIG_USB_XHCI_PLATFORM is not set +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_EHCI_TT_NEWSCHED=y +CONFIG_USB_EHCI_PCI=y +# CONFIG_USB_EHCI_FSL is not set +# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_OXU210HP_HCD is not set +# CONFIG_USB_ISP116X_HCD is not set +# CONFIG_USB_MAX3421_HCD is not set +CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_PLATFORM is not set +# CONFIG_USB_UHCI_HCD is not set +# CONFIG_USB_SL811_HCD is not set +# CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_BCMA is not set +# CONFIG_USB_HCD_TEST_MODE is not set +# CONFIG_USB_XEN_HCD is not set + +# +# USB Device Class drivers +# +CONFIG_USB_ACM=m +CONFIG_USB_PRINTER=m +CONFIG_USB_WDM=m +CONFIG_USB_TMC=m + +# +# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may +# + +# +# also be needed; see USB_STORAGE Help for more info +# +CONFIG_USB_STORAGE=m +# CONFIG_USB_STORAGE_DEBUG is not set +CONFIG_USB_STORAGE_REALTEK=m +CONFIG_REALTEK_AUTOPM=y +CONFIG_USB_STORAGE_DATAFAB=m +CONFIG_USB_STORAGE_FREECOM=m +CONFIG_USB_STORAGE_ISD200=m +CONFIG_USB_STORAGE_USBAT=m +CONFIG_USB_STORAGE_SDDR09=m +CONFIG_USB_STORAGE_SDDR55=m +CONFIG_USB_STORAGE_JUMPSHOT=m +CONFIG_USB_STORAGE_ALAUDA=m +CONFIG_USB_STORAGE_ONETOUCH=m +CONFIG_USB_STORAGE_KARMA=m +CONFIG_USB_STORAGE_CYPRESS_ATACB=m +CONFIG_USB_STORAGE_ENE_UB6250=m +CONFIG_USB_UAS=m + +# +# USB Imaging devices +# +CONFIG_USB_MDC800=m +CONFIG_USB_MICROTEK=m +# CONFIG_USBIP_CORE is not set + +# +# USB dual-mode controller drivers +# +# CONFIG_USB_CDNS_SUPPORT is not set +# CONFIG_USB_MUSB_HDRC is not set +# CONFIG_USB_DWC3 is not set +# CONFIG_USB_DWC2 is not set +# CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set + +# +# USB port drivers +# +CONFIG_USB_SERIAL=y +CONFIG_USB_SERIAL_CONSOLE=y +CONFIG_USB_SERIAL_GENERIC=y +# CONFIG_USB_SERIAL_SIMPLE is not set +CONFIG_USB_SERIAL_AIRCABLE=m +CONFIG_USB_SERIAL_ARK3116=m +CONFIG_USB_SERIAL_BELKIN=m +CONFIG_USB_SERIAL_CH341=m +CONFIG_USB_SERIAL_WHITEHEAT=m +CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m +CONFIG_USB_SERIAL_CP210X=m +CONFIG_USB_SERIAL_CYPRESS_M8=m +CONFIG_USB_SERIAL_EMPEG=m +CONFIG_USB_SERIAL_FTDI_SIO=m +CONFIG_USB_SERIAL_VISOR=m +CONFIG_USB_SERIAL_IPAQ=m +CONFIG_USB_SERIAL_IR=m +CONFIG_USB_SERIAL_EDGEPORT=m +CONFIG_USB_SERIAL_EDGEPORT_TI=m +# CONFIG_USB_SERIAL_F81232 is not set +CONFIG_USB_SERIAL_F8153X=m +CONFIG_USB_SERIAL_GARMIN=m +CONFIG_USB_SERIAL_IPW=m +CONFIG_USB_SERIAL_IUU=m +CONFIG_USB_SERIAL_KEYSPAN_PDA=m 
+CONFIG_USB_SERIAL_KEYSPAN=m +CONFIG_USB_SERIAL_KLSI=m +CONFIG_USB_SERIAL_KOBIL_SCT=m +CONFIG_USB_SERIAL_MCT_U232=m +# CONFIG_USB_SERIAL_METRO is not set +CONFIG_USB_SERIAL_MOS7720=m +CONFIG_USB_SERIAL_MOS7715_PARPORT=y +CONFIG_USB_SERIAL_MOS7840=m +CONFIG_USB_SERIAL_MXUPORT=m +CONFIG_USB_SERIAL_NAVMAN=m +CONFIG_USB_SERIAL_PL2303=m +CONFIG_USB_SERIAL_OTI6858=m +CONFIG_USB_SERIAL_QCAUX=m +CONFIG_USB_SERIAL_QUALCOMM=m +CONFIG_USB_SERIAL_SPCP8X5=m +CONFIG_USB_SERIAL_SAFE=m +CONFIG_USB_SERIAL_SAFE_PADDED=y +CONFIG_USB_SERIAL_SIERRAWIRELESS=m +CONFIG_USB_SERIAL_SYMBOL=m +CONFIG_USB_SERIAL_TI=m +CONFIG_USB_SERIAL_CYBERJACK=m +CONFIG_USB_SERIAL_WWAN=m +CONFIG_USB_SERIAL_OPTION=m +CONFIG_USB_SERIAL_OMNINET=m +CONFIG_USB_SERIAL_OPTICON=m +CONFIG_USB_SERIAL_XSENS_MT=m +# CONFIG_USB_SERIAL_WISHBONE is not set +CONFIG_USB_SERIAL_SSU100=m +CONFIG_USB_SERIAL_QT2=m +CONFIG_USB_SERIAL_UPD78F0730=m +# CONFIG_USB_SERIAL_XR is not set +CONFIG_USB_SERIAL_DEBUG=m + +# +# USB Miscellaneous drivers +# +CONFIG_USB_USS720=m +CONFIG_USB_EMI62=m +CONFIG_USB_EMI26=m +CONFIG_USB_ADUTUX=m +CONFIG_USB_SEVSEG=m +CONFIG_USB_LEGOTOWER=m +CONFIG_USB_LCD=m +# CONFIG_USB_CYPRESS_CY7C63 is not set +# CONFIG_USB_CYTHERM is not set +CONFIG_USB_IDMOUSE=m +CONFIG_USB_APPLEDISPLAY=m +# CONFIG_APPLE_MFI_FASTCHARGE is not set +CONFIG_USB_SISUSBVGA=m +CONFIG_USB_LD=m +# CONFIG_USB_TRANCEVIBRATOR is not set +CONFIG_USB_IOWARRIOR=m +# CONFIG_USB_TEST is not set +# CONFIG_USB_EHSET_TEST_FIXTURE is not set +CONFIG_USB_ISIGHTFW=m +# CONFIG_USB_YUREX is not set +CONFIG_USB_EZUSB_FX2=m +# CONFIG_USB_HUB_USB251XB is not set +CONFIG_USB_HSIC_USB3503=m +# CONFIG_USB_HSIC_USB4604 is not set +# CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set +CONFIG_USB_ATM=m +CONFIG_USB_SPEEDTOUCH=m +CONFIG_USB_CXACRU=m +CONFIG_USB_UEAGLEATM=m +CONFIG_USB_XUSBATM=m + +# +# USB Physical Layer drivers +# +# CONFIG_NOP_USB_XCEIV is not set +# CONFIG_USB_GPIO_VBUS is not set +# CONFIG_USB_ISP1301 is not set +# end of USB Physical Layer drivers + +# CONFIG_USB_GADGET is not set +CONFIG_TYPEC=y +CONFIG_TYPEC_TCPM=y +# CONFIG_TYPEC_TCPCI is not set +CONFIG_TYPEC_FUSB302=m +CONFIG_TYPEC_UCSI=y +# CONFIG_UCSI_CCG is not set +CONFIG_UCSI_ACPI=y +# CONFIG_UCSI_STM32G0 is not set +CONFIG_TYPEC_TPS6598X=m +# CONFIG_TYPEC_ANX7411 is not set +# CONFIG_TYPEC_RT1719 is not set +# CONFIG_TYPEC_HD3SS3220 is not set +# CONFIG_TYPEC_STUSB160X is not set +# CONFIG_TYPEC_WUSB3801 is not set + +# +# USB Type-C Multiplexer/DeMultiplexer Switch support +# +# CONFIG_TYPEC_MUX_FSA4480 is not set +# CONFIG_TYPEC_MUX_GPIO_SBU is not set +CONFIG_TYPEC_MUX_PI3USB30532=m +# CONFIG_TYPEC_MUX_NB7VPQ904M is not set +# end of USB Type-C Multiplexer/DeMultiplexer Switch support + +# +# USB Type-C Alternate Mode drivers +# +CONFIG_TYPEC_DP_ALTMODE=m +# CONFIG_TYPEC_NVIDIA_ALTMODE is not set +# end of USB Type-C Alternate Mode drivers + +CONFIG_USB_ROLE_SWITCH=y +CONFIG_USB_ROLES_INTEL_XHCI=y +CONFIG_MMC=m +CONFIG_MMC_BLOCK=m +CONFIG_MMC_BLOCK_MINORS=8 +CONFIG_SDIO_UART=m +# CONFIG_MMC_TEST is not set + +# +# MMC/SD/SDIO Host Controller Drivers +# +# CONFIG_MMC_DEBUG is not set +CONFIG_MMC_SDHCI=m +CONFIG_MMC_SDHCI_IO_ACCESSORS=y +CONFIG_MMC_SDHCI_PCI=m +CONFIG_MMC_RICOH_MMC=y +CONFIG_MMC_SDHCI_ACPI=m +CONFIG_MMC_SDHCI_PLTFM=m +# CONFIG_MMC_SDHCI_F_SDH30 is not set +# CONFIG_MMC_WBSD is not set +CONFIG_MMC_TIFM_SD=m +# CONFIG_MMC_SPI is not set +CONFIG_MMC_CB710=m +CONFIG_MMC_VIA_SDMMC=m +CONFIG_MMC_VUB300=m +CONFIG_MMC_USHC=m +# CONFIG_MMC_USDHI6ROL0 is not set 
+CONFIG_MMC_REALTEK_PCI=m +CONFIG_MMC_REALTEK_USB=m +CONFIG_MMC_CQHCI=m +# CONFIG_MMC_HSQ is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_AW200XX is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not 
set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m +CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +CONFIG_EDAC_DEBUG=y +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +# CONFIG_EDAC_IGEN6 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y 
+ +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y +CONFIG_DMA_ACPI=y +# CONFIG_ALTERA_MSGDMA is not set +CONFIG_INTEL_IDMA64=m +CONFIG_INTEL_IDXD_BUS=m +CONFIG_INTEL_IDXD=m +# CONFIG_INTEL_IDXD_COMPAT is not set +CONFIG_INTEL_IDXD_SVM=y +# CONFIG_INTEL_IDXD_PERFMON is not set +CONFIG_INTEL_IOATDMA=m +# CONFIG_PLX_DMA is not set +# CONFIG_XILINX_DMA is not set +# CONFIG_XILINX_XDMA is not set +CONFIG_AMD_PTDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +CONFIG_DW_DMAC_CORE=y +CONFIG_DW_DMAC=m +CONFIG_DW_DMAC_PCI=y +# CONFIG_DW_EDMA is not set +CONFIG_HSU_DMA=y +# CONFIG_SF_PDMA is not set +# CONFIG_INTEL_LDMA is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +CONFIG_DMATEST=m +CONFIG_DMA_ENGINE_RAID=y + +# +# DMABUF options +# +CONFIG_SYNC_FILE=y +# CONFIG_SW_SYNC is not set +# CONFIG_UDMABUF is not set +# CONFIG_DMABUF_MOVE_NOTIFY is not set +CONFIG_DMABUF_DEBUG=y +# CONFIG_DMABUF_SELFTESTS is not set +# CONFIG_DMABUF_HEAPS is not set +# CONFIG_DMABUF_SYSFS_STATS is not set +# end of DMABUF options + +CONFIG_DCA=m +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_UIO_PDRV_GENIRQ=m +# CONFIG_UIO_DMEM_GENIRQ is not set +CONFIG_UIO_AEC=m +CONFIG_UIO_SERCOS3=m +CONFIG_UIO_PCI_GENERIC=m +# CONFIG_UIO_NETX is not set +# CONFIG_UIO_PRUSS is not set +# CONFIG_UIO_MF624 is not set +CONFIG_UIO_HV_GENERIC=m +CONFIG_VFIO=m +CONFIG_VFIO_GROUP=y +CONFIG_VFIO_CONTAINER=y +CONFIG_VFIO_IOMMU_TYPE1=m +CONFIG_VFIO_NOIOMMU=y +CONFIG_VFIO_VIRQFD=y + +# +# VFIO support for PCI devices +# +CONFIG_VFIO_PCI_CORE=m +CONFIG_VFIO_PCI_MMAP=y +CONFIG_VFIO_PCI_INTX=y +CONFIG_VFIO_PCI=m +# CONFIG_VFIO_PCI_VGA is not set +# CONFIG_VFIO_PCI_IGD is not set +# CONFIG_MLX5_VFIO_PCI is not set +# end of VFIO support for PCI devices + +CONFIG_VFIO_MDEV=m +CONFIG_IRQ_BYPASS_MANAGER=m +CONFIG_VIRT_DRIVERS=y +CONFIG_VMGENID=y +# CONFIG_VBOXGUEST is not set +# CONFIG_NITRO_ENCLAVES is not set +CONFIG_EFI_SECRET=m +CONFIG_SEV_GUEST=m +CONFIG_TDX_GUEST_DRIVER=m +CONFIG_CSV_GUEST=m +CONFIG_VIRTIO_ANCHOR=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_PCI_LIB=y +CONFIG_VIRTIO_PCI_LIB_LEGACY=y +CONFIG_VIRTIO_MENU=y +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_PCI_LEGACY=y +CONFIG_VIRTIO_PMEM=m +CONFIG_VIRTIO_BALLOON=m +CONFIG_VIRTIO_MEM=m +CONFIG_VIRTIO_INPUT=m +CONFIG_VIRTIO_MMIO=y +CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y +CONFIG_VIRTIO_DMA_SHARED_BUFFER=m +# CONFIG_VDPA is not set +CONFIG_VHOST_IOTLB=m +CONFIG_VHOST_TASK=y +CONFIG_VHOST=m +CONFIG_VHOST_MENU=y +CONFIG_VHOST_NET=m +CONFIG_VHOST_SCSI=m +CONFIG_VHOST_VSOCK=m +# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set + +# +# Microsoft Hyper-V guest support +# +CONFIG_HYPERV=m +# CONFIG_HYPERV_VTL_MODE is not set +CONFIG_HYPERV_TIMER=y +CONFIG_HYPERV_UTILS=m +CONFIG_HYPERV_BALLOON=m +# end of Microsoft Hyper-V guest support + +# +# Xen driver support +# +# CONFIG_XEN_BALLOON is not set +CONFIG_XEN_DEV_EVTCHN=m +# CONFIG_XEN_BACKEND is not set +CONFIG_XENFS=m +CONFIG_XEN_COMPAT_XENFS=y +CONFIG_XEN_SYS_HYPERVISOR=y +CONFIG_XEN_XENBUS_FRONTEND=y +# CONFIG_XEN_GNTDEV is not set +# CONFIG_XEN_GRANT_DEV_ALLOC is not set +# CONFIG_XEN_GRANT_DMA_ALLOC is not set +# CONFIG_XEN_PVCALLS_FRONTEND is not set +CONFIG_XEN_PRIVCMD=m +CONFIG_XEN_EFI=y +CONFIG_XEN_AUTO_XLATE=y +CONFIG_XEN_ACPI=y +# CONFIG_XEN_UNPOPULATED_ALLOC is not set +# CONFIG_XEN_VIRTIO is not set +# end of Xen driver support + +# CONFIG_GREYBUS is not set +# CONFIG_COMEDI is not set +# CONFIG_STAGING is not set +# CONFIG_CHROME_PLATFORMS is not set +CONFIG_MELLANOX_PLATFORM=y +CONFIG_MLXREG_HOTPLUG=m +# 
CONFIG_MLXREG_IO is not set +# CONFIG_MLXREG_LC is not set +# CONFIG_NVSW_SN2201 is not set +CONFIG_SURFACE_PLATFORMS=y +# CONFIG_SURFACE3_WMI is not set +# CONFIG_SURFACE_3_POWER_OPREGION is not set +# CONFIG_SURFACE_GPE is not set +# CONFIG_SURFACE_HOTPLUG is not set +# CONFIG_SURFACE_PRO3_BUTTON is not set +CONFIG_X86_PLATFORM_DEVICES=y +CONFIG_ACPI_WMI=m +CONFIG_WMI_BMOF=m +# CONFIG_HUAWEI_WMI is not set +# CONFIG_UV_SYSFS is not set +CONFIG_MXM_WMI=m +# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set +# CONFIG_XIAOMI_WMI is not set +# CONFIG_GIGABYTE_WMI is not set +# CONFIG_YOGABOOK is not set +CONFIG_ACERHDF=m +# CONFIG_ACER_WIRELESS is not set +CONFIG_ACER_WMI=m +# CONFIG_AMD_PMF is not set +# CONFIG_AMD_PMC is not set +# CONFIG_AMD_HSMP is not set +# CONFIG_ADV_SWBUTTON is not set +CONFIG_APPLE_GMUX=m +CONFIG_ASUS_LAPTOP=m +# CONFIG_ASUS_WIRELESS is not set +CONFIG_ASUS_WMI=m +CONFIG_ASUS_NB_WMI=m +# CONFIG_ASUS_TF103C_DOCK is not set +# CONFIG_MERAKI_MX100 is not set +CONFIG_EEEPC_LAPTOP=m +CONFIG_EEEPC_WMI=m +# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set +CONFIG_AMILO_RFKILL=m +CONFIG_FUJITSU_LAPTOP=m +CONFIG_FUJITSU_TABLET=m +# CONFIG_GPD_POCKET_FAN is not set +# CONFIG_X86_PLATFORM_DRIVERS_HP is not set +# CONFIG_WIRELESS_HOTKEY is not set +# CONFIG_IBM_RTL is not set +CONFIG_IDEAPAD_LAPTOP=m +# CONFIG_LENOVO_YMC is not set +CONFIG_SENSORS_HDAPS=m +CONFIG_THINKPAD_ACPI=m +# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set +# CONFIG_THINKPAD_ACPI_DEBUG is not set +# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set +CONFIG_THINKPAD_ACPI_VIDEO=y +CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y +# CONFIG_THINKPAD_LMI is not set +# CONFIG_INTEL_ATOMISP2_PM is not set +CONFIG_INTEL_IFS=m +# CONFIG_INTEL_SAR_INT1092 is not set +CONFIG_INTEL_PMC_CORE=m +CONFIG_INTEL_PMT_CLASS=m +CONFIG_INTEL_PMT_TELEMETRY=m +CONFIG_INTEL_PMT_CRASHLOG=m + +# +# Intel Speed Select Technology interface support +# +CONFIG_INTEL_SPEED_SELECT_TPMI=m +CONFIG_INTEL_SPEED_SELECT_INTERFACE=m +# end of Intel Speed Select Technology interface support + +CONFIG_INTEL_WMI=y +# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set +CONFIG_INTEL_WMI_THUNDERBOLT=m + +# +# Intel Uncore Frequency Control +# +# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set +# end of Intel Uncore Frequency Control + +CONFIG_INTEL_HID_EVENT=m +CONFIG_INTEL_VBTN=m +# CONFIG_INTEL_INT0002_VGPIO is not set +CONFIG_INTEL_OAKTRAIL=m +# CONFIG_INTEL_ISHTP_ECLITE is not set +# CONFIG_INTEL_PUNIT_IPC is not set +CONFIG_INTEL_RST=m +# CONFIG_INTEL_SDSI is not set +# CONFIG_INTEL_SMARTCONNECT is not set +CONFIG_INTEL_TPMI=m +CONFIG_INTEL_TURBO_MAX_3=y +CONFIG_INTEL_VSEC=y +# CONFIG_MSI_EC is not set +CONFIG_MSI_LAPTOP=m +CONFIG_MSI_WMI=m +# CONFIG_PCENGINES_APU2 is not set +# CONFIG_BARCO_P50_GPIO is not set +CONFIG_SAMSUNG_LAPTOP=m +CONFIG_SAMSUNG_Q10=m +# CONFIG_ACPI_TOSHIBA is not set +CONFIG_TOSHIBA_BT_RFKILL=m +# CONFIG_TOSHIBA_HAPS is not set +# CONFIG_TOSHIBA_WMI is not set +CONFIG_ACPI_CMPC=m +CONFIG_COMPAL_LAPTOP=m +# CONFIG_LG_LAPTOP is not set +CONFIG_PANASONIC_LAPTOP=m +CONFIG_SONY_LAPTOP=m +CONFIG_SONYPI_COMPAT=y +# CONFIG_SYSTEM76_ACPI is not set +CONFIG_TOPSTAR_LAPTOP=m +# CONFIG_SERIAL_MULTI_INSTANTIATE is not set +CONFIG_MLX_PLATFORM=m +CONFIG_INTEL_IPS=m +# CONFIG_INTEL_SCU_PCI is not set +# CONFIG_INTEL_SCU_PLATFORM is not set +# CONFIG_SIEMENS_SIMATIC_IPC is not set +# CONFIG_WINMATE_FM07_KEYS is not set +# CONFIG_SEL3350_PLATFORM is not set +CONFIG_P2SB=y +CONFIG_HAVE_CLK=y +CONFIG_HAVE_CLK_PREPARE=y +CONFIG_COMMON_CLK=y +# CONFIG_LMK04832 is not set +# 
CONFIG_COMMON_CLK_MAX9485 is not set +# CONFIG_COMMON_CLK_SI5341 is not set +# CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI544 is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +# CONFIG_COMMON_CLK_CS2000_CP is not set +# CONFIG_COMMON_CLK_PWM is not set +# CONFIG_XILINX_VCU is not set +CONFIG_HWSPINLOCK=y + +# +# Clock Source drivers +# +CONFIG_CLKEVT_I8253=y +CONFIG_I8253_LOCK=y +CONFIG_CLKBLD_I8253=y +# end of Clock Source drivers + +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set +CONFIG_IOMMU_IOVA=y +CONFIG_IOMMU_API=y +CONFIG_IOMMU_SUPPORT=y + +# +# Generic IOMMU Pagetable Support +# +CONFIG_IOMMU_IO_PGTABLE=y +# end of Generic IOMMU Pagetable Support + +CONFIG_IOMMU_DEBUGFS=y +# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set +# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set +CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y +CONFIG_IOMMU_DMA=y +CONFIG_IOMMU_SVA=y +CONFIG_AMD_IOMMU=y +CONFIG_AMD_IOMMU_V2=m +# CONFIG_AMD_IOMMU_DEBUGFS is not set +CONFIG_DMAR_TABLE=y +CONFIG_DMAR_PERF=y +CONFIG_DMAR_DEBUG=y +CONFIG_INTEL_IOMMU=y +CONFIG_INTEL_IOMMU_DEBUGFS=y +CONFIG_INTEL_IOMMU_SVM=y +# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set +CONFIG_INTEL_IOMMU_FLOPPY_WA=y +CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y +CONFIG_INTEL_IOMMU_PERF_EVENTS=y +# CONFIG_IOMMUFD is not set +CONFIG_IRQ_REMAP=y +CONFIG_HYPERV_IOMMU=y +# CONFIG_VIRTIO_IOMMU is not set + +# +# Remoteproc drivers +# +# CONFIG_REMOTEPROC is not set +# end of Remoteproc drivers + +# +# Rpmsg drivers +# +# CONFIG_RPMSG_QCOM_GLINK_RPM is not set +# CONFIG_RPMSG_VIRTIO is not set +# end of Rpmsg drivers + +# CONFIG_SOUNDWIRE is not set + +# +# SOC (System On Chip) specific Drivers +# + +# +# Amlogic SoC drivers +# +# end of Amlogic SoC drivers + +# +# Broadcom SoC drivers +# +# end of Broadcom SoC drivers + +# +# NXP/Freescale QorIQ SoC drivers +# +# end of NXP/Freescale QorIQ SoC drivers + +# +# fujitsu SoC drivers +# +# end of fujitsu SoC drivers + +# +# i.MX SoC drivers +# +# end of i.MX SoC drivers + +# +# Enable LiteX SoC Builder specific drivers +# +# end of Enable LiteX SoC Builder specific drivers + +# CONFIG_WPCM450_SOC is not set + +# +# Qualcomm SoC drivers +# +# end of Qualcomm SoC drivers + +# CONFIG_SOC_TI is not set + +# +# Xilinx SoC drivers +# +# end of Xilinx SoC drivers +# end of SOC (System On Chip) specific Drivers + +# CONFIG_PM_DEVFREQ is not set +# CONFIG_EXTCON is not set +# CONFIG_MEMORY is not set +CONFIG_IIO=m +CONFIG_IIO_BUFFER=y +# CONFIG_IIO_BUFFER_CB is not set +# CONFIG_IIO_BUFFER_DMA is not set +# CONFIG_IIO_BUFFER_DMAENGINE is not set +# CONFIG_IIO_BUFFER_HW_CONSUMER is not set +CONFIG_IIO_KFIFO_BUF=m +CONFIG_IIO_TRIGGERED_BUFFER=m +# CONFIG_IIO_CONFIGFS is not set +CONFIG_IIO_TRIGGER=y +CONFIG_IIO_CONSUMERS_PER_TRIGGER=2 +# CONFIG_IIO_SW_DEVICE is not set +# CONFIG_IIO_SW_TRIGGER is not set +# CONFIG_IIO_TRIGGERED_EVENT is not set + +# +# Accelerometers +# +# CONFIG_ADIS16201 is not set +# CONFIG_ADIS16209 is not set +# CONFIG_ADXL313_I2C is not set +# CONFIG_ADXL313_SPI is not set +# CONFIG_ADXL345_I2C is not set +# CONFIG_ADXL345_SPI is not set +# CONFIG_ADXL355_I2C is not set +# CONFIG_ADXL355_SPI is not set +# CONFIG_ADXL367_SPI is not set +# CONFIG_ADXL367_I2C is not set +# CONFIG_ADXL372_SPI is not set +# CONFIG_ADXL372_I2C is not set +# CONFIG_BMA180 is not set +# CONFIG_BMA220 is not set +# CONFIG_BMA400 is not set +# CONFIG_BMC150_ACCEL is not set +# CONFIG_BMI088_ACCEL is not set +# CONFIG_DA280 is not set +# CONFIG_DA311 is not set +# CONFIG_DMARD06 is not set +# CONFIG_DMARD09 is 
not set +# CONFIG_DMARD10 is not set +# CONFIG_FXLS8962AF_I2C is not set +# CONFIG_FXLS8962AF_SPI is not set +CONFIG_HID_SENSOR_ACCEL_3D=m +# CONFIG_IIO_ST_ACCEL_3AXIS is not set +# CONFIG_IIO_KX022A_SPI is not set +# CONFIG_IIO_KX022A_I2C is not set +# CONFIG_KXSD9 is not set +# CONFIG_KXCJK1013 is not set +# CONFIG_MC3230 is not set +# CONFIG_MMA7455_I2C is not set +# CONFIG_MMA7455_SPI is not set +# CONFIG_MMA7660 is not set +# CONFIG_MMA8452 is not set +# CONFIG_MMA9551 is not set +# CONFIG_MMA9553 is not set +# CONFIG_MSA311 is not set +# CONFIG_MXC4005 is not set +# CONFIG_MXC6255 is not set +# CONFIG_SCA3000 is not set +# CONFIG_SCA3300 is not set +# CONFIG_STK8312 is not set +# CONFIG_STK8BA50 is not set +# end of Accelerometers + +# +# Analog to digital converters +# +# CONFIG_AD4130 is not set +# CONFIG_AD7091R5 is not set +# CONFIG_AD7124 is not set +# CONFIG_AD7192 is not set +# CONFIG_AD7266 is not set +# CONFIG_AD7280 is not set +# CONFIG_AD7291 is not set +# CONFIG_AD7292 is not set +# CONFIG_AD7298 is not set +# CONFIG_AD7476 is not set +# CONFIG_AD7606_IFACE_PARALLEL is not set +# CONFIG_AD7606_IFACE_SPI is not set +# CONFIG_AD7766 is not set +# CONFIG_AD7768_1 is not set +# CONFIG_AD7780 is not set +# CONFIG_AD7791 is not set +# CONFIG_AD7793 is not set +# CONFIG_AD7887 is not set +# CONFIG_AD7923 is not set +# CONFIG_AD7949 is not set +# CONFIG_AD799X is not set +# CONFIG_ENVELOPE_DETECTOR is not set +# CONFIG_HI8435 is not set +# CONFIG_HX711 is not set +# CONFIG_INA2XX_ADC is not set +# CONFIG_LTC2471 is not set +# CONFIG_LTC2485 is not set +# CONFIG_LTC2496 is not set +# CONFIG_LTC2497 is not set +# CONFIG_MAX1027 is not set +# CONFIG_MAX11100 is not set +# CONFIG_MAX1118 is not set +# CONFIG_MAX11205 is not set +# CONFIG_MAX11410 is not set +# CONFIG_MAX1241 is not set +# CONFIG_MAX1363 is not set +# CONFIG_MAX9611 is not set +# CONFIG_MCP320X is not set +# CONFIG_MCP3422 is not set +# CONFIG_MCP3911 is not set +# CONFIG_NAU7802 is not set +# CONFIG_RICHTEK_RTQ6056 is not set +# CONFIG_SD_ADC_MODULATOR is not set +# CONFIG_TI_ADC081C is not set +# CONFIG_TI_ADC0832 is not set +# CONFIG_TI_ADC084S021 is not set +# CONFIG_TI_ADC12138 is not set +# CONFIG_TI_ADC108S102 is not set +# CONFIG_TI_ADC128S052 is not set +# CONFIG_TI_ADC161S626 is not set +# CONFIG_TI_ADS1015 is not set +# CONFIG_TI_ADS7924 is not set +# CONFIG_TI_ADS1100 is not set +# CONFIG_TI_ADS7950 is not set +# CONFIG_TI_ADS8344 is not set +# CONFIG_TI_ADS8688 is not set +# CONFIG_TI_ADS124S08 is not set +# CONFIG_TI_ADS131E08 is not set +# CONFIG_TI_LMP92064 is not set +# CONFIG_TI_TLC4541 is not set +# CONFIG_TI_TSC2046 is not set +# CONFIG_VF610_ADC is not set +# CONFIG_VIPERBOARD_ADC is not set +# CONFIG_XILINX_XADC is not set +# end of Analog to digital converters + +# +# Analog to digital and digital to analog converters +# +# CONFIG_AD74115 is not set +# CONFIG_AD74413R is not set +# end of Analog to digital and digital to analog converters + +# +# Analog Front Ends +# +# CONFIG_IIO_RESCALE is not set +# end of Analog Front Ends + +# +# Amplifiers +# +# CONFIG_AD8366 is not set +# CONFIG_ADA4250 is not set +# CONFIG_HMC425 is not set +# end of Amplifiers + +# +# Capacitance to digital converters +# +# CONFIG_AD7150 is not set +# CONFIG_AD7746 is not set +# end of Capacitance to digital converters + +# +# Chemical Sensors +# +# CONFIG_ATLAS_PH_SENSOR is not set +# CONFIG_ATLAS_EZO_SENSOR is not set +# CONFIG_BME680 is not set +# CONFIG_CCS811 is not set +# CONFIG_IAQCORE is not set +# 
CONFIG_SCD30_CORE is not set +# CONFIG_SCD4X is not set +# CONFIG_SENSIRION_SGP30 is not set +# CONFIG_SENSIRION_SGP40 is not set +# CONFIG_SPS30_I2C is not set +# CONFIG_SENSEAIR_SUNRISE_CO2 is not set +# CONFIG_VZ89X is not set +# end of Chemical Sensors + +# +# Hid Sensor IIO Common +# +CONFIG_HID_SENSOR_IIO_COMMON=m +CONFIG_HID_SENSOR_IIO_TRIGGER=m +# end of Hid Sensor IIO Common + +# +# IIO SCMI Sensors +# +# end of IIO SCMI Sensors + +# +# SSP Sensor Common +# +# CONFIG_IIO_SSP_SENSORHUB is not set +# end of SSP Sensor Common + +# +# Digital to analog converters +# +# CONFIG_AD3552R is not set +# CONFIG_AD5064 is not set +# CONFIG_AD5360 is not set +# CONFIG_AD5380 is not set +# CONFIG_AD5421 is not set +# CONFIG_AD5446 is not set +# CONFIG_AD5449 is not set +# CONFIG_AD5592R is not set +# CONFIG_AD5593R is not set +# CONFIG_AD5504 is not set +# CONFIG_AD5624R_SPI is not set +# CONFIG_LTC2688 is not set +# CONFIG_AD5686_SPI is not set +# CONFIG_AD5696_I2C is not set +# CONFIG_AD5755 is not set +# CONFIG_AD5758 is not set +# CONFIG_AD5761 is not set +# CONFIG_AD5764 is not set +# CONFIG_AD5766 is not set +# CONFIG_AD5770R is not set +# CONFIG_AD5791 is not set +# CONFIG_AD7293 is not set +# CONFIG_AD7303 is not set +# CONFIG_AD8801 is not set +# CONFIG_DPOT_DAC is not set +# CONFIG_DS4424 is not set +# CONFIG_LTC1660 is not set +# CONFIG_LTC2632 is not set +# CONFIG_M62332 is not set +# CONFIG_MAX517 is not set +# CONFIG_MAX5522 is not set +# CONFIG_MAX5821 is not set +# CONFIG_MCP4725 is not set +# CONFIG_MCP4728 is not set +# CONFIG_MCP4922 is not set +# CONFIG_TI_DAC082S085 is not set +# CONFIG_TI_DAC5571 is not set +# CONFIG_TI_DAC7311 is not set +# CONFIG_TI_DAC7612 is not set +# CONFIG_VF610_DAC is not set +# end of Digital to analog converters + +# +# IIO dummy driver +# +# end of IIO dummy driver + +# +# Filters +# +# CONFIG_ADMV8818 is not set +# end of Filters + +# +# Frequency Synthesizers DDS/PLL +# + +# +# Clock Generator/Distribution +# +# CONFIG_AD9523 is not set +# end of Clock Generator/Distribution + +# +# Phase-Locked Loop (PLL) frequency synthesizers +# +# CONFIG_ADF4350 is not set +# CONFIG_ADF4371 is not set +# CONFIG_ADF4377 is not set +# CONFIG_ADMV1013 is not set +# CONFIG_ADMV1014 is not set +# CONFIG_ADMV4420 is not set +# CONFIG_ADRF6780 is not set +# end of Phase-Locked Loop (PLL) frequency synthesizers +# end of Frequency Synthesizers DDS/PLL + +# +# Digital gyroscope sensors +# +# CONFIG_ADIS16080 is not set +# CONFIG_ADIS16130 is not set +# CONFIG_ADIS16136 is not set +# CONFIG_ADIS16260 is not set +# CONFIG_ADXRS290 is not set +# CONFIG_ADXRS450 is not set +# CONFIG_BMG160 is not set +# CONFIG_FXAS21002C is not set +CONFIG_HID_SENSOR_GYRO_3D=m +# CONFIG_MPU3050_I2C is not set +# CONFIG_IIO_ST_GYRO_3AXIS is not set +# CONFIG_ITG3200 is not set +# end of Digital gyroscope sensors + +# +# Health Sensors +# + +# +# Heart Rate Monitors +# +# CONFIG_AFE4403 is not set +# CONFIG_AFE4404 is not set +# CONFIG_MAX30100 is not set +# CONFIG_MAX30102 is not set +# end of Heart Rate Monitors +# end of Health Sensors + +# +# Humidity sensors +# +# CONFIG_AM2315 is not set +# CONFIG_DHT11 is not set +# CONFIG_HDC100X is not set +# CONFIG_HDC2010 is not set +CONFIG_HID_SENSOR_HUMIDITY=m +# CONFIG_HTS221 is not set +# CONFIG_HTU21 is not set +# CONFIG_SI7005 is not set +# CONFIG_SI7020 is not set +# end of Humidity sensors + +# +# Inertial measurement units +# +# CONFIG_ADIS16400 is not set +# CONFIG_ADIS16460 is not set +# CONFIG_ADIS16475 is not set +# 
CONFIG_ADIS16480 is not set +# CONFIG_BMI160_I2C is not set +# CONFIG_BMI160_SPI is not set +# CONFIG_BOSCH_BNO055_I2C is not set +# CONFIG_FXOS8700_I2C is not set +# CONFIG_FXOS8700_SPI is not set +# CONFIG_KMX61 is not set +# CONFIG_INV_ICM42600_I2C is not set +# CONFIG_INV_ICM42600_SPI is not set +# CONFIG_INV_MPU6050_I2C is not set +# CONFIG_INV_MPU6050_SPI is not set +# CONFIG_IIO_ST_LSM6DSX is not set +# CONFIG_IIO_ST_LSM9DS0 is not set +# end of Inertial measurement units + +# +# Light sensors +# +# CONFIG_ACPI_ALS is not set +# CONFIG_ADJD_S311 is not set +# CONFIG_ADUX1020 is not set +# CONFIG_AL3010 is not set +# CONFIG_AL3320A is not set +# CONFIG_APDS9300 is not set +# CONFIG_APDS9960 is not set +# CONFIG_AS73211 is not set +# CONFIG_BH1750 is not set +# CONFIG_BH1780 is not set +# CONFIG_CM32181 is not set +# CONFIG_CM3232 is not set +# CONFIG_CM3323 is not set +# CONFIG_CM3605 is not set +# CONFIG_CM36651 is not set +# CONFIG_GP2AP002 is not set +# CONFIG_GP2AP020A00F is not set +# CONFIG_SENSORS_ISL29018 is not set +# CONFIG_SENSORS_ISL29028 is not set +# CONFIG_ISL29125 is not set +CONFIG_HID_SENSOR_ALS=m +CONFIG_HID_SENSOR_PROX=m +# CONFIG_JSA1212 is not set +# CONFIG_ROHM_BU27008 is not set +# CONFIG_ROHM_BU27034 is not set +# CONFIG_RPR0521 is not set +# CONFIG_LTR501 is not set +# CONFIG_LTRF216A is not set +# CONFIG_LV0104CS is not set +# CONFIG_MAX44000 is not set +# CONFIG_MAX44009 is not set +# CONFIG_NOA1305 is not set +# CONFIG_OPT3001 is not set +# CONFIG_OPT4001 is not set +# CONFIG_PA12203001 is not set +# CONFIG_SI1133 is not set +# CONFIG_SI1145 is not set +# CONFIG_STK3310 is not set +# CONFIG_ST_UVIS25 is not set +# CONFIG_TCS3414 is not set +# CONFIG_TCS3472 is not set +# CONFIG_SENSORS_TSL2563 is not set +# CONFIG_TSL2583 is not set +# CONFIG_TSL2591 is not set +# CONFIG_TSL2772 is not set +# CONFIG_TSL4531 is not set +# CONFIG_US5182D is not set +# CONFIG_VCNL4000 is not set +# CONFIG_VCNL4035 is not set +# CONFIG_VEML6030 is not set +# CONFIG_VEML6070 is not set +# CONFIG_VL6180 is not set +# CONFIG_ZOPT2201 is not set +# end of Light sensors + +# +# Magnetometer sensors +# +# CONFIG_AK8974 is not set +# CONFIG_AK8975 is not set +# CONFIG_AK09911 is not set +# CONFIG_BMC150_MAGN_I2C is not set +# CONFIG_BMC150_MAGN_SPI is not set +# CONFIG_MAG3110 is not set +CONFIG_HID_SENSOR_MAGNETOMETER_3D=m +# CONFIG_MMC35240 is not set +# CONFIG_IIO_ST_MAGN_3AXIS is not set +# CONFIG_SENSORS_HMC5843_I2C is not set +# CONFIG_SENSORS_HMC5843_SPI is not set +# CONFIG_SENSORS_RM3100_I2C is not set +# CONFIG_SENSORS_RM3100_SPI is not set +# CONFIG_TI_TMAG5273 is not set +# CONFIG_YAMAHA_YAS530 is not set +# end of Magnetometer sensors + +# +# Multiplexers +# +# CONFIG_IIO_MUX is not set +# end of Multiplexers + +# +# Inclinometer sensors +# +CONFIG_HID_SENSOR_INCLINOMETER_3D=m +CONFIG_HID_SENSOR_DEVICE_ROTATION=m +# end of Inclinometer sensors + +# +# Triggers - standalone +# +# CONFIG_IIO_INTERRUPT_TRIGGER is not set +# CONFIG_IIO_SYSFS_TRIGGER is not set +# end of Triggers - standalone + +# +# Linear and angular position sensors +# +# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set +# end of Linear and angular position sensors + +# +# Digital potentiometers +# +# CONFIG_AD5110 is not set +# CONFIG_AD5272 is not set +# CONFIG_DS1803 is not set +# CONFIG_MAX5432 is not set +# CONFIG_MAX5481 is not set +# CONFIG_MAX5487 is not set +# CONFIG_MCP4018 is not set +# CONFIG_MCP4131 is not set +# CONFIG_MCP4531 is not set +# CONFIG_MCP41010 is not set +# CONFIG_TPL0102 is not 
set +# CONFIG_X9250 is not set +# end of Digital potentiometers + +# +# Digital potentiostats +# +# CONFIG_LMP91000 is not set +# end of Digital potentiostats + +# +# Pressure sensors +# +# CONFIG_ABP060MG is not set +# CONFIG_BMP280 is not set +# CONFIG_DLHL60D is not set +# CONFIG_DPS310 is not set +CONFIG_HID_SENSOR_PRESS=m +# CONFIG_HP03 is not set +# CONFIG_ICP10100 is not set +# CONFIG_MPL115_I2C is not set +# CONFIG_MPL115_SPI is not set +# CONFIG_MPL3115 is not set +# CONFIG_MPRLS0025PA is not set +# CONFIG_MS5611 is not set +# CONFIG_MS5637 is not set +# CONFIG_IIO_ST_PRESS is not set +# CONFIG_T5403 is not set +# CONFIG_HP206C is not set +# CONFIG_ZPA2326 is not set +# end of Pressure sensors + +# +# Lightning sensors +# +# CONFIG_AS3935 is not set +# end of Lightning sensors + +# +# Proximity and distance sensors +# +# CONFIG_IRSD200 is not set +# CONFIG_ISL29501 is not set +# CONFIG_LIDAR_LITE_V2 is not set +# CONFIG_MB1232 is not set +# CONFIG_PING is not set +# CONFIG_RFD77402 is not set +# CONFIG_SRF04 is not set +# CONFIG_SX9310 is not set +# CONFIG_SX9324 is not set +# CONFIG_SX9360 is not set +# CONFIG_SX9500 is not set +# CONFIG_SRF08 is not set +# CONFIG_VCNL3020 is not set +# CONFIG_VL53L0X_I2C is not set +# end of Proximity and distance sensors + +# +# Resolver to digital converters +# +# CONFIG_AD2S90 is not set +# CONFIG_AD2S1200 is not set +# end of Resolver to digital converters + +# +# Temperature sensors +# +# CONFIG_LTC2983 is not set +# CONFIG_MAXIM_THERMOCOUPLE is not set +CONFIG_HID_SENSOR_TEMP=m +# CONFIG_MLX90614 is not set +# CONFIG_MLX90632 is not set +# CONFIG_TMP006 is not set +# CONFIG_TMP007 is not set +# CONFIG_TMP117 is not set +# CONFIG_TSYS01 is not set +# CONFIG_TSYS02D is not set +# CONFIG_MAX30208 is not set +# CONFIG_MAX31856 is not set +# CONFIG_MAX31865 is not set +# end of Temperature sensors + +CONFIG_NTB=m +# CONFIG_NTB_MSI is not set +# CONFIG_NTB_AMD is not set +# CONFIG_NTB_IDT is not set +# CONFIG_NTB_INTEL is not set +# CONFIG_NTB_EPF is not set +# CONFIG_NTB_SWITCHTEC is not set +# CONFIG_NTB_PINGPONG is not set +# CONFIG_NTB_TOOL is not set +# CONFIG_NTB_PERF is not set +# CONFIG_NTB_TRANSPORT is not set +CONFIG_PWM=y +CONFIG_PWM_SYSFS=y +# CONFIG_PWM_DEBUG is not set +# CONFIG_PWM_CLK is not set +# CONFIG_PWM_DWC is not set +CONFIG_PWM_LPSS=m +CONFIG_PWM_LPSS_PCI=m +CONFIG_PWM_LPSS_PLATFORM=m +# CONFIG_PWM_PCA9685 is not set + +# +# IRQ chip support +# +# end of IRQ chip support + +# CONFIG_IPACK_BUS is not set +CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_TI_SYSCON is not set +# CONFIG_RESET_TI_TPS380X is not set + +# +# PHY Subsystem +# +# CONFIG_GENERIC_PHY is not set +# CONFIG_USB_LGM_PHY is not set +# CONFIG_PHY_CAN_TRANSCEIVER is not set + +# +# PHY drivers for Broadcom platforms +# +# CONFIG_BCM_KONA_USB2_PHY is not set +# end of PHY drivers for Broadcom platforms + +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set +# CONFIG_PHY_CPCAP_USB is not set +# CONFIG_PHY_INTEL_LGM_EMMC is not set +# end of PHY Subsystem + +CONFIG_POWERCAP=y +CONFIG_INTEL_RAPL_CORE=m +CONFIG_INTEL_RAPL=m +# CONFIG_INTEL_RAPL_TPMI is not set +CONFIG_IDLE_INJECT=y +# CONFIG_MCB is not set + +# +# Performance monitor support +# +# CONFIG_DWC_PCIE_PMU is not set +# end of Performance monitor support + +CONFIG_RAS=y +CONFIG_RAS_CEC=y +# CONFIG_RAS_CEC_DEBUG is not set +# CONFIG_USB4 is not set + +# +# Android +# +# CONFIG_ANDROID_BINDER_IPC is not set +# end of Android + +CONFIG_LIBNVDIMM=y +CONFIG_BLK_DEV_PMEM=y 
+CONFIG_ND_CLAIM=y +CONFIG_ND_BTT=y +CONFIG_BTT=y +CONFIG_ND_PFN=y +CONFIG_NVDIMM_PFN=y +CONFIG_NVDIMM_DAX=y +CONFIG_NVDIMM_KEYS=y +# CONFIG_NVDIMM_SECURITY_TEST is not set +CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=y +CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_TEE=m +CONFIG_AMDTEE=m +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +CONFIG_EXT4_DEBUG=y +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +CONFIG_XFS_WARN=y +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +CONFIG_QUOTA_DEBUG=y +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is 
not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +# end of CD-ROM/DVD Filesystems + +# +# DOS/FAT/EXFAT/NT Filesystems +# +CONFIG_FAT_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_FAT_DEFAULT_CODEPAGE=437 +CONFIG_FAT_DEFAULT_IOCHARSET="ascii" +# CONFIG_FAT_DEFAULT_UTF8 is not set +# CONFIG_EXFAT_FS is not set +# CONFIG_NTFS_FS is not set +CONFIG_NTFS3_FS=m +# CONFIG_NTFS3_64BIT_CLUSTER is not set +# CONFIG_NTFS3_LZX_XPRESS is not set +# CONFIG_NTFS3_FS_POSIX_ACL is not set +# end of DOS/FAT/EXFAT/NT Filesystems + +# +# Pseudo filesystems +# +CONFIG_PROC_FS=y +CONFIG_PROC_KCORE=y +CONFIG_PROC_VMCORE=y +CONFIG_PROC_VMCORE_DEVICE_DUMP=y +CONFIG_PROC_SYSCTL=y +CONFIG_PROC_PAGE_MONITOR=y +CONFIG_PROC_CHILDREN=y +CONFIG_PROC_PID_ARCH_STATUS=y +CONFIG_PROC_CPU_RESCTRL=y +CONFIG_KERNFS=y +CONFIG_SYSFS=y +CONFIG_TMPFS=y +CONFIG_TMPFS_POSIX_ACL=y +CONFIG_TMPFS_XATTR=y +# CONFIG_TMPFS_INODE64 is not set +# CONFIG_TMPFS_QUOTA is not set +CONFIG_HUGETLBFS=y +CONFIG_HUGETLB_PAGE=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y +CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y +CONFIG_CONFIGFS_FS=y +CONFIG_EFIVAR_FS=y +# end of Pseudo filesystems + +CONFIG_MISC_FILESYSTEMS=y +# CONFIG_ORANGEFS_FS is not set +# CONFIG_ADFS_FS is not set +# CONFIG_AFFS_FS is not set +# CONFIG_ECRYPT_FS is not set +# CONFIG_HFS_FS is not set +# CONFIG_HFSPLUS_FS is not set +# CONFIG_BEFS_FS is not set +# CONFIG_BFS_FS is not set +# CONFIG_EFS_FS is not set +# CONFIG_JFFS2_FS is not set +# CONFIG_UBIFS_FS is not set +CONFIG_CRAMFS=m +CONFIG_CRAMFS_BLOCKDEV=y +# CONFIG_CRAMFS_MTD is not set +CONFIG_SQUASHFS=m +# CONFIG_SQUASHFS_FILE_CACHE is not set +CONFIG_SQUASHFS_FILE_DIRECT=y +CONFIG_SQUASHFS_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set +CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set +# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set +CONFIG_SQUASHFS_XATTR=y +CONFIG_SQUASHFS_ZLIB=y +# CONFIG_SQUASHFS_LZ4 is not set +CONFIG_SQUASHFS_LZO=y +CONFIG_SQUASHFS_XZ=y +# CONFIG_SQUASHFS_ZSTD is not set +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set +# CONFIG_SQUASHFS_EMBEDDED is not set +CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_VXFS_FS is not set +# CONFIG_MINIX_FS is not set +# CONFIG_OMFS_FS is not set +# CONFIG_HPFS_FS is not set +# CONFIG_QNX4FS_FS is not set +# CONFIG_QNX6FS_FS is not set +# CONFIG_ROMFS_FS is not set +CONFIG_PSTORE=y +CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240 +CONFIG_PSTORE_COMPRESS=y +CONFIG_PSTORE_CONSOLE=y +# CONFIG_PSTORE_PMSG is not set +# CONFIG_PSTORE_FTRACE is not set +CONFIG_PSTORE_RAM=y +# CONFIG_PSTORE_BLK is not set +# CONFIG_SYSV_FS is not set +# CONFIG_UFS_FS is not set +CONFIG_EROFS_FS=m +# CONFIG_EROFS_FS_DEBUG is not set +CONFIG_EROFS_FS_XATTR=y +CONFIG_EROFS_FS_POSIX_ACL=y +CONFIG_EROFS_FS_SECURITY=y +CONFIG_EROFS_FS_ZIP=y +CONFIG_EROFS_FS_ZIP_LZMA=y +# CONFIG_EROFS_FS_ZIP_DEFLATE is not set +CONFIG_EROFS_FS_ONDEMAND=y +# CONFIG_EROFS_FS_PCPU_KTHREAD is not set +CONFIG_NETWORK_FILESYSTEMS=y +CONFIG_NFS_FS=m +# CONFIG_NFS_V2 is not set +CONFIG_NFS_V3=m +CONFIG_NFS_V3_ACL=y +CONFIG_NFS_V4=m +# CONFIG_NFS_SWAP is not set +CONFIG_NFS_V4_1=y +CONFIG_NFS_V4_2=y +CONFIG_PNFS_FILE_LAYOUT=m +CONFIG_PNFS_BLOCK=m +CONFIG_PNFS_FLEXFILE_LAYOUT=m +CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" +# CONFIG_NFS_V4_1_MIGRATION is not set +CONFIG_NFS_V4_SECURITY_LABEL=y 
+CONFIG_NFS_FSCACHE=y +# CONFIG_NFS_USE_LEGACY_DNS is not set +CONFIG_NFS_USE_KERNEL_DNS=y +CONFIG_NFS_DEBUG=y +CONFIG_NFS_DISABLE_UDP_SUPPORT=y +# CONFIG_NFS_V4_2_READ_PLUS is not set +CONFIG_NFSD=m +# CONFIG_NFSD_V2 is not set +CONFIG_NFSD_V3_ACL=y +CONFIG_NFSD_V4=y +CONFIG_NFSD_PNFS=y +# CONFIG_NFSD_BLOCKLAYOUT is not set +CONFIG_NFSD_SCSILAYOUT=y +# CONFIG_NFSD_FLEXFILELAYOUT is not set +# CONFIG_NFSD_V4_2_INTER_SSC is not set +CONFIG_NFSD_V4_SECURITY_LABEL=y +CONFIG_GRACE_PERIOD=m +CONFIG_LOCKD=m +CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=m +CONFIG_NFS_COMMON=y +CONFIG_NFS_V4_2_SSC_HELPER=y +CONFIG_SUNRPC=m +CONFIG_SUNRPC_GSS=m +CONFIG_SUNRPC_BACKCHANNEL=y +CONFIG_RPCSEC_GSS_KRB5=m +CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set +# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set +CONFIG_SUNRPC_DEBUG=y +CONFIG_SUNRPC_XPRT_RDMA=m +CONFIG_CEPH_FS=m +# CONFIG_CEPH_FSCACHE is not set +CONFIG_CEPH_FS_POSIX_ACL=y +# CONFIG_CEPH_FS_SECURITY_LABEL is not set +CONFIG_CIFS=m +# CONFIG_CIFS_STATS2 is not set +CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_DEBUG=y +# CONFIG_CIFS_DEBUG2 is not set +# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set +CONFIG_CIFS_DFS_UPCALL=y +# CONFIG_CIFS_SWN_UPCALL is not set +# CONFIG_CIFS_SMB_DIRECT is not set +# CONFIG_CIFS_FSCACHE is not set +# CONFIG_SMB_SERVER is not set +CONFIG_SMBFS=m +# CONFIG_CODA_FS is not set +# CONFIG_AFS_FS is not set +CONFIG_NLS=y +CONFIG_NLS_DEFAULT="utf8" +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_NLS_MAC_ROMAN=m +CONFIG_NLS_MAC_CELTIC=m +CONFIG_NLS_MAC_CENTEURO=m +CONFIG_NLS_MAC_CROATIAN=m +CONFIG_NLS_MAC_CYRILLIC=m +CONFIG_NLS_MAC_GAELIC=m +CONFIG_NLS_MAC_GREEK=m +CONFIG_NLS_MAC_ICELAND=m +CONFIG_NLS_MAC_INUIT=m +CONFIG_NLS_MAC_ROMANIAN=m +CONFIG_NLS_MAC_TURKISH=m +CONFIG_NLS_UTF8=m +CONFIG_NLS_UCS2_UTILS=m +CONFIG_DLM=m +CONFIG_DLM_DEBUG=y +# CONFIG_UNICODE is not set +CONFIG_IO_WQ=y +# end of File systems + +# +# Security options +# +CONFIG_KEYS=y +# CONFIG_KEYS_REQUEST_CACHE is not set +CONFIG_PERSISTENT_KEYRINGS=y +CONFIG_TRUSTED_KEYS=y +CONFIG_TRUSTED_KEYS_TPM=y +CONFIG_ENCRYPTED_KEYS=y +# CONFIG_USER_DECRYPTED_DATA is not set +# CONFIG_KEY_DH_OPERATIONS is not set +# CONFIG_SECURITY_DMESG_RESTRICT is not set +CONFIG_SECURITY=y +CONFIG_SECURITYFS=y +CONFIG_SECURITY_NETWORK=y +CONFIG_SECURITY_INFINIBAND=y +CONFIG_SECURITY_NETWORK_XFRM=y +CONFIG_SECURITY_PATH=y +CONFIG_INTEL_TXT=y +CONFIG_LSM_MMAP_MIN_ADDR=65535 +CONFIG_HARDENED_USERCOPY=y +CONFIG_FORTIFY_SOURCE=y +# CONFIG_STATIC_USERMODEHELPER is not set 
+CONFIG_SECURITY_SELINUX=y +CONFIG_SECURITY_SELINUX_BOOTPARAM=y +CONFIG_SECURITY_SELINUX_DEVELOP=y +CONFIG_SECURITY_SELINUX_AVC_STATS=y +CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y 
+CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m +CONFIG_CRYPTO_AKCIPHER2=y +CONFIG_CRYPTO_AKCIPHER=y +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=m +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_MANAGER=y +CONFIG_CRYPTO_MANAGER2=y +CONFIG_CRYPTO_USER=m +# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set +# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set +CONFIG_CRYPTO_NULL=y +CONFIG_CRYPTO_NULL2=y +CONFIG_CRYPTO_PCRYPT=m +CONFIG_CRYPTO_CRYPTD=y +CONFIG_CRYPTO_AUTHENC=m +CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_SIMD=y +# end of Crypto core or helper + +# +# Public-key cryptography +# +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=m +# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set +CONFIG_CRYPTO_ECC=m +CONFIG_CRYPTO_ECDH=m +# CONFIG_CRYPTO_ECDSA is not set +# CONFIG_CRYPTO_ECRDSA is not set +CONFIG_CRYPTO_SM2=y +# CONFIG_CRYPTO_CURVE25519 is not set +# end of Public-key cryptography + +# +# Block ciphers +# +CONFIG_CRYPTO_AES=y +# CONFIG_CRYPTO_AES_TI is not set +CONFIG_CRYPTO_ANUBIS=m +# CONFIG_CRYPTO_ARIA is not set +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_BLOWFISH_COMMON=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST_COMMON=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_DES=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SEED=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_SM4=m +CONFIG_CRYPTO_SM4_GENERIC=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +CONFIG_CRYPTO_TWOFISH_COMMON=m +# end of Block ciphers + +# +# Length-preserving ciphers and modes +# +# CONFIG_CRYPTO_ADIANTUM is not set +CONFIG_CRYPTO_ARC4=m +CONFIG_CRYPTO_CHACHA20=m +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_CFB=y +CONFIG_CRYPTO_CTR=y +CONFIG_CRYPTO_CTS=y +CONFIG_CRYPTO_ECB=y +# CONFIG_CRYPTO_HCTR2 is not set +# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_OFB=y +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_XTS=y +# end of Length-preserving ciphers and modes + +# +# AEAD (authenticated encryption with associated data) ciphers +# +# CONFIG_CRYPTO_AEGIS128 is not set +CONFIG_CRYPTO_CHACHA20POLY1305=m +CONFIG_CRYPTO_CCM=y +CONFIG_CRYPTO_GCM=y +CONFIG_CRYPTO_GENIV=m +CONFIG_CRYPTO_SEQIV=m +CONFIG_CRYPTO_ECHAINIV=m +CONFIG_CRYPTO_ESSIV=m +# end of AEAD (authenticated encryption with associated data) ciphers + +# +# Hashes, digests, and MACs +# +CONFIG_CRYPTO_BLAKE2B=m +CONFIG_CRYPTO_CMAC=y +CONFIG_CRYPTO_GHASH=y +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=y +CONFIG_CRYPTO_MD5=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_POLY1305=m +CONFIG_CRYPTO_RMD160=m +CONFIG_CRYPTO_SHA1=y +CONFIG_CRYPTO_SHA256=y +CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y +CONFIG_CRYPTO_SM3=y +CONFIG_CRYPTO_SM3_GENERIC=y +# CONFIG_CRYPTO_STREEBOG is not set +CONFIG_CRYPTO_VMAC=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_XXHASH=m +# end of Hashes, digests, and MACs + +# +# CRCs (cyclic redundancy checks) +# +CONFIG_CRYPTO_CRC32C=y +CONFIG_CRYPTO_CRC32=m +CONFIG_CRYPTO_CRCT10DIF=y +CONFIG_CRYPTO_CRC64_ROCKSOFT=m +# end of CRCs (cyclic redundancy checks) + +# +# Compression +# +CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set +CONFIG_CRYPTO_LZ4=m +CONFIG_CRYPTO_LZ4HC=m +CONFIG_CRYPTO_ZSTD=m +# end of Compression + +# +# Random number generation +# +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=y +CONFIG_CRYPTO_DRBG_HMAC=y +CONFIG_CRYPTO_DRBG_HASH=y +CONFIG_CRYPTO_DRBG_CTR=y 
+CONFIG_CRYPTO_DRBG=y +CONFIG_CRYPTO_JITTERENTROPY=y +# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set +# end of Random number generation + +# +# Userspace interface +# +CONFIG_CRYPTO_USER_API=y +CONFIG_CRYPTO_USER_API_HASH=y +CONFIG_CRYPTO_USER_API_SKCIPHER=y +CONFIG_CRYPTO_USER_API_RNG=y +# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set +CONFIG_CRYPTO_USER_API_AEAD=y +CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y +# CONFIG_CRYPTO_STATS is not set +# end of Userspace interface + +CONFIG_CRYPTO_HASH_INFO=y + +# +# Accelerated Cryptographic Algorithms for CPU (x86) +# +CONFIG_CRYPTO_CURVE25519_X86=m +CONFIG_CRYPTO_AES_NI_INTEL=y +CONFIG_CRYPTO_BLOWFISH_X86_64=m +CONFIG_CRYPTO_CAMELLIA_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_CAST5_AVX_X86_64=m +CONFIG_CRYPTO_CAST6_AVX_X86_64=m +CONFIG_CRYPTO_DES3_EDE_X86_64=m +CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX_X86_64=m +CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m +CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m +CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m +CONFIG_CRYPTO_TWOFISH_X86_64=m +CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m +CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m +# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set +# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set +# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set +CONFIG_CRYPTO_CHACHA20_X86_64=m +# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set +# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set +CONFIG_CRYPTO_BLAKE2S_X86=y +# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set +CONFIG_CRYPTO_POLY1305_X86_64=m +CONFIG_CRYPTO_SHA1_SSSE3=y +CONFIG_CRYPTO_SHA256_SSSE3=y +CONFIG_CRYPTO_SHA512_SSSE3=y +CONFIG_CRYPTO_SM3_AVX_X86_64=m +CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m +CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m +CONFIG_CRYPTO_CRC32C_INTEL=m +CONFIG_CRYPTO_CRC32_PCLMUL=m +CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m +CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m +# end of Accelerated Cryptographic Algorithms for CPU (x86) + +CONFIG_CRYPTO_HW=y +CONFIG_CRYPTO_DEV_PADLOCK=m +CONFIG_CRYPTO_DEV_PADLOCK_AES=m +CONFIG_CRYPTO_DEV_PADLOCK_SHA=m +# CONFIG_CRYPTO_DEV_ZHAOXIN is not set +# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set +# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set +CONFIG_CRYPTO_DEV_CCP=y +CONFIG_CRYPTO_DEV_CCP_DD=m +CONFIG_CRYPTO_DEV_SP_CCP=y +CONFIG_CRYPTO_DEV_CCP_CRYPTO=m +CONFIG_CRYPTO_DEV_SP_PSP=y +CONFIG_HYGON_PSP2CPU_CMD=y +CONFIG_TCG_HYGON=m +CONFIG_TCM_HYGON=m +CONFIG_TDM_DEV_HYGON=y +CONFIG_TDM_KERNEL_GUARD=m +CONFIG_HYGON_GM=y +# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set +CONFIG_CRYPTO_DEV_NITROX=m +CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m +CONFIG_CRYPTO_DEV_QAT=m +CONFIG_CRYPTO_DEV_QAT_DH895xCC=m +CONFIG_CRYPTO_DEV_QAT_C3XXX=m +CONFIG_CRYPTO_DEV_QAT_C62X=m +CONFIG_CRYPTO_DEV_QAT_4XXX=m +# CONFIG_CRYPTO_DEV_QAT_420XX is not set +CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m +CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m +CONFIG_CRYPTO_DEV_QAT_C62XVF=m +# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set +CONFIG_CRYPTO_DEV_CHELSIO=m +# CONFIG_CRYPTO_DEV_VIRTIO is not set +# CONFIG_CRYPTO_DEV_SAFEXCEL is not set +# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set +CONFIG_CRYPTO_DEV_TSSE=m +CONFIG_ASYMMETRIC_KEY_TYPE=y +CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set +CONFIG_PKCS7_MESSAGE_PARSER=y +# CONFIG_PKCS7_TEST_KEY is not set +CONFIG_SIGNED_PE_FILE_VERIFICATION=y +# CONFIG_FIPS_SIGNATURE_SELFTEST is not set + +# +# Certificates for signature checking +# 
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem" +CONFIG_MODULE_SIG_KEY_TYPE_RSA=y +# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +CONFIG_SYSTEM_EXTRA_CERTIFICATE=y +CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192 +CONFIG_SECONDARY_TRUSTED_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_KEYRING=y +CONFIG_SYSTEM_BLACKLIST_HASH_LIST="" +# CONFIG_SYSTEM_REVOCATION_LIST is not set +# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set +# end of Certificates for signature checking + +CONFIG_BINARY_PRINTF=y + +# +# Library routines +# +CONFIG_RAID6_PQ=m +CONFIG_RAID6_PQ_BENCHMARK=y +# CONFIG_PACKING is not set +CONFIG_BITREVERSE=y +CONFIG_GENERIC_STRNCPY_FROM_USER=y +CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GENERIC_NET_UTILS=y +CONFIG_CORDIC=m +# CONFIG_PRIME_NUMBERS is not set +CONFIG_RATIONAL=y +CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_IOMAP=y +CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y +CONFIG_ARCH_HAS_FAST_MULTIPLIER=y +CONFIG_ARCH_USE_SYM_ANNOTATIONS=y + +# +# Crypto library routines +# +CONFIG_CRYPTO_LIB_UTILS=y +CONFIG_CRYPTO_LIB_AES=y +CONFIG_CRYPTO_LIB_ARC4=m +CONFIG_CRYPTO_LIB_GF128MUL=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y +CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y +CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m +CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m +CONFIG_CRYPTO_LIB_CHACHA=m +CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m +CONFIG_CRYPTO_LIB_CURVE25519=m +CONFIG_CRYPTO_LIB_DES=m +CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11 +CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m +CONFIG_CRYPTO_LIB_POLY1305=m +CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m +CONFIG_CRYPTO_LIB_SHA1=y +CONFIG_CRYPTO_LIB_SHA256=y +# end of Crypto library routines + +CONFIG_CRC_CCITT=y +CONFIG_CRC16=y +CONFIG_CRC_T10DIF=y +CONFIG_CRC64_ROCKSOFT=m +CONFIG_CRC_ITU_T=m +CONFIG_CRC32=y +# CONFIG_CRC32_SELFTEST is not set +CONFIG_CRC32_SLICEBY8=y +# CONFIG_CRC32_SLICEBY4 is not set +# CONFIG_CRC32_SARWATE is not set +# CONFIG_CRC32_BIT is not set +CONFIG_CRC64=m +# CONFIG_CRC4 is not set +CONFIG_CRC7=m +CONFIG_LIBCRC32C=m +CONFIG_CRC8=m +CONFIG_XXHASH=y +CONFIG_RANDOM32_SELFTEST=y +CONFIG_ZLIB_INFLATE=y +CONFIG_ZLIB_DEFLATE=y +CONFIG_LZO_COMPRESS=y +CONFIG_LZO_DECOMPRESS=y +CONFIG_LZ4_COMPRESS=m +CONFIG_LZ4HC_COMPRESS=m +CONFIG_LZ4_DECOMPRESS=y +CONFIG_ZSTD_COMMON=y +CONFIG_ZSTD_COMPRESS=m +CONFIG_ZSTD_DECOMPRESS=y +CONFIG_XZ_DEC=y +CONFIG_XZ_DEC_X86=y +CONFIG_XZ_DEC_POWERPC=y +CONFIG_XZ_DEC_IA64=y +CONFIG_XZ_DEC_ARM=y +CONFIG_XZ_DEC_ARMTHUMB=y +CONFIG_XZ_DEC_SPARC=y +CONFIG_XZ_DEC_MICROLZMA=y +CONFIG_XZ_DEC_BCJ=y +# CONFIG_XZ_DEC_TEST is not set +CONFIG_DECOMPRESS_GZIP=y +CONFIG_DECOMPRESS_BZIP2=y +CONFIG_DECOMPRESS_LZMA=y +CONFIG_DECOMPRESS_XZ=y +CONFIG_DECOMPRESS_LZO=y +CONFIG_DECOMPRESS_LZ4=y +CONFIG_DECOMPRESS_ZSTD=y +CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=m +CONFIG_TEXTSEARCH_BM=m +CONFIG_TEXTSEARCH_FSM=m +CONFIG_BTREE=y +CONFIG_INTERVAL_TREE=y +CONFIG_XARRAY_MULTI=y +CONFIG_ASSOCIATIVE_ARRAY=y +CONFIG_HAS_IOMEM=y +CONFIG_HAS_IOPORT=y +CONFIG_HAS_IOPORT_MAP=y +CONFIG_HAS_DMA=y +CONFIG_DMA_OPS=y +CONFIG_NEED_SG_DMA_FLAGS=y +CONFIG_NEED_SG_DMA_LENGTH=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_DMA_ADDR_T_64BIT=y +CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y +CONFIG_SWIOTLB=y +# CONFIG_SWIOTLB_DYNAMIC is not set +CONFIG_DMA_COHERENT_POOL=y +CONFIG_DMA_CMA=y +# CONFIG_DMA_NUMA_CMA is not set + +# +# Default contiguous memory area size: +# 
+CONFIG_CMA_SIZE_MBYTES=0 +CONFIG_CMA_SIZE_SEL_MBYTES=y +# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set +# CONFIG_CMA_SIZE_SEL_MIN is not set +# CONFIG_CMA_SIZE_SEL_MAX is not set +CONFIG_CMA_ALIGNMENT=8 +CONFIG_DMA_API_DEBUG=y +CONFIG_DMA_API_DEBUG_SG=y +# CONFIG_DMA_MAP_BENCHMARK is not set +CONFIG_SGL_ALLOC=y +CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y +CONFIG_CPU_RMAP=y +CONFIG_DQL=y +CONFIG_GLOB=y +# CONFIG_GLOB_SELFTEST is not set +CONFIG_NLATTR=y +CONFIG_CLZ_TAB=y +CONFIG_IRQ_POLL=y +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_DIMLIB=y +CONFIG_OID_REGISTRY=y +CONFIG_UCS2_STRING=y +CONFIG_HAVE_GENERIC_VDSO=y +CONFIG_GENERIC_GETTIMEOFDAY=y +CONFIG_GENERIC_VDSO_TIME_NS=y +CONFIG_FONT_SUPPORT=y +# CONFIG_FONTS is not set +CONFIG_FONT_8x8=y +CONFIG_FONT_8x16=y +CONFIG_SG_POOL=y +CONFIG_ARCH_HAS_PMEM_API=y +CONFIG_MEMREGION=y +CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y +CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y +CONFIG_ARCH_HAS_COPY_MC=y +CONFIG_ARCH_STACKWALK=y +CONFIG_STACKDEPOT=y +CONFIG_STACKDEPOT_ALWAYS_INIT=y +CONFIG_SBITMAP=y +CONFIG_PARMAN=m +CONFIG_OBJAGG=m +# end of Library routines + +CONFIG_PLDMFW=y +CONFIG_ASN1_ENCODER=y + +# +# Kernel hacking +# + +# +# printk and dmesg options +# +CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_CALLER is not set +# CONFIG_STACKTRACE_BUILD_ID is not set +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 +CONFIG_CONSOLE_LOGLEVEL_QUIET=4 +CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 +CONFIG_BOOT_PRINTK_DELAY=y +CONFIG_DYNAMIC_DEBUG=y +CONFIG_DYNAMIC_DEBUG_CORE=y +CONFIG_SYMBOLIC_ERRNAME=y +CONFIG_DEBUG_BUGVERBOSE=y +# end of printk and dmesg options + +CONFIG_DEBUG_KERNEL=y +CONFIG_DEBUG_MISC=y + +# +# Compile-time checks and compiler options +# +CONFIG_DEBUG_INFO=y +CONFIG_AS_HAS_NON_CONST_LEB128=y +# CONFIG_DEBUG_INFO_NONE is not set +CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y +# CONFIG_DEBUG_INFO_DWARF4 is not set +# CONFIG_DEBUG_INFO_DWARF5 is not set +# CONFIG_DEBUG_INFO_REDUCED is not set +CONFIG_DEBUG_INFO_COMPRESSED_NONE=y +# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set +# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set +# CONFIG_DEBUG_INFO_SPLIT is not set +CONFIG_DEBUG_INFO_BTF=y +# CONFIG_GDB_SCRIPTS is not set +CONFIG_FRAME_WARN=2048 +CONFIG_STRIP_ASM_SYMS=y +# CONFIG_READABLE_ASM is not set +# CONFIG_HEADERS_INSTALL is not set +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y +CONFIG_OBJTOOL=y +# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set +# end of Compile-time checks and compiler options + +# +# Generic Kernel Debugging Instruments +# +CONFIG_MAGIC_SYSRQ=y +CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 +CONFIG_MAGIC_SYSRQ_SERIAL=y +CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE="" +CONFIG_DEBUG_FS=y +CONFIG_DEBUG_FS_ALLOW_ALL=y +# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set +# CONFIG_DEBUG_FS_ALLOW_NONE is not set +CONFIG_HAVE_ARCH_KGDB=y +CONFIG_KGDB=y +CONFIG_KGDB_HONOUR_BLOCKLIST=y +CONFIG_KGDB_SERIAL_CONSOLE=y +CONFIG_KGDB_TESTS=y +# CONFIG_KGDB_TESTS_ON_BOOT is not set +CONFIG_KGDB_LOW_LEVEL_TRAP=y +CONFIG_KGDB_KDB=y +CONFIG_KDB_DEFAULT_ENABLE=0x1 +CONFIG_KDB_KEYBOARD=y +CONFIG_KDB_CONTINUE_CATASTROPHIC=0 +CONFIG_ARCH_HAS_EARLY_DEBUG=y +CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +CONFIG_UBSAN=y +# CONFIG_UBSAN_TRAP is not set +CONFIG_CC_HAS_UBSAN_BOUNDS_STRICT=y +CONFIG_CC_HAS_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_BOUNDS=y +CONFIG_UBSAN_BOUNDS_STRICT=y +CONFIG_UBSAN_ARRAY_BOUNDS=y +CONFIG_UBSAN_SHIFT=y +# CONFIG_UBSAN_DIV_ZERO is not set +CONFIG_UBSAN_BOOL=y +CONFIG_UBSAN_ENUM=y +# CONFIG_UBSAN_ALIGNMENT is not set +CONFIG_UBSAN_SANITIZE_ALL=y +# CONFIG_TEST_UBSAN is 
not set +CONFIG_HAVE_ARCH_KCSAN=y +CONFIG_HAVE_KCSAN_COMPILER=y +# end of Generic Kernel Debugging Instruments + +# +# Networking Debugging +# +# CONFIG_NET_DEV_REFCNT_TRACKER is not set +# CONFIG_NET_NS_REFCNT_TRACKER is not set +# CONFIG_DEBUG_NET is not set +# end of Networking Debugging + +# +# Memory Debugging +# +CONFIG_PAGE_EXTENSION=y +CONFIG_DEBUG_PAGEALLOC=y +# CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT is not set +CONFIG_SLUB_DEBUG=y +# CONFIG_SLUB_DEBUG_ON is not set +# CONFIG_PAGE_OWNER is not set +# CONFIG_PAGE_TABLE_CHECK is not set +# CONFIG_PAGE_POISONING is not set +CONFIG_DEBUG_PAGE_REF=y +# CONFIG_DEBUG_RODATA_TEST is not set +CONFIG_ARCH_HAS_DEBUG_WX=y +# CONFIG_DEBUG_WX is not set +CONFIG_GENERIC_PTDUMP=y +# CONFIG_PTDUMP_DEBUGFS is not set +CONFIG_HAVE_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK=y +CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000 +CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y +CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y +# CONFIG_PER_VMA_LOCK_STATS is not set +CONFIG_DEBUG_OBJECTS=y +# CONFIG_DEBUG_OBJECTS_SELFTEST is not set +CONFIG_DEBUG_OBJECTS_FREE=y +CONFIG_DEBUG_OBJECTS_TIMERS=y +CONFIG_DEBUG_OBJECTS_WORK=y +CONFIG_DEBUG_OBJECTS_RCU_HEAD=y +CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y +CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1 +# CONFIG_SHRINKER_DEBUG is not set +CONFIG_DEBUG_STACK_USAGE=y +# CONFIG_SCHED_STACK_END_CHECK is not set +CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y +CONFIG_DEBUG_VM_IRQSOFF=y +CONFIG_DEBUG_VM=y +# CONFIG_DEBUG_VM_MAPLE_TREE is not set +# CONFIG_DEBUG_VM_RB is not set +# CONFIG_DEBUG_VM_PGFLAGS is not set +CONFIG_DEBUG_VM_PGTABLE=y +CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y +# CONFIG_DEBUG_VIRTUAL is not set +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_PER_CPU_MAPS=y +CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y +# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set +CONFIG_HAVE_ARCH_KASAN=y +CONFIG_HAVE_ARCH_KASAN_VMALLOC=y +CONFIG_CC_HAS_KASAN_GENERIC=y +CONFIG_CC_HAS_KASAN_SW_TAGS=y +CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y +CONFIG_KASAN=y +CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX=y +CONFIG_KASAN_GENERIC=y +# CONFIG_KASAN_OUTLINE is not set +CONFIG_KASAN_INLINE=y +CONFIG_KASAN_STACK=y +CONFIG_KASAN_VMALLOC=y +# CONFIG_KASAN_MODULE_TEST is not set +CONFIG_HAVE_ARCH_KFENCE=y +CONFIG_KFENCE=y +CONFIG_KFENCE_SAMPLE_INTERVAL=100 +CONFIG_KFENCE_NUM_OBJECTS=255 +CONFIG_KFENCE_DEFERRABLE=y +CONFIG_KFENCE_STRESS_TEST_FAULTS=0 +CONFIG_HAVE_ARCH_KMSAN=y +# end of Memory Debugging + +CONFIG_DEBUG_SHIRQ=y + +# +# Debug Oops, Lockups and Hangs +# +CONFIG_PANIC_ON_OOPS=y +CONFIG_PANIC_ON_OOPS_VALUE=1 +CONFIG_PANIC_TIMEOUT=1 +CONFIG_LOCKUP_DETECTOR=y +CONFIG_SOFTLOCKUP_DETECTOR=y +# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set +CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y +# CONFIG_SDEI_WATCHDOG is not set +CONFIG_HARDLOCKUP_DETECTOR=y +# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set +CONFIG_HARDLOCKUP_DETECTOR_PERF=y +# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set +# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set +CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y +CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y +CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 +# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set +# CONFIG_WQ_WATCHDOG is not set +# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set +# CONFIG_TEST_LOCKUP is not set +# end of Debug Oops, Lockups and Hangs + +# +# Scheduler Debugging +# +CONFIG_SCHED_DEBUG=y +CONFIG_SCHED_INFO=y +CONFIG_SCHEDSTATS=y +CONFIG_SCHED_ACPU=y +# end of Scheduler Debugging + +# CONFIG_DEBUG_TIMEKEEPING is not set +# CONFIG_DEBUG_PREEMPT is not set + +# 
+# Lock Debugging (spinlocks, mutexes, etc...) +# +CONFIG_LOCK_DEBUGGING_SUPPORT=y +CONFIG_PROVE_LOCKING=y +# CONFIG_PROVE_RAW_LOCK_NESTING is not set +CONFIG_LOCK_STAT=y +CONFIG_DEBUG_RT_MUTEXES=y +CONFIG_DEBUG_SPINLOCK=y +CONFIG_DEBUG_MUTEXES=y +CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y +CONFIG_DEBUG_RWSEMS=y +CONFIG_DEBUG_LOCK_ALLOC=y +CONFIG_LOCKDEP=y +CONFIG_LOCKDEP_BITS=15 +CONFIG_LOCKDEP_CHAINS_BITS=16 +CONFIG_LOCKDEP_STACK_TRACE_BITS=19 +CONFIG_LOCKDEP_STACK_TRACE_HASH_BITS=14 +CONFIG_LOCKDEP_CIRCULAR_QUEUE_BITS=12 +# CONFIG_DEBUG_LOCKDEP is not set +CONFIG_DEBUG_ATOMIC_SLEEP=y +# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set +CONFIG_LOCK_TORTURE_TEST=m +# CONFIG_WW_MUTEX_SELFTEST is not set +# CONFIG_SCF_TORTURE_TEST is not set +# CONFIG_CSD_LOCK_WAIT_DEBUG is not set +# end of Lock Debugging (spinlocks, mutexes, etc...) + +CONFIG_TRACE_IRQFLAGS=y +CONFIG_TRACE_IRQFLAGS_NMI=y +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set +# CONFIG_DEBUG_KOBJECT_RELEASE is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +CONFIG_DEBUG_SG=y +CONFIG_DEBUG_NOTIFIERS=y +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +CONFIG_DEBUG_CREDENTIALS=y + +# +# RCU Debugging +# +CONFIG_PROVE_RCU=y +CONFIG_TORTURE_TEST=m +# CONFIG_RCU_SCALE_TEST is not set +CONFIG_RCU_TORTURE_TEST=m +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +CONFIG_LATENCYTOP=y +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_PREEMPTIRQ_TRACEPOINTS=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +CONFIG_MMIOTRACE=y +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y 
+CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_MMIOTRACE_TEST is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +CONFIG_X86_DEBUG_FPU=y +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +CONFIG_FAULT_INJECTION=y +CONFIG_FAILSLAB=y +CONFIG_FAIL_PAGE_ALLOC=y +# CONFIG_FAULT_INJECTION_USERCOPY is not set +CONFIG_FAIL_MAKE_REQUEST=y +CONFIG_FAIL_IO_TIMEOUT=y +# CONFIG_FAIL_FUTEX is not set +CONFIG_FAULT_INJECTION_DEBUG_FS=y +# CONFIG_FAIL_FUNCTION is not set +CONFIG_FAIL_MMC_REQUEST=y +# CONFIG_FAIL_SUNRPC is not set +# CONFIG_FAULT_INJECTION_CONFIGFS is not set +# CONFIG_FAULT_INJECTION_STACKTRACE_FILTER is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +CONFIG_TEST_STRING_HELPERS=m +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# 
CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/x86/configs/anolis_defconfig b/arch/x86/configs/anolis_defconfig new file mode 100644 index 0000000000000000000000000000000000000000..ee0afda89125bc45121aa2db6a4273abd0f66064 --- /dev/null +++ b/arch/x86/configs/anolis_defconfig @@ -0,0 +1,8053 @@ +# +# Automatically generated file; DO NOT EDIT. +# Linux/x86 6.6.7 Kernel Configuration +# +CONFIG_CC_VERSION_TEXT="gcc (scripts/dummy-tools/gcc)" +CONFIG_CC_IS_GCC=y +CONFIG_GCC_VERSION=200000 +CONFIG_CLANG_VERSION=0 +CONFIG_AS_IS_GNU=y +CONFIG_AS_VERSION=25000 +CONFIG_LD_IS_BFD=y +CONFIG_LD_VERSION=25000 +CONFIG_LLD_VERSION=0 +CONFIG_CC_CAN_LINK=y +CONFIG_CC_CAN_LINK_STATIC=y +CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y +CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y +CONFIG_TOOLS_SUPPORT_RELR=y +CONFIG_CC_HAS_ASM_INLINE=y +CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y +CONFIG_PAHOLE_VERSION=117 +CONFIG_IRQ_WORK=y +CONFIG_BUILDTIME_TABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y + +# +# General setup +# +CONFIG_INIT_ENV_ARG_LIMIT=32 +# CONFIG_COMPILE_TEST is not set +# CONFIG_WERROR is not set +CONFIG_LOCALVERSION="" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_BUILD_SALT="" +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_ZSTD=y +# CONFIG_KERNEL_GZIP is not set +# CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set +CONFIG_KERNEL_ZSTD=y +CONFIG_DEFAULT_INIT="" +CONFIG_DEFAULT_HOSTNAME="(none)" +CONFIG_SYSVIPC=y +CONFIG_SYSVIPC_SYSCTL=y +CONFIG_SYSVIPC_COMPAT=y +CONFIG_POSIX_MQUEUE=y +CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_WATCH_QUEUE is not set +CONFIG_CROSS_MEMORY_ATTACH=y +# CONFIG_USELIB is not set +CONFIG_AUDIT=y +CONFIG_HAVE_ARCH_AUDITSYSCALL=y +CONFIG_AUDITSYSCALL=y + +# +# IRQ subsystem +# +CONFIG_GENERIC_IRQ_PROBE=y +CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y +CONFIG_GENERIC_PENDING_IRQ=y +CONFIG_GENERIC_IRQ_MIGRATION=y +CONFIG_GENERIC_IRQ_INJECTION=y +CONFIG_HARDIRQS_SW_RESEND=y +CONFIG_IRQ_DOMAIN=y +CONFIG_IRQ_DOMAIN_HIERARCHY=y +CONFIG_GENERIC_MSI_IRQ=y +CONFIG_IRQ_MSI_IOMMU=y +CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR=y +CONFIG_GENERIC_IRQ_RESERVATION_MODE=y +CONFIG_IRQ_FORCED_THREADING=y +CONFIG_SPARSE_IRQ=y +# CONFIG_GENERIC_IRQ_DEBUGFS is not set +# end of IRQ subsystem + +CONFIG_CLOCKSOURCE_WATCHDOG=y +CONFIG_ARCH_CLOCKSOURCE_INIT=y +CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y +CONFIG_GENERIC_TIME_VSYSCALL=y +CONFIG_GENERIC_CLOCKEVENTS=y +CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y +CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y 
+CONFIG_GENERIC_CMOS_UPDATE=y +CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y +CONFIG_CONTEXT_TRACKING=y +CONFIG_CONTEXT_TRACKING_IDLE=y + +# +# Timers subsystem +# +CONFIG_TICK_ONESHOT=y +CONFIG_NO_HZ_COMMON=y +# CONFIG_HZ_PERIODIC is not set +# CONFIG_NO_HZ_IDLE is not set +CONFIG_NO_HZ_FULL=y +CONFIG_CONTEXT_TRACKING_USER=y +# CONFIG_CONTEXT_TRACKING_USER_FORCE is not set +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_CLOCKSOURCE_WATCHDOG_MAX_SKEW_US=125 +# end of Timers subsystem + +CONFIG_BPF=y +CONFIG_HAVE_EBPF_JIT=y +CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y + +# +# BPF subsystem +# +CONFIG_BPF_SYSCALL=y +CONFIG_BPF_JIT=y +CONFIG_BPF_JIT_ALWAYS_ON=y +CONFIG_BPF_JIT_DEFAULT_ON=y +CONFIG_BPF_UNPRIV_DEFAULT_OFF=y +# CONFIG_BPF_PRELOAD is not set +CONFIG_BPF_LSM=y +# end of BPF subsystem + +CONFIG_PREEMPT_BUILD=y +CONFIG_PREEMPT_NONE=y +# CONFIG_PREEMPT_VOLUNTARY is not set +# CONFIG_PREEMPT is not set +CONFIG_PREEMPT_COUNT=y +CONFIG_PREEMPTION=y +CONFIG_PREEMPT_DYNAMIC=y +CONFIG_SCHED_CORE=y + +# +# CPU/Task time and stats accounting +# +CONFIG_VIRT_CPU_ACCOUNTING=y +CONFIG_VIRT_CPU_ACCOUNTING_GEN=y +CONFIG_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_SCHED_AVG_IRQ=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_TASKSTATS=y +CONFIG_TASK_DELAY_ACCT=y +CONFIG_TASK_XACCT=y +CONFIG_TASK_IO_ACCOUNTING=y +CONFIG_PSI=y +CONFIG_PSI_DEFAULT_DISABLED=y +# end of CPU/Task time and stats accounting + +CONFIG_CPU_ISOLATION=y + +# +# RCU Subsystem +# +CONFIG_TREE_RCU=y +CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_TREE_SRCU=y +CONFIG_TASKS_RCU_GENERIC=y +CONFIG_TASKS_RCU=y +CONFIG_TASKS_RUDE_RCU=y +CONFIG_TASKS_TRACE_RCU=y +CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_NEED_SEGCBLIST=y +CONFIG_RCU_NOCB_CPU=y +# CONFIG_RCU_NOCB_CPU_DEFAULT_ALL is not set +# CONFIG_RCU_LAZY is not set +# end of RCU Subsystem + +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +# CONFIG_IKHEADERS is not set +CONFIG_LOG_BUF_SHIFT=21 +CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 +# CONFIG_PRINTK_INDEX is not set +CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y + +# +# Scheduler features +# +# CONFIG_UCLAMP_TASK is not set +# end of Scheduler features + +CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y +CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y +CONFIG_CC_HAS_INT128=y +CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5" +CONFIG_GCC11_NO_ARRAY_BOUNDS=y +CONFIG_CC_NO_ARRAY_BOUNDS=y +CONFIG_ARCH_SUPPORTS_INT128=y +CONFIG_NUMA_BALANCING=y +CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y +CONFIG_CGROUPS=y +CONFIG_PAGE_COUNTER=y +# CONFIG_CGROUP_FAVOR_DYNMODS is not set +CONFIG_MEMCG=y +CONFIG_MEMCG_KMEM=y +CONFIG_BLK_CGROUP=y +CONFIG_CGROUP_WRITEBACK=y +CONFIG_CGROUP_SCHED=y +CONFIG_FAIR_GROUP_SCHED=y +CONFIG_CFS_BANDWIDTH=y +CONFIG_RT_GROUP_SCHED=y +CONFIG_SCHED_MM_CID=y +CONFIG_CGROUP_PIDS=y +CONFIG_CGROUP_RDMA=y +CONFIG_CGROUP_FREEZER=y +CONFIG_CGROUP_HUGETLB=y +CONFIG_CPUSETS=y +CONFIG_PROC_PID_CPUSET=y +CONFIG_CGROUP_DEVICE=y +CONFIG_SCHED_SLI=y +CONFIG_RICH_CONTAINER=y +CONFIG_CGROUP_CPUACCT=y +CONFIG_CGROUP_PERF=y +CONFIG_CGROUP_BPF=y +# CONFIG_CGROUP_MISC is not set +# CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y +CONFIG_NAMESPACES=y +CONFIG_UTS_NS=y +CONFIG_TIME_NS=y +CONFIG_IPC_NS=y +CONFIG_USER_NS=y +CONFIG_PID_NS=y +CONFIG_NET_NS=y +CONFIG_CHECKPOINT_RESTORE=y +CONFIG_SCHED_AUTOGROUP=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="" +CONFIG_RD_GZIP=y +CONFIG_RD_BZIP2=y +CONFIG_RD_LZMA=y +CONFIG_RD_XZ=y +CONFIG_RD_LZO=y +CONFIG_RD_LZ4=y +CONFIG_RD_ZSTD=y +# CONFIG_BOOT_CONFIG is not 
set +CONFIG_INITRAMFS_PRESERVE_MTIME=y +CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y +# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_LD_ORPHAN_WARN=y +CONFIG_LD_ORPHAN_WARN_LEVEL="warn" +CONFIG_SYSCTL=y +CONFIG_HAVE_UID16=y +CONFIG_SYSCTL_EXCEPTION_TRACE=y +CONFIG_HAVE_PCSPKR_PLATFORM=y +# CONFIG_EXPERT is not set +CONFIG_UID16=y +CONFIG_MULTIUSER=y +CONFIG_SGETMASK_SYSCALL=y +CONFIG_SYSFS_SYSCALL=y +CONFIG_FHANDLE=y +CONFIG_POSIX_TIMERS=y +CONFIG_PRINTK=y +CONFIG_BUG=y +CONFIG_ELF_CORE=y +CONFIG_PCSPKR_PLATFORM=y +CONFIG_BASE_FULL=y +CONFIG_FUTEX=y +CONFIG_FUTEX_PI=y +CONFIG_EPOLL=y +CONFIG_SIGNALFD=y +CONFIG_TIMERFD=y +CONFIG_EVENTFD=y +CONFIG_SHMEM=y +CONFIG_AIO=y +CONFIG_IO_URING=y +CONFIG_ADVISE_SYSCALLS=y +CONFIG_MEMBARRIER=y +CONFIG_KALLSYMS=y +# CONFIG_KALLSYMS_SELFTEST is not set +CONFIG_KALLSYMS_ALL=y +CONFIG_KALLSYMS_ABSOLUTE_PERCPU=y +CONFIG_KALLSYMS_BASE_RELATIVE=y +CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y +CONFIG_KCMP=y +CONFIG_RSEQ=y +CONFIG_CACHESTAT_SYSCALL=y +CONFIG_HAVE_PERF_EVENTS=y +CONFIG_GUEST_PERF_EVENTS=y + +# +# Kernel Performance Events And Counters +# +CONFIG_PERF_EVENTS=y +# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# end of Kernel Performance Events And Counters + +CONFIG_SYSTEM_DATA_VERIFICATION=y +CONFIG_PROFILING=y +CONFIG_TRACEPOINTS=y + +# +# Kexec and crash features +# +CONFIG_CRASH_CORE=y +CONFIG_KEXEC_CORE=y +CONFIG_HAVE_IMA_KEXEC=y +CONFIG_KEXEC=y +CONFIG_KEXEC_FILE=y +CONFIG_KEXEC_SIG=y +# CONFIG_KEXEC_SIG_FORCE is not set +CONFIG_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_KEXEC_JUMP=y +CONFIG_CRASH_DUMP=y +CONFIG_CRASH_HOTPLUG=y +CONFIG_CRASH_MAX_MEMORY_RANGES=8192 +# end of Kexec and crash features +# end of General setup + +CONFIG_64BIT=y +CONFIG_X86_64=y +CONFIG_X86=y +CONFIG_INSTRUCTION_DECODER=y +CONFIG_OUTPUT_FORMAT="elf64-x86-64" +CONFIG_LOCKDEP_SUPPORT=y +CONFIG_STACKTRACE_SUPPORT=y +CONFIG_MMU=y +CONFIG_ARCH_MMAP_RND_BITS_MIN=28 +CONFIG_ARCH_MMAP_RND_BITS_MAX=32 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=8 +CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16 +CONFIG_GENERIC_ISA_DMA=y +CONFIG_GENERIC_BUG=y +CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y +CONFIG_ARCH_MAY_HAVE_PC_FDC=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_ARCH_HAS_CPU_RELAX=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_AUDIT_ARCH=y +CONFIG_HAVE_INTEL_TXT=y +CONFIG_X86_64_SMP=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_FIX_EARLYCON_MEM=y +CONFIG_DYNAMIC_PHYSICAL_MASK=y +CONFIG_PGTABLE_LEVELS=4 +CONFIG_CC_HAS_SANE_STACKPROTECTOR=y + +# +# Processor type and features +# +CONFIG_SMP=y +CONFIG_X86_X2APIC=y +CONFIG_X86_MPPARSE=y +# CONFIG_GOLDFISH is not set +CONFIG_X86_CPU_RESCTRL=y +CONFIG_X86_EXTENDED_PLATFORM=y +# CONFIG_X86_NUMACHIP is not set +# CONFIG_X86_VSMP is not set +CONFIG_X86_UV=y +# CONFIG_X86_GOLDFISH is not set +# CONFIG_X86_INTEL_MID is not set +CONFIG_X86_INTEL_LPSS=y +CONFIG_X86_AMD_PLATFORM_DEVICE=y +CONFIG_IOSF_MBI=y +# CONFIG_IOSF_MBI_DEBUG is not set +CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y +CONFIG_SCHED_OMIT_FRAME_POINTER=y +CONFIG_HYPERVISOR_GUEST=y +CONFIG_PARAVIRT=y +# CONFIG_PARAVIRT_DEBUG is not set +CONFIG_PARAVIRT_SPINLOCKS=y +CONFIG_X86_HV_CALLBACK_VECTOR=y +CONFIG_XEN=y +# CONFIG_XEN_PV is not set +CONFIG_XEN_PVHVM=y +CONFIG_XEN_PVHVM_SMP=y +CONFIG_XEN_PVHVM_GUEST=y +CONFIG_XEN_SAVE_RESTORE=y +# CONFIG_XEN_DEBUG_FS is not set +# CONFIG_XEN_PVH is not set +CONFIG_KVM_GUEST=y +CONFIG_ARCH_CPUIDLE_HALTPOLL=y +# CONFIG_PVH is not set +CONFIG_PARAVIRT_TIME_ACCOUNTING=y +CONFIG_PARAVIRT_CLOCK=y +# CONFIG_JAILHOUSE_GUEST is not set +# CONFIG_ACRN_GUEST is not set 
+CONFIG_INTEL_TDX_GUEST=y +# CONFIG_MK8 is not set +# CONFIG_MPSC is not set +# CONFIG_MCORE2 is not set +# CONFIG_MATOM is not set +CONFIG_GENERIC_CPU=y +CONFIG_X86_INTERNODE_CACHE_SHIFT=6 +CONFIG_X86_L1_CACHE_SHIFT=6 +CONFIG_X86_TSC=y +CONFIG_X86_CMPXCHG64=y +CONFIG_X86_CMOV=y +CONFIG_X86_MINIMUM_CPU_FAMILY=64 +CONFIG_X86_DEBUGCTLMSR=y +CONFIG_IA32_FEAT_CTL=y +CONFIG_X86_VMX_FEATURE_NAMES=y +CONFIG_CPU_SUP_INTEL=y +CONFIG_CPU_SUP_AMD=y +CONFIG_CPU_SUP_HYGON=y +CONFIG_CPU_SUP_CENTAUR=y +CONFIG_CPU_SUP_ZHAOXIN=y +CONFIG_HPET_TIMER=y +CONFIG_HPET_EMULATE_RTC=y +CONFIG_DMI=y +# CONFIG_GART_IOMMU is not set +CONFIG_BOOT_VESA_SUPPORT=y +# CONFIG_MAXSMP is not set +CONFIG_NR_CPUS_RANGE_BEGIN=2 +CONFIG_NR_CPUS_RANGE_END=8192 +CONFIG_NR_CPUS_DEFAULT=64 +CONFIG_NR_CPUS=1024 +CONFIG_SCHED_CLUSTER=y +CONFIG_SCHED_SMT=y +CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y +CONFIG_X86_LOCAL_APIC=y +CONFIG_X86_IO_APIC=y +CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y +CONFIG_X86_MCE=y +CONFIG_X86_MCELOG_LEGACY=y +CONFIG_X86_MCE_INTEL=y +CONFIG_X86_MCE_AMD=y +CONFIG_X86_MCE_THRESHOLD=y +CONFIG_X86_MCE_INJECT=m + +# +# Performance monitoring +# +CONFIG_PERF_EVENTS_INTEL_UNCORE=m +CONFIG_PERF_EVENTS_INTEL_RAPL=m +CONFIG_PERF_EVENTS_INTEL_CSTATE=m +CONFIG_PERF_EVENTS_AMD_POWER=m +CONFIG_PERF_EVENTS_AMD_UNCORE=y +CONFIG_PERF_EVENTS_AMD_BRS=y +# end of Performance monitoring + +CONFIG_X86_16BIT=y +CONFIG_X86_ESPFIX64=y +CONFIG_X86_VSYSCALL_EMULATION=y +CONFIG_X86_IOPL_IOPERM=y +CONFIG_MICROCODE=y +# CONFIG_MICROCODE_LATE_LOADING is not set +CONFIG_X86_MSR=y +CONFIG_X86_CPUID=y +# CONFIG_X86_5LEVEL is not set +CONFIG_X86_DIRECT_GBPAGES=y +CONFIG_X86_CPA_STATISTICS=y +CONFIG_X86_MEM_ENCRYPT=y +CONFIG_AMD_MEM_ENCRYPT=y +# CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT is not set +CONFIG_NUMA=y +CONFIG_AMD_NUMA=y +CONFIG_X86_64_ACPI_NUMA=y +CONFIG_NUMA_EMU=y +CONFIG_NODES_SHIFT=6 +CONFIG_ARCH_SPARSEMEM_ENABLE=y +CONFIG_ARCH_SPARSEMEM_DEFAULT=y +# CONFIG_ARCH_MEMORY_PROBE is not set +CONFIG_ARCH_PROC_KCORE_TEXT=y +CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 +CONFIG_X86_PMEM_LEGACY_DEVICE=y +CONFIG_X86_PMEM_LEGACY=m +CONFIG_X86_CHECK_BIOS_CORRUPTION=y +# CONFIG_X86_BOOTPARAM_MEMORY_CORRUPTION_CHECK is not set +CONFIG_MTRR=y +CONFIG_MTRR_SANITIZER=y +CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=1 +CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 +CONFIG_X86_PAT=y +CONFIG_ARCH_USES_PG_UNCACHED=y +CONFIG_X86_UMIP=y +CONFIG_CC_HAS_IBT=y +CONFIG_X86_CET=y +CONFIG_X86_KERNEL_IBT=y +CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS=y +# CONFIG_X86_INTEL_TSX_MODE_OFF is not set +# CONFIG_X86_INTEL_TSX_MODE_ON is not set +CONFIG_X86_INTEL_TSX_MODE_AUTO=y +CONFIG_X86_SGX=y +# CONFIG_X86_USER_SHADOW_STACK is not set +CONFIG_EFI=y +CONFIG_EFI_STUB=y +CONFIG_EFI_HANDOVER_PROTOCOL=y +CONFIG_EFI_MIXED=y +# CONFIG_EFI_FAKE_MEMMAP is not set +CONFIG_EFI_RUNTIME_MAP=y +CONFIG_HYGON_CSV=y +# CONFIG_HZ_100 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +CONFIG_HZ_1000=y +CONFIG_HZ=1000 +CONFIG_SCHED_HRTICK=y +CONFIG_ARCH_SUPPORTS_KEXEC=y +CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y +CONFIG_ARCH_SELECTS_KEXEC_FILE=y +CONFIG_ARCH_SUPPORTS_KEXEC_PURGATORY=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_SIG_FORCE=y +CONFIG_ARCH_SUPPORTS_KEXEC_BZIMAGE_VERIFY_SIG=y +CONFIG_ARCH_SUPPORTS_KEXEC_JUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y +CONFIG_ARCH_SUPPORTS_CRASH_HOTPLUG=y +CONFIG_PHYSICAL_START=0x1000000 +CONFIG_RELOCATABLE=y +CONFIG_RANDOMIZE_BASE=y +CONFIG_X86_NEED_RELOCS=y +CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_DYNAMIC_MEMORY_LAYOUT=y 
+CONFIG_RANDOMIZE_MEMORY=y
+CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa
+# CONFIG_ADDRESS_MASKING is not set
+CONFIG_HOTPLUG_CPU=y
+# CONFIG_COMPAT_VDSO is not set
+CONFIG_LEGACY_VSYSCALL_XONLY=y
+# CONFIG_LEGACY_VSYSCALL_NONE is not set
+# CONFIG_CMDLINE_BOOL is not set
+CONFIG_MODIFY_LDT_SYSCALL=y
+# CONFIG_STRICT_SIGALTSTACK_SIZE is not set
+CONFIG_HAVE_LIVEPATCH=y
+CONFIG_LIVEPATCH=y
+# end of Processor type and features
+
+CONFIG_CC_HAS_SLS=y
+CONFIG_CC_HAS_RETURN_THUNK=y
+CONFIG_CC_HAS_ENTRY_PADDING=y
+CONFIG_FUNCTION_PADDING_CFI=11
+CONFIG_FUNCTION_PADDING_BYTES=16
+CONFIG_CALL_PADDING=y
+CONFIG_HAVE_CALL_THUNKS=y
+CONFIG_CALL_THUNKS=y
+CONFIG_PREFIX_SYMBOLS=y
+CONFIG_SPECULATION_MITIGATIONS=y
+CONFIG_PAGE_TABLE_ISOLATION=y
+CONFIG_RETPOLINE=y
+CONFIG_RETHUNK=y
+CONFIG_CPU_UNRET_ENTRY=y
+CONFIG_CALL_DEPTH_TRACKING=y
+# CONFIG_CALL_THUNKS_DEBUG is not set
+CONFIG_CPU_IBPB_ENTRY=y
+CONFIG_CPU_IBRS_ENTRY=y
+CONFIG_CPU_SRSO=y
+# CONFIG_SLS is not set
+# CONFIG_GDS_FORCE_MITIGATION is not set
+CONFIG_ARCH_HAS_ADD_PAGES=y
+
+#
+# Power management and ACPI options
+#
+CONFIG_ARCH_HIBERNATION_HEADER=y
+CONFIG_SUSPEND=y
+CONFIG_SUSPEND_FREEZER=y
+CONFIG_HIBERNATE_CALLBACKS=y
+CONFIG_HIBERNATION=y
+CONFIG_HIBERNATION_SNAPSHOT_DEV=y
+CONFIG_PM_STD_PARTITION=""
+CONFIG_PM_SLEEP=y
+CONFIG_PM_SLEEP_SMP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_USERSPACE_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+# CONFIG_PM_TEST_SUSPEND is not set
+CONFIG_PM_SLEEP_DEBUG=y
+# CONFIG_PM_TRACE_RTC is not set
+CONFIG_PM_CLK=y
+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set
+# CONFIG_ENERGY_MODEL is not set
+CONFIG_ARCH_SUPPORTS_ACPI=y
+CONFIG_ACPI=y
+CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y
+CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y
+CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y
+CONFIG_ACPI_TABLE_LIB=y
+# CONFIG_ACPI_DEBUGGER is not set
+CONFIG_ACPI_SPCR_TABLE=y
+# CONFIG_ACPI_FPDT is not set
+CONFIG_ACPI_LPIT=y
+CONFIG_ACPI_SLEEP=y
+CONFIG_ACPI_REV_OVERRIDE_POSSIBLE=y
+CONFIG_ACPI_EC_DEBUGFS=m
+CONFIG_ACPI_AC=y
+CONFIG_ACPI_BATTERY=y
+CONFIG_ACPI_BUTTON=y
+CONFIG_ACPI_VIDEO=m
+CONFIG_ACPI_FAN=y
+CONFIG_ACPI_TAD=m
+CONFIG_ACPI_DOCK=y
+CONFIG_ACPI_CPU_FREQ_PSS=y
+CONFIG_ACPI_PROCESSOR_CSTATE=y
+CONFIG_ACPI_PROCESSOR_IDLE=y
+CONFIG_ACPI_CPPC_LIB=y
+CONFIG_ACPI_PROCESSOR=y
+CONFIG_ACPI_IPMI=m
+CONFIG_ACPI_HOTPLUG_CPU=y
+CONFIG_ACPI_PROCESSOR_AGGREGATOR=m
+CONFIG_ACPI_THERMAL=y
+CONFIG_ACPI_PLATFORM_PROFILE=m
+CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y
+CONFIG_ACPI_TABLE_UPGRADE=y
+# CONFIG_ACPI_DEBUG is not set
+CONFIG_ACPI_PCI_SLOT=y
+CONFIG_ACPI_CONTAINER=y
+CONFIG_ACPI_HOTPLUG_MEMORY=y
+CONFIG_ACPI_HOTPLUG_IOAPIC=y
+CONFIG_ACPI_SBS=m
+CONFIG_ACPI_HED=y
+# CONFIG_ACPI_CUSTOM_METHOD is not set
+CONFIG_ACPI_BGRT=y
+CONFIG_ACPI_NFIT=m
+# CONFIG_NFIT_SECURITY_DEBUG is not set
+CONFIG_ACPI_NUMA=y
+CONFIG_ACPI_HMAT=y
+CONFIG_HAVE_ACPI_APEI=y
+CONFIG_HAVE_ACPI_APEI_NMI=y
+CONFIG_ACPI_APEI=y
+CONFIG_ACPI_APEI_GHES=y
+CONFIG_ACPI_APEI_PCIEAER=y
+CONFIG_ACPI_APEI_MEMORY_FAILURE=y
+CONFIG_ACPI_APEI_EINJ=m
+# CONFIG_ACPI_APEI_ERST_DEBUG is not set
+# CONFIG_ACPI_DPTF is not set
+CONFIG_ACPI_WATCHDOG=y
+CONFIG_ACPI_EXTLOG=m
+CONFIG_ACPI_ADXL=y
+# CONFIG_ACPI_CONFIGFS is not set
+# CONFIG_ACPI_PFRUT is not set
+CONFIG_ACPI_PCC=y
+# CONFIG_ACPI_FFH is not set
+CONFIG_PMIC_OPREGION=y
+CONFIG_ACPI_PRMT=y
+CONFIG_X86_PM_TIMER=y
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_GOV_ATTR_SET=y
+CONFIG_CPU_FREQ_GOV_COMMON=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+
+#
+# CPU frequency scaling drivers
+#
+CONFIG_X86_INTEL_PSTATE=y
+# CONFIG_X86_PCC_CPUFREQ is not set
+CONFIG_X86_AMD_PSTATE=y
+CONFIG_X86_AMD_PSTATE_DEFAULT_MODE=3
+# CONFIG_X86_AMD_PSTATE_UT is not set
+CONFIG_X86_ACPI_CPUFREQ=m
+CONFIG_X86_ACPI_CPUFREQ_CPB=y
+CONFIG_X86_POWERNOW_K8=m
+CONFIG_X86_AMD_FREQ_SENSITIVITY=m
+# CONFIG_X86_SPEEDSTEP_CENTRINO is not set
+CONFIG_X86_P4_CLOCKMOD=m
+
+#
+# shared options
+#
+CONFIG_X86_SPEEDSTEP_LIB=m
+# end of CPU Frequency scaling
+
+#
+# CPU Idle
+#
+CONFIG_CPU_IDLE=y
+# CONFIG_CPU_IDLE_GOV_LADDER is not set
+CONFIG_CPU_IDLE_GOV_MENU=y
+# CONFIG_CPU_IDLE_GOV_TEO is not set
+CONFIG_CPU_IDLE_GOV_HALTPOLL=y
+CONFIG_HALTPOLL_CPUIDLE=y
+# end of CPU Idle
+
+CONFIG_INTEL_IDLE=y
+# end of Power management and ACPI options
+
+#
+# Bus options (PCI etc.)
+#
+CONFIG_PCI_DIRECT=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCI_XEN=y
+CONFIG_MMCONF_FAM10H=y
+CONFIG_ISA_DMA_API=y
+CONFIG_AMD_NB=y
+# end of Bus options (PCI etc.)
+
+#
+# Binary Emulations
+#
+CONFIG_IA32_EMULATION=y
+# CONFIG_X86_X32_ABI is not set
+CONFIG_COMPAT_32=y
+CONFIG_COMPAT=y
+CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
+# end of Binary Emulations
+
+CONFIG_HAVE_KVM=y
+CONFIG_HAVE_KVM_PFNCACHE=y
+CONFIG_HAVE_KVM_IRQCHIP=y
+CONFIG_HAVE_KVM_IRQFD=y
+CONFIG_HAVE_KVM_IRQ_ROUTING=y
+CONFIG_HAVE_KVM_DIRTY_RING=y
+CONFIG_HAVE_KVM_DIRTY_RING_TSO=y
+CONFIG_HAVE_KVM_DIRTY_RING_ACQ_REL=y
+CONFIG_HAVE_KVM_EVENTFD=y
+CONFIG_KVM_MMIO=y
+CONFIG_KVM_ASYNC_PF=y
+CONFIG_HAVE_KVM_MSI=y
+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y
+CONFIG_KVM_VFIO=y
+CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT=y
+CONFIG_KVM_COMPAT=y
+CONFIG_HAVE_KVM_IRQ_BYPASS=y
+CONFIG_HAVE_KVM_NO_POLL=y
+CONFIG_KVM_XFER_TO_GUEST_WORK=y
+CONFIG_HAVE_KVM_PM_NOTIFIER=y
+CONFIG_KVM_GENERIC_HARDWARE_ENABLING=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=m
+CONFIG_KVM_INTEL=m
+CONFIG_X86_SGX_KVM=y
+CONFIG_KVM_AMD=m
+CONFIG_KVM_AMD_SEV=y
+CONFIG_KVM_SMM=y
+# CONFIG_KVM_XEN is not set
+CONFIG_KVM_EXTERNAL_WRITE_TRACKING=y
+CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID=y
+CONFIG_AS_AVX512=y
+CONFIG_AS_SHA1_NI=y
+CONFIG_AS_SHA256_NI=y
+CONFIG_AS_TPAUSE=y
+CONFIG_AS_GFNI=y
+CONFIG_AS_WRUSS=y
+
+#
+# General architecture-dependent options
+#
+CONFIG_HOTPLUG_SMT=y
+CONFIG_HOTPLUG_CORE_SYNC=y
+CONFIG_HOTPLUG_CORE_SYNC_DEAD=y
+CONFIG_HOTPLUG_CORE_SYNC_FULL=y
+CONFIG_HOTPLUG_SPLIT_STARTUP=y
+CONFIG_HOTPLUG_PARALLEL=y
+CONFIG_GENERIC_ENTRY=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_STATIC_KEYS_SELFTEST is not set
+# CONFIG_STATIC_CALL_SELFTEST is not set
+CONFIG_OPTPROBES=y
+CONFIG_KPROBES_ON_FTRACE=y
+CONFIG_UPROBES=y
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_ARCH_USE_BUILTIN_BSWAP=y
+CONFIG_KRETPROBES=y
+CONFIG_KRETPROBE_ON_RETHOOK=y
+CONFIG_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_OPTPROBES=y
+CONFIG_HAVE_KPROBES_ON_FTRACE=y
+CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
+CONFIG_ARCH_HAS_CPU_FINALIZE_INIT=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_HAVE_ASM_MODVERSIONS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_RUST=y
+CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y
+CONFIG_HAVE_USER_RETURN_NOTIFIER=y
+CONFIG_HAVE_PERF_EVENTS_NMI=y
+CONFIG_HAVE_HARDLOCKUP_DETECTOR_PERF=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
+CONFIG_MMU_GATHER_TABLE_FREE=y
+CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
+CONFIG_MMU_GATHER_MERGE_VMAS=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y
+CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y
+CONFIG_HAVE_ARCH_SECCOMP=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP=y
+CONFIG_SECCOMP_FILTER=y
+# CONFIG_SECCOMP_CACHE_DEBUG is not set
+CONFIG_HAVE_ARCH_STACKLEAK=y
+CONFIG_HAVE_STACKPROTECTOR=y
+CONFIG_STACKPROTECTOR=y
+CONFIG_STACKPROTECTOR_STRONG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
+CONFIG_LTO_NONE=y
+CONFIG_ARCH_SUPPORTS_CFI_CLANG=y
+# CONFIG_CFI_CLANG is not set
+CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_MOVE_PUD=y
+CONFIG_HAVE_MOVE_PMD=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_HUGE_VMALLOC=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_ARCH_WANT_PMD_MKWRITE=y
+CONFIG_HAVE_ARCH_SOFT_DIRTY=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y
+CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_HAVE_EXIT_THREAD=y
+CONFIG_ARCH_MMAP_RND_BITS=28
+CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS=y
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS=8
+CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_HAVE_OBJTOOL=y
+CONFIG_HAVE_JUMP_LABEL_HACK=y
+CONFIG_HAVE_NOINSTR_HACK=y
+CONFIG_HAVE_NOINSTR_VALIDATION=y
+CONFIG_HAVE_UACCESS_VALIDATION=y
+CONFIG_HAVE_STACK_VALIDATION=y
+CONFIG_HAVE_RELIABLE_STACKTRACE=y
+CONFIG_OLD_SIGSUSPEND3=y
+CONFIG_COMPAT_OLD_SIGACTION=y
+CONFIG_COMPAT_32BIT_TIME=y
+CONFIG_HAVE_ARCH_VMAP_STACK=y
+CONFIG_VMAP_STACK=y
+CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
+CONFIG_RANDOMIZE_KSTACK_OFFSET=y
+# CONFIG_RANDOMIZE_KSTACK_OFFSET_DEFAULT is not set
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
+CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
+CONFIG_ARCH_USE_MEMREMAP_PROT=y
+# CONFIG_LOCK_EVENT_COUNTS is not set
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
+CONFIG_ARCH_HAS_CC_PLATFORM=y
+CONFIG_HAVE_STATIC_CALL=y
+CONFIG_HAVE_STATIC_CALL_INLINE=y
+CONFIG_HAVE_PREEMPT_DYNAMIC=y
+CONFIG_HAVE_PREEMPT_DYNAMIC_CALL=y
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
+CONFIG_ARCH_HAS_ELFCORE_COMPAT=y
+CONFIG_ARCH_HAS_PARANOID_L1D_FLUSH=y
+CONFIG_DYNAMIC_SIGFRAME=y
+CONFIG_HAVE_ARCH_NODE_DEV_GROUP=y
+CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+# end of GCOV-based kernel profiling
+
+CONFIG_HAVE_GCC_PLUGINS=y
+CONFIG_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
+CONFIG_FUNCTION_ALIGNMENT_4B=y
+CONFIG_FUNCTION_ALIGNMENT_16B=y
+CONFIG_FUNCTION_ALIGNMENT=16
+# end of General architecture-dependent options
+
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+CONFIG_MODULE_SIG_FORMAT=y
+CONFIG_MODULES=y
+# CONFIG_MODULE_DEBUG is not set
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set
+CONFIG_MODVERSIONS=y
+CONFIG_ASM_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_MODULE_SIG=y
+# CONFIG_MODULE_SIG_FORCE is not set
+# CONFIG_MODULE_SIG_ALL is not set
+# CONFIG_MODULE_SIG_SHA1 is not set
+# CONFIG_MODULE_SIG_SHA224 is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_MODULE_SIG_SHA384 is not set
+# CONFIG_MODULE_SIG_SHA512 is not set
+CONFIG_MODULE_SIG_HASH="sha256"
+CONFIG_MODULE_COMPRESS_NONE=y
+# CONFIG_MODULE_COMPRESS_GZIP is not set
+# CONFIG_MODULE_COMPRESS_XZ is not set
+# CONFIG_MODULE_COMPRESS_ZSTD is not set
+# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
+CONFIG_BLK_RQ_ALLOC_TIME=y
+CONFIG_BLK_CGROUP_RWSTAT=y
+CONFIG_BLK_CGROUP_PUNT_BIO=y
+CONFIG_BLK_DEV_BSG_COMMON=y
+CONFIG_BLK_ICQ=y
+CONFIG_BLK_DEV_BSGLIB=y
+CONFIG_BLK_DEV_INTEGRITY=y
+CONFIG_BLK_DEV_INTEGRITY_T10=m
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_DEV_THROTTLING=y
+# CONFIG_BLK_DEV_THROTTLING_LOW is not set
+# CONFIG_BLK_WBT is not set
+CONFIG_BLK_CGROUP_IOLATENCY=y
+# CONFIG_BLK_CGROUP_FC_APPID is not set
+CONFIG_BLK_CGROUP_IOCOST=y
+# CONFIG_BLK_CGROUP_IOPRIO is not set
+CONFIG_BLK_DEBUG_FS=y
+CONFIG_BLK_DEBUG_FS_ZONED=y
+# CONFIG_BLK_SED_OPAL is not set
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+# CONFIG_ATARI_PARTITION is not set
+CONFIG_MAC_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+# CONFIG_LDM_PARTITION is not set
+CONFIG_SGI_PARTITION=y
+# CONFIG_ULTRIX_PARTITION is not set
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLK_MQ_VIRTIO=y
+CONFIG_BLK_PM=y
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_BFQ_CGROUP_DEBUG is not set
+# end of IO Schedulers
+
+CONFIG_PREEMPT_NOTIFIERS=y
+CONFIG_PADATA=y
+CONFIG_ASN1=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
+CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE=y
+CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
+CONFIG_CK_KABI_RESERVE=y
+CONFIG_CK_KABI_SIZE_ALIGN_CHECKS=y
+CONFIG_FREEZER=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_COMPAT_BINFMT_ELF=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+CONFIG_BINFMT_MISC=m
+CONFIG_COREDUMP=y
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+CONFIG_ZPOOL=y
+CONFIG_SWAP=y
+CONFIG_ZSWAP=y
+# CONFIG_ZSWAP_DEFAULT_ON is not set
+# CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_DEFLATE is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZO=y
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_842 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4 is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_LZ4HC is not set
+# CONFIG_ZSWAP_COMPRESSOR_DEFAULT_ZSTD is not set
+CONFIG_ZSWAP_COMPRESSOR_DEFAULT="lzo"
+CONFIG_ZSWAP_ZPOOL_DEFAULT_ZBUD=y
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_Z3FOLD is not set
+# CONFIG_ZSWAP_ZPOOL_DEFAULT_ZSMALLOC is not set
+CONFIG_ZSWAP_ZPOOL_DEFAULT="zbud"
+CONFIG_ZBUD=y
+# CONFIG_Z3FOLD is not set
+CONFIG_ZSMALLOC=y
+CONFIG_ZSMALLOC_STAT=y
+CONFIG_ZSMALLOC_CHAIN_SIZE=8
+
+#
+# SLAB allocator options
+#
+# CONFIG_SLAB_DEPRECATED is not set
+CONFIG_SLUB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_RANDOM_KMALLOC_CACHES is not set
+# end of SLAB allocator options
+
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP=y
+CONFIG_ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP=y
+CONFIG_HAVE_FAST_GUP=y
+CONFIG_NUMA_KEEP_MEMINFO=y
+CONFIG_MEMORY_ISOLATION=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_HAVE_BOOTMEM_INFO_NODE=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTPLUG_DEFAULT_ONLINE=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_MHP_MEMMAP_ON_MEMORY=y
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_MEMORY_BALLOON=y
+CONFIG_BALLOON_COMPACTION=y
+CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+CONFIG_PAGE_REPORTING=y
+CONFIG_MIGRATION=y
+CONFIG_DEVICE_MIGRATION=y
+CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y
+CONFIG_ARCH_ENABLE_THP_MIGRATION=y
+CONFIG_CONTIG_ALLOC=y
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_MMU_NOTIFIER=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_MEMORY_FAILURE=y
+CONFIG_HWPOISON_INJECT=m
+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y
+CONFIG_ARCH_WANTS_THP_SWAP=y
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set
+CONFIG_THP_SWAP=y
+CONFIG_READ_ONLY_THP_FOR_FS=y
+CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
+CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
+CONFIG_USE_PERCPU_NUMA_NODE_ID=y
+CONFIG_HAVE_SETUP_PER_CPU_AREA=y
+CONFIG_CMA=y
+# CONFIG_CMA_DEBUG is not set
+# CONFIG_CMA_DEBUGFS is not set
+# CONFIG_CMA_SYSFS is not set
+CONFIG_CMA_AREAS=19
+CONFIG_MEM_SOFT_DIRTY=y
+CONFIG_GENERIC_EARLY_IOREMAP=y
+CONFIG_DEFERRED_STRUCT_PAGE_INIT=y
+CONFIG_PAGE_IDLE_FLAG=y
+CONFIG_IDLE_PAGE_TRACKING=y
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y
+CONFIG_ARCH_HAS_PTE_DEVMAP=y
+CONFIG_ZONE_DMA=y
+CONFIG_ZONE_DMA32=y
+CONFIG_ZONE_DEVICE=y
+CONFIG_HMM_MIRROR=y
+CONFIG_GET_FREE_REGION=y
+CONFIG_DEVICE_PRIVATE=y
+CONFIG_VMAP_PFN=y
+CONFIG_ARCH_USES_HIGH_VMA_FLAGS=y
+CONFIG_ARCH_HAS_PKEYS=y
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_MAPPING_DIRTY_HELPERS=y
+CONFIG_MEMFD_CREATE=y
+CONFIG_SECRETMEM=y
+# CONFIG_ANON_VMA_NAME is not set
+CONFIG_USERFAULTFD=y
+CONFIG_HAVE_ARCH_USERFAULTFD_WP=y
+CONFIG_HAVE_ARCH_USERFAULTFD_MINOR=y
+CONFIG_PTE_MARKER_UFFD_WP=y
+CONFIG_LRU_GEN=y
+# CONFIG_LRU_GEN_ENABLED is not set
+# CONFIG_LRU_GEN_STATS is not set
+CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_LOCK_MM_AND_FIND_VMA=y
+
+#
+# Data Access Monitoring
+#
+CONFIG_DAMON=y
+CONFIG_DAMON_VADDR=y
+CONFIG_DAMON_PADDR=y
+# CONFIG_DAMON_SYSFS is not set
+CONFIG_DAMON_DBGFS=y
+# CONFIG_DAMON_RECLAIM is not set
+# CONFIG_DAMON_LRU_SORT is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
+CONFIG_NET=y
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_NET_REDIRECT=y
+CONFIG_SKB_EXTENSIONS=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+CONFIG_PACKET_DIAG=m
+CONFIG_UNIX=y
+CONFIG_UNIX_SCM=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_DIAG=m
+CONFIG_TLS=m
+CONFIG_TLS_DEVICE=y
+# CONFIG_TLS_TOE is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_USER_COMPAT is not set
+CONFIG_XFRM_INTERFACE=m
+CONFIG_XFRM_SUB_POLICY=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_XFRM_AH=m
+CONFIG_XFRM_ESP=m
+CONFIG_XFRM_IPCOMP=m
+CONFIG_NET_KEY=m
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
+CONFIG_XDP_SOCKETS=y
+CONFIG_XDP_SOCKETS_DIAG=m
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_FIB_TRIE_STATS=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+# CONFIG_IP_PNP is not set
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IP_TUNNEL=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_IPGRE_BROADCAST=y
+CONFIG_IP_MROUTE_COMMON=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_NET_IPVTI=m
+CONFIG_NET_UDP_TUNNEL=m
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_ESP_OFFLOAD=m
+# CONFIG_INET_ESPINTCP is not set
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_XFRM_TUNNEL=m
+CONFIG_INET_TUNNEL=m
+CONFIG_INET_DIAG=m
+CONFIG_INET_TCP_DIAG=m
+CONFIG_INET_UDP_DIAG=m
+CONFIG_INET_RAW_DIAG=m
+# CONFIG_INET_DIAG_DESTROY is not set
+CONFIG_TCP_CONG_ADVANCED=y
+CONFIG_TCP_CONG_BIC=m
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_TCP_CONG_WESTWOOD=m
+CONFIG_TCP_CONG_HTCP=m
+CONFIG_TCP_CONG_HSTCP=m
+CONFIG_TCP_CONG_HYBLA=m
+CONFIG_TCP_CONG_VEGAS=m
+CONFIG_TCP_CONG_NV=m
+CONFIG_TCP_CONG_SCALABLE=m
+CONFIG_TCP_CONG_LP=m
+CONFIG_TCP_CONG_VENO=m
+CONFIG_TCP_CONG_YEAH=m
+CONFIG_TCP_CONG_ILLINOIS=m
+CONFIG_TCP_CONG_DCTCP=m
+CONFIG_TCP_CONG_CDG=m
+CONFIG_TCP_CONG_BBR=m
+CONFIG_DEFAULT_CUBIC=y
+# CONFIG_DEFAULT_RENO is not set
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_ESP_OFFLOAD=m
+# CONFIG_INET6_ESPINTCP is not set
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+# CONFIG_IPV6_ILA is not set
+CONFIG_INET6_XFRM_TUNNEL=m
+CONFIG_INET6_TUNNEL=m
+CONFIG_IPV6_VTI=m
+CONFIG_IPV6_SIT=m
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_IPV6_PIMSM_V2=y
+# CONFIG_IPV6_SEG6_LWTUNNEL is not set
+# CONFIG_IPV6_SEG6_HMAC is not set
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
+CONFIG_NETLABEL=y
+CONFIG_MPTCP=y
+CONFIG_INET_MPTCP_DIAG=m
+CONFIG_MPTCP_IPV6=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NET_PTP_CLASSIFY=y
+CONFIG_NETWORK_PHY_TIMESTAMPING=y
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=m
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_INGRESS=y
+CONFIG_NETFILTER_EGRESS=y
+CONFIG_NETFILTER_SKIP_EGRESS=y
+CONFIG_NETFILTER_NETLINK=m
+CONFIG_NETFILTER_FAMILY_BRIDGE=y
+CONFIG_NETFILTER_FAMILY_ARP=y
+CONFIG_NETFILTER_BPF_LINK=y
+# CONFIG_NETFILTER_NETLINK_HOOK is not set
+CONFIG_NETFILTER_NETLINK_ACCT=m
+CONFIG_NETFILTER_NETLINK_QUEUE=m
+CONFIG_NETFILTER_NETLINK_LOG=m
+CONFIG_NETFILTER_NETLINK_OSF=m
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_LOG_SYSLOG=m
+CONFIG_NETFILTER_CONNCOUNT=m
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_ZONES=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_TIMEOUT=y
+CONFIG_NF_CONNTRACK_TIMESTAMP=y
+CONFIG_NF_CONNTRACK_LABELS=y
+CONFIG_NF_CONNTRACK_OVS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_BROADCAST=m
+CONFIG_NF_CONNTRACK_NETBIOS_NS=m
+CONFIG_NF_CONNTRACK_SNMP=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NF_CT_NETLINK_TIMEOUT=m
+CONFIG_NF_CT_NETLINK_HELPER=m
+CONFIG_NETFILTER_NETLINK_GLUE_CT=y
+CONFIG_NF_NAT=m
+CONFIG_NF_NAT_AMANDA=m
+CONFIG_NF_NAT_FTP=m
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=m
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+CONFIG_NF_NAT_OVS=y
+CONFIG_NETFILTER_SYNPROXY=m
+CONFIG_NF_TABLES=m
+CONFIG_NF_TABLES_INET=y
+CONFIG_NF_TABLES_NETDEV=y
+CONFIG_NFT_NUMGEN=m
+CONFIG_NFT_CT=m
+CONFIG_NFT_FLOW_OFFLOAD=m
+CONFIG_NFT_CONNLIMIT=m
+CONFIG_NFT_LOG=m
+CONFIG_NFT_LIMIT=m
+CONFIG_NFT_MASQ=m
+CONFIG_NFT_REDIR=m
+CONFIG_NFT_NAT=m
+CONFIG_NFT_TUNNEL=m
+CONFIG_NFT_QUEUE=m
+CONFIG_NFT_QUOTA=m
+CONFIG_NFT_REJECT=m
+CONFIG_NFT_REJECT_INET=m
+CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
+CONFIG_NFT_FIB=m
+CONFIG_NFT_FIB_INET=m
+CONFIG_NFT_XFRM=m
+CONFIG_NFT_SOCKET=m
+CONFIG_NFT_OSF=m
+CONFIG_NFT_TPROXY=m
+# CONFIG_NFT_SYNPROXY is not set
+CONFIG_NF_DUP_NETDEV=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
+CONFIG_NFT_FIB_NETDEV=m
+# CONFIG_NFT_REJECT_NETDEV is not set
+CONFIG_NF_FLOW_TABLE_INET=m
+CONFIG_NF_FLOW_TABLE=m
+# CONFIG_NF_FLOW_TABLE_PROCFS is not set
+CONFIG_NETFILTER_XTABLES=y
+# CONFIG_NETFILTER_XTABLES_COMPAT is not set
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=m
+CONFIG_NETFILTER_XT_CONNMARK=m
+CONFIG_NETFILTER_XT_SET=m
+
+#
+# Xtables targets
+#
+CONFIG_NETFILTER_XT_TARGET_AUDIT=m
+CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=m
+CONFIG_NETFILTER_XT_TARGET_CT=m
+CONFIG_NETFILTER_XT_TARGET_DSCP=m
+CONFIG_NETFILTER_XT_TARGET_HL=m
+CONFIG_NETFILTER_XT_TARGET_HMARK=m
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=m
+CONFIG_NETFILTER_XT_TARGET_LED=m
+CONFIG_NETFILTER_XT_TARGET_LOG=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_NAT=m
+CONFIG_NETFILTER_XT_TARGET_NETMAP=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=m
+CONFIG_NETFILTER_XT_TARGET_RATEEST=m
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=m
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=m
+CONFIG_NETFILTER_XT_TARGET_TEE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+
+#
+# Xtables matches
+#
+CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m
+CONFIG_NETFILTER_XT_MATCH_BPF=m
+CONFIG_NETFILTER_XT_MATCH_CGROUP=m
+CONFIG_NETFILTER_XT_MATCH_CLUSTER=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLABEL=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_CPU=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_DEVGROUP=m
+CONFIG_NETFILTER_XT_MATCH_DSCP=m
+CONFIG_NETFILTER_XT_MATCH_ECN=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_HL=m
+# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_IPVS=m
+CONFIG_NETFILTER_XT_MATCH_L2TP=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_NFACCT=m
+CONFIG_NETFILTER_XT_MATCH_OSF=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SCTP=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+# end of Core Netfilter Configuration
+
+CONFIG_IP_SET=m
+CONFIG_IP_SET_MAX=256
+CONFIG_IP_SET_BITMAP_IP=m
+CONFIG_IP_SET_BITMAP_IPMAC=m
+CONFIG_IP_SET_BITMAP_PORT=m
+CONFIG_IP_SET_HASH_IP=m
+CONFIG_IP_SET_HASH_IPMARK=m
+CONFIG_IP_SET_HASH_IPPORT=m
+CONFIG_IP_SET_HASH_IPPORTIP=m
+CONFIG_IP_SET_HASH_IPPORTNET=m
+CONFIG_IP_SET_HASH_IPMAC=m
+CONFIG_IP_SET_HASH_MAC=m
+CONFIG_IP_SET_HASH_NETPORTNET=m
+CONFIG_IP_SET_HASH_NET=m
+CONFIG_IP_SET_HASH_NETNET=m
+CONFIG_IP_SET_HASH_NETPORT=m
+CONFIG_IP_SET_HASH_NETIFACE=m
+CONFIG_IP_SET_LIST_SET=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_PROTO_SCTP=y
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_FO=m
+CONFIG_IP_VS_OVF=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_MH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+# CONFIG_IP_VS_TWOS is not set
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS MH scheduler
+#
+CONFIG_IP_VS_MH_TAB_INDEX=12
+
+#
+# IPVS application helper
+#
+CONFIG_IP_VS_FTP=m
+CONFIG_IP_VS_NFCT=y
+CONFIG_IP_VS_PE_SIP=m
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=m
+CONFIG_NF_SOCKET_IPV4=m
+CONFIG_NF_TPROXY_IPV4=m
+CONFIG_NF_TABLES_IPV4=y
+CONFIG_NFT_REJECT_IPV4=m
+CONFIG_NFT_DUP_IPV4=m
+CONFIG_NFT_FIB_IPV4=m
+CONFIG_NF_TABLES_ARP=y
+CONFIG_NF_DUP_IPV4=m
+CONFIG_NF_LOG_ARP=m
+CONFIG_NF_LOG_IPV4=m
+CONFIG_NF_REJECT_IPV4=m
+CONFIG_NF_NAT_SNMP_BASIC=m
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_RPFILTER=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_SYNPROXY=m
+CONFIG_IP_NF_NAT=m
+CONFIG_IP_NF_TARGET_MASQUERADE=m
+CONFIG_IP_NF_TARGET_NETMAP=m
+CONFIG_IP_NF_TARGET_REDIRECT=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_SECURITY=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_SOCKET_IPV6=m
+CONFIG_NF_TPROXY_IPV6=m
+CONFIG_NF_TABLES_IPV6=y
+CONFIG_NFT_REJECT_IPV6=m
+CONFIG_NFT_DUP_IPV6=m
+CONFIG_NFT_FIB_IPV6=m
+CONFIG_NF_DUP_IPV6=m
+CONFIG_NF_REJECT_IPV6=m
+CONFIG_NF_LOG_IPV6=m
+CONFIG_IP6_NF_IPTABLES=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RPFILTER=m
+CONFIG_IP6_NF_MATCH_RT=m
+# CONFIG_IP6_NF_MATCH_SRH is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_TARGET_SYNPROXY=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_IP6_NF_SECURITY=m
+CONFIG_IP6_NF_NAT=m
+CONFIG_IP6_NF_TARGET_MASQUERADE=m
+CONFIG_IP6_NF_TARGET_NPT=m
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=m
+CONFIG_NF_TABLES_BRIDGE=m
+# CONFIG_NFT_BRIDGE_META is not set
+CONFIG_NFT_BRIDGE_REJECT=m
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+# CONFIG_BPFILTER is not set
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_SCTP=m
+# CONFIG_SCTP_DBG_OBJCNT is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5 is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+CONFIG_SCTP_COOKIE_HMAC_SHA1=y
+CONFIG_INET_SCTP_DIAG=m
+# CONFIG_RDS is not set
+CONFIG_TIPC=m
+CONFIG_TIPC_MEDIA_IB=y
+CONFIG_TIPC_MEDIA_UDP=y
+CONFIG_TIPC_CRYPTO=y
+CONFIG_TIPC_DIAG=m
+CONFIG_ATM=m
+CONFIG_ATM_CLIP=m
+# CONFIG_ATM_CLIP_NO_ICMP is not set
+CONFIG_ATM_LANE=m
+# CONFIG_ATM_MPOA is not set
+CONFIG_ATM_BR2684=m
+# CONFIG_ATM_BR2684_IPFILTER is not set
+CONFIG_L2TP=m
+CONFIG_L2TP_DEBUGFS=m
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=m
+CONFIG_L2TP_ETH=m
+CONFIG_STP=m
+CONFIG_GARP=m
+CONFIG_MRP=m
+CONFIG_BRIDGE=m
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
+# CONFIG_BRIDGE_MRP is not set
+# CONFIG_BRIDGE_CFM is not set
+# CONFIG_NET_DSA is not set
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_VLAN_8021Q_MVRP=y
+CONFIG_LLC=m
+# CONFIG_LLC2 is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+CONFIG_6LOWPAN=m
+# CONFIG_6LOWPAN_DEBUGFS is not set
+# CONFIG_6LOWPAN_NHC is not set
+CONFIG_IEEE802154=m
+# CONFIG_IEEE802154_NL802154_EXPERIMENTAL is not set
+CONFIG_IEEE802154_SOCKET=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_MULTIQ=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFB=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+# CONFIG_NET_SCH_CBS is not set
+# CONFIG_NET_SCH_ETF is not set
+CONFIG_NET_SCH_MQPRIO_LIB=m
+# CONFIG_NET_SCH_TAPRIO is not set
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_DRR=m
+CONFIG_NET_SCH_MQPRIO=m
+# CONFIG_NET_SCH_SKBPRIO is not set
+CONFIG_NET_SCH_CHOKE=m
+CONFIG_NET_SCH_QFQ=m
+CONFIG_NET_SCH_CODEL=m
+CONFIG_NET_SCH_FQ_CODEL=y
+# CONFIG_NET_SCH_CAKE is not set
+CONFIG_NET_SCH_FQ=m
+CONFIG_NET_SCH_HHF=m
+CONFIG_NET_SCH_PIE=m
+# CONFIG_NET_SCH_FQ_PIE is not set
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_SCH_PLUG=m
+# CONFIG_NET_SCH_ETS is not set
+CONFIG_NET_SCH_DEFAULT=y
+# CONFIG_DEFAULT_FQ is not set
+# CONFIG_DEFAULT_CODEL is not set
+CONFIG_DEFAULT_FQ_CODEL=y
+# CONFIG_DEFAULT_SFQ is not set
+# CONFIG_DEFAULT_PFIFO_FAST is not set
+CONFIG_DEFAULT_NET_SCH="fq_codel"
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_CLS_U32_PERF=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_CGROUP=y
+CONFIG_NET_CLS_BPF=m
+CONFIG_NET_CLS_FLOWER=m
+CONFIG_NET_CLS_MATCHALL=m
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+CONFIG_NET_EMATCH_CMP=m
+CONFIG_NET_EMATCH_NBYTE=m
+CONFIG_NET_EMATCH_U32=m
+CONFIG_NET_EMATCH_META=m
+CONFIG_NET_EMATCH_TEXT=m
+CONFIG_NET_EMATCH_IPSET=m
+# CONFIG_NET_EMATCH_IPT is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_SAMPLE=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_ACT_CSUM=m
+# CONFIG_NET_ACT_MPLS is not set
+CONFIG_NET_ACT_VLAN=m
+CONFIG_NET_ACT_BPF=m
+# CONFIG_NET_ACT_CONNMARK is not set
+# CONFIG_NET_ACT_CTINFO is not set
+CONFIG_NET_ACT_SKBMOD=m
+# CONFIG_NET_ACT_IFE is not set
+CONFIG_NET_ACT_TUNNEL_KEY=m
+CONFIG_NET_ACT_CT=m
+# CONFIG_NET_ACT_GATE is not set
+CONFIG_NET_TC_SKB_EXT=y
+CONFIG_NET_SCH_FIFO=y
+CONFIG_DCB=y
+CONFIG_DNS_RESOLVER=m
+# CONFIG_BATMAN_ADV is not set
+CONFIG_OPENVSWITCH=m
+CONFIG_OPENVSWITCH_GRE=m
+CONFIG_OPENVSWITCH_VXLAN=m
+CONFIG_OPENVSWITCH_GENEVE=m
+CONFIG_VSOCKETS=m
+CONFIG_VSOCKETS_DIAG=m
+CONFIG_VSOCKETS_LOOPBACK=m
+CONFIG_VMWARE_VMCI_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_VIRTIO_VSOCKETS_COMMON=m
+CONFIG_HYPERV_VSOCKETS=m
+CONFIG_NETLINK_DIAG=m
+CONFIG_MPLS=y
+CONFIG_NET_MPLS_GSO=y
+CONFIG_MPLS_ROUTING=m
+CONFIG_MPLS_IPTUNNEL=m
+CONFIG_NET_NSH=y
+# CONFIG_HSR is not set
+CONFIG_NET_SWITCHDEV=y
+CONFIG_NET_L3_MASTER_DEV=y
+# CONFIG_QRTR is not set
+# CONFIG_NET_NCSI is not set
+CONFIG_PCPU_DEV_REFCNT=y
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CGROUP_NET_CLASSID=y
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_BPF_STREAM_PARSER=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=m
+CONFIG_NET_DROP_MONITOR=y
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+CONFIG_BT=m
+CONFIG_BT_BREDR=y
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_CMTP=m
+CONFIG_BT_HIDP=m
+CONFIG_BT_HS=y
+CONFIG_BT_LE=y
+CONFIG_BT_LE_L2CAP_ECRED=y
+# CONFIG_BT_6LOWPAN is not set
+# CONFIG_BT_LEDS is not set
+# CONFIG_BT_MSFTEXT is not set
+# CONFIG_BT_AOSPEXT is not set
+CONFIG_BT_DEBUGFS=y
+# CONFIG_BT_SELFTEST is not set
+
+#
+# Bluetooth device drivers
+#
+CONFIG_BT_INTEL=m
+CONFIG_BT_BCM=m
+CONFIG_BT_RTL=m
+CONFIG_BT_HCIBTUSB=m
+CONFIG_BT_HCIBTUSB_AUTOSUSPEND=y
+CONFIG_BT_HCIBTUSB_POLL_SYNC=y
+CONFIG_BT_HCIBTUSB_BCM=y
+# CONFIG_BT_HCIBTUSB_MTK is not set
+CONFIG_BT_HCIBTUSB_RTL=y
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_H4=y
+CONFIG_BT_HCIUART_BCSP=y
+CONFIG_BT_HCIUART_ATH3K=y
+# CONFIG_BT_HCIUART_INTEL is not set
+# CONFIG_BT_HCIUART_AG6XX is not set
+CONFIG_BT_HCIBCM203X=m
+# CONFIG_BT_HCIBCM4377 is not set
+CONFIG_BT_HCIBPA10X=m
+CONFIG_BT_HCIBFUSB=m
+CONFIG_BT_HCIVHCI=m
+CONFIG_BT_MRVL=m
+CONFIG_BT_MRVL_SDIO=m
+CONFIG_BT_ATH3K=m
+# CONFIG_BT_MTKSDIO is not set
+# CONFIG_BT_VIRTIO is not set
+# end of Bluetooth device drivers
+
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+CONFIG_STREAM_PARSER=y
+# CONFIG_MCTP is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=m
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+CONFIG_CFG80211_REQUIRE_SIGNED_REGDB=y
+CONFIG_CFG80211_USE_KERNEL_REGDB_KEYS=y
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_DEBUGFS is not set
+CONFIG_CFG80211_CRDA_SUPPORT=y
+# CONFIG_CFG80211_WEXT is not set
+CONFIG_MAC80211=m
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+# CONFIG_MAC80211_MESH is not set
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+CONFIG_RFKILL=m
+CONFIG_RFKILL_LEDS=y
+CONFIG_RFKILL_INPUT=y
+# CONFIG_RFKILL_GPIO is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+CONFIG_CEPH_LIB=m
+# CONFIG_CEPH_LIB_PRETTYDEBUG is not set
+CONFIG_CEPH_LIB_USE_DNS_RESOLVER=y
+# CONFIG_NFC is not set
+CONFIG_PSAMPLE=m
+# CONFIG_NET_IFE is not set
+CONFIG_LWTUNNEL=y
+CONFIG_LWTUNNEL_BPF=y
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_SOCK_VALIDATE_XMIT=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_SOCK_MSG=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+# CONFIG_PAGE_POOL_STATS is not set
+CONFIG_FAILOVER=m
+CONFIG_ETHTOOL_NETLINK=y
+
+#
+# Device Drivers
+#
+CONFIG_HAVE_EISA=y
+# CONFIG_EISA is not set
+CONFIG_HAVE_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_HOTPLUG_PCI_PCIE=y
+CONFIG_PCIEAER=y
+CONFIG_PCIEAER_INJECT=m
+CONFIG_PCIE_ECRC=y
+CONFIG_PCIEASPM=y
+CONFIG_PCIEASPM_DEFAULT=y
+# CONFIG_PCIEASPM_POWERSAVE is not set
+# CONFIG_PCIEASPM_POWER_SUPERSAVE is not set
+# CONFIG_PCIEASPM_PERFORMANCE is not set
+CONFIG_PCIE_PME=y
+CONFIG_PCIE_DPC=y
+# CONFIG_PCIE_PTM is not set
+CONFIG_PCIE_EDR=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+CONFIG_PCI_STUB=y
+CONFIG_PCI_PF_STUB=y
+CONFIG_PCI_ATS=y
+CONFIG_PCI_DOE=y
+CONFIG_PCI_LOCKLESS_CONFIG=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_PRI=y
+CONFIG_PCI_PASID=y
+# CONFIG_PCI_P2PDMA is not set
+CONFIG_PCI_LABEL=y
+CONFIG_PCI_HYPERV=m
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=64
+CONFIG_HOTPLUG_PCI=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_HOTPLUG_PCI_ACPI_IBM=m
+# CONFIG_HOTPLUG_PCI_CPCI is not set
+CONFIG_HOTPLUG_PCI_SHPC=y
+
+#
+# PCI controller drivers
+#
+CONFIG_VMD=y
+CONFIG_PCI_HYPERV_INTERFACE=m
+
+#
+# Cadence-based PCIe controllers
+#
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+CONFIG_CXL_BUS=m
+CONFIG_CXL_PCI=m
+# CONFIG_CXL_MEM_RAW_COMMANDS is not set
+CONFIG_CXL_ACPI=m
+CONFIG_CXL_PMEM=m
+CONFIG_CXL_MEM=m
+CONFIG_CXL_PORT=m
+CONFIG_CXL_SUSPEND=y
+CONFIG_CXL_REGION=y
+# CONFIG_CXL_REGION_INVALIDATION_TEST is not set
+CONFIG_CXL_PMU=m
+CONFIG_PCCARD=y
+# CONFIG_PCMCIA is not set
+CONFIG_CARDBUS=y
+
+#
+# PC-card bridges
+#
+CONFIG_YENTA=m
+CONFIG_YENTA_O2=y
+CONFIG_YENTA_RICOH=y
+CONFIG_YENTA_TI=y
+CONFIG_YENTA_ENE_TUNE=y
+CONFIG_YENTA_TOSHIBA=y
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+CONFIG_AUXILIARY_BUS=y
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_DEBUG=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set
+# CONFIG_FW_LOADER_COMPRESS is not set
+CONFIG_FW_CACHE=y
+CONFIG_FW_UPLOAD=y
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+CONFIG_ALLOW_DEV_COREDUMP=y
+CONFIG_DEV_COREDUMP=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+CONFIG_HMEM_REPORTING=y
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_SYS_HYPERVISOR=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=m
+CONFIG_REGMAP_SPI=m
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MHI_BUS is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# end of ARM System Control and Management Interface Protocol
+
+CONFIG_EDD=m
+# CONFIG_EDD_OFF is not set
+CONFIG_FIRMWARE_MEMMAP=y
+CONFIG_DMIID=y
+CONFIG_DMI_SYSFS=y
+CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_ISCSI_IBFT=m
+CONFIG_FW_CFG_SYSFS=y
+# CONFIG_FW_CFG_SYSFS_CMDLINE is not set
+CONFIG_SYSFB=y
+# CONFIG_SYSFB_SIMPLEFB is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+
+#
+# EFI (Extensible Firmware Interface) Support
+#
+CONFIG_EFI_ESRT=y
+CONFIG_EFI_VARS_PSTORE=y
+CONFIG_EFI_VARS_PSTORE_DEFAULT_DISABLE=y
+CONFIG_EFI_SOFT_RESERVE=y
+CONFIG_EFI_DXE_MEM_ATTRIBUTES=y
+CONFIG_EFI_RUNTIME_WRAPPERS=y
+# CONFIG_EFI_BOOTLOADER_CONTROL is not set
+# CONFIG_EFI_CAPSULE_LOADER is not set
+# CONFIG_EFI_TEST is not set
+# CONFIG_APPLE_PROPERTIES is not set
+# CONFIG_RESET_ATTACK_MITIGATION is not set
+CONFIG_EFI_RCI2_TABLE=y
+# CONFIG_EFI_DISABLE_PCI_DMA is not set
+CONFIG_EFI_EARLYCON=y
+CONFIG_EFI_CUSTOM_SSDT_OVERLAYS=y
+# CONFIG_EFI_DISABLE_RUNTIME is not set
+CONFIG_EFI_COCO_SECRET=y
+CONFIG_UNACCEPTED_MEMORY=y
+# end of EFI (Extensible Firmware Interface) Support
+
+CONFIG_UEFI_CPER=y
+CONFIG_UEFI_CPER_X86=y
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
+CONFIG_MTD=m
+# CONFIG_MTD_TESTS is not set
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_AR7_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=m
+CONFIG_MTD_BLOCK=m
+# CONFIG_MTD_BLOCK_RO is not set
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_SWAP is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
+# CONFIG_MTD_SPI_NOR is not set
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
+# CONFIG_MTD_UBI_GLUEBI is not set
+# CONFIG_MTD_UBI_BLOCK is not set
+# CONFIG_MTD_HYPERBUS is not set
+# CONFIG_OF is not set
+CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y
+CONFIG_PARPORT=m
+CONFIG_PARPORT_PC=m
+CONFIG_PARPORT_SERIAL=m
+# CONFIG_PARPORT_PC_FIFO is not set
+# CONFIG_PARPORT_PC_SUPERIO is not set
+CONFIG_PARPORT_1284=y
+CONFIG_PARPORT_NOT_PC=y
+CONFIG_PNP=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+
+#
+# Protocols
+#
+CONFIG_PNPACPI=y
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_NULL_BLK=m
+# CONFIG_BLK_DEV_FD is not set
+CONFIG_CDROM=m
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+CONFIG_ZRAM=m
+CONFIG_ZRAM_DEF_COMP_LZORLE=y
+# CONFIG_ZRAM_DEF_COMP_ZSTD is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4 is not set
+# CONFIG_ZRAM_DEF_COMP_LZO is not set
+# CONFIG_ZRAM_DEF_COMP_LZ4HC is not set
+CONFIG_ZRAM_DEF_COMP="lzo-rle"
+CONFIG_ZRAM_WRITEBACK=y
+# CONFIG_ZRAM_MEMORY_TRACKING is not set
+# CONFIG_ZRAM_MULTI_COMP is not set
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=0
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=m
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+# CONFIG_ATA_OVER_ETH is not set
+CONFIG_XEN_BLKDEV_FRONTEND=m
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
+CONFIG_BLK_DEV_UBLK=m
+CONFIG_BLKDEV_UBLK_LEGACY_OPCODES=y
+
+#
+# NVME Support
+#
+CONFIG_NVME_CORE=m
+CONFIG_BLK_DEV_NVME=m
+CONFIG_NVME_MULTIPATH=y
+# CONFIG_NVME_VERBOSE_ERRORS is not set
+# CONFIG_NVME_HWMON is not set
+CONFIG_NVME_FABRICS=m
+CONFIG_NVME_RDMA=m
+CONFIG_NVME_FC=m
+CONFIG_NVME_TCP=m
+# CONFIG_NVME_AUTH is not set
+CONFIG_NVME_TARGET=m
+# CONFIG_NVME_TARGET_PASSTHRU is not set
+CONFIG_NVME_TARGET_LOOP=m
+CONFIG_NVME_TARGET_RDMA=m
+CONFIG_NVME_TARGET_FC=m
+CONFIG_NVME_TARGET_FCLOOP=m
+CONFIG_NVME_TARGET_TCP=m
+# CONFIG_NVME_TARGET_AUTH is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+CONFIG_SENSORS_LIS3LV02D=m
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_IBM_ASM is not set
+# CONFIG_PHANTOM is not set
+CONFIG_TIFM_CORE=m
+CONFIG_TIFM_7XX1=m
+# CONFIG_ICS932S401 is not set
+CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_SGI_XP=m
+CONFIG_HP_ILO=m
+CONFIG_SGI_GRU=m
+# CONFIG_SGI_GRU_DEBUG is not set
+CONFIG_APDS9802ALS=m
+CONFIG_ISL29003=m
+CONFIG_ISL29020=m
+CONFIG_SENSORS_TSL2550=m
+CONFIG_SENSORS_BH1770=m
+CONFIG_SENSORS_APDS990X=m
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+CONFIG_VMWARE_BALLOON=m
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+CONFIG_MISC_RTSX=m
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_AT24 is not set
+# CONFIG_EEPROM_AT25 is not set
+CONFIG_EEPROM_LEGACY=m
+CONFIG_EEPROM_MAX6875=m
+CONFIG_EEPROM_93CX6=m
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# end of EEPROM support
+
+CONFIG_CB710_CORE=m
+# CONFIG_CB710_DEBUG is not set
+CONFIG_CB710_DEBUG_ASSUMPTIONS=y
+
+#
+# Texas Instruments shared transport line discipline
+#
+# CONFIG_TI_ST is not set
+# end of Texas Instruments shared transport line discipline
+
+CONFIG_SENSORS_LIS3_I2C=m
+
+#
+# Altera FPGA firmware download module (requires I2C)
+#
+CONFIG_ALTERA_STAPL=m
+CONFIG_INTEL_MEI=m
+CONFIG_INTEL_MEI_ME=m
+# CONFIG_INTEL_MEI_TXE is not set
+# CONFIG_INTEL_MEI_GSC is not set
+# CONFIG_INTEL_MEI_HDCP is not set
+# CONFIG_INTEL_MEI_PXP is not set
+# CONFIG_INTEL_MEI_GSC_PROXY is not set
+CONFIG_VMWARE_VMCI=m
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+CONFIG_MISC_RTSX_PCI=m
+CONFIG_MISC_RTSX_USB=m
+CONFIG_UACCE=m
+CONFIG_PVPANIC=y
+# CONFIG_PVPANIC_MMIO is not set
+# CONFIG_PVPANIC_PCI is not set
+# CONFIG_GP_PCI1XXXX is not set
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_CHR_DEV_SG=m
+CONFIG_BLK_DEV_BSG=y
+CONFIG_CHR_DEV_SCH=m
+CONFIG_SCSI_ENCLOSURE=m
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=m
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_SCSI_ISCSI_ATTRS=m
+CONFIG_SCSI_SAS_ATTRS=m
+CONFIG_SCSI_SAS_LIBSAS=m
+CONFIG_SCSI_SAS_ATA=y
+CONFIG_SCSI_SAS_HOST_SMP=y
+CONFIG_SCSI_SRP_ATTRS=m
+# end of SCSI Transports
+
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=m
+CONFIG_ISCSI_BOOT_SYSFS=m
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+CONFIG_SCSI_CXGB4_ISCSI=m
+CONFIG_SCSI_BNX2_ISCSI=m
+CONFIG_SCSI_BNX2X_FCOE=m
+CONFIG_BE2ISCSI=m
+# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
+CONFIG_SCSI_HPSA=m
+# CONFIG_SCSI_3W_9XXX is not set
+# CONFIG_SCSI_3W_SAS is not set
+# CONFIG_SCSI_ACARD is not set
+CONFIG_SCSI_AACRAID=m
+# CONFIG_SCSI_AIC7XXX is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_SCSI_ESAS2R is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+CONFIG_MEGARAID_SAS=m
+CONFIG_SCSI_MPT3SAS=m
+CONFIG_SCSI_MPT2SAS_MAX_SGE=128
+CONFIG_SCSI_MPT3SAS_MAX_SGE=128
+CONFIG_SCSI_MPT2SAS=m
+CONFIG_SCSI_MPI3MR=m
+CONFIG_SCSI_SMARTPQI=m
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_SCSI_MYRB is not set
+# CONFIG_SCSI_MYRS is not set
+CONFIG_VMWARE_PVSCSI=m
+# CONFIG_XEN_SCSI_FRONTEND is not set
+CONFIG_HYPERV_STORAGE=m
+CONFIG_LIBFC=m
+CONFIG_LIBFCOE=m
+CONFIG_FCOE=m
+CONFIG_FCOE_FNIC=m
+# CONFIG_SCSI_SNIC is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_FDOMAIN_PCI is not set
+CONFIG_SCSI_ISCI=m
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_PPA is not set
+# CONFIG_SCSI_IMM is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_IPR is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+CONFIG_SCSI_QLA_FC=m
+# CONFIG_TCM_QLA2XXX is not set
+CONFIG_SCSI_QLA_ISCSI=m
+CONFIG_QEDI=m
+CONFIG_QEDF=m
+CONFIG_SCSI_LPFC=m
+# CONFIG_SCSI_LPFC_DEBUG_FS is not set
+# CONFIG_SCSI_EFCT is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_AM53C974 is not set
+# CONFIG_SCSI_WD719X is not set
+CONFIG_SCSI_DEBUG=m
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_BFA_FC is not set
+CONFIG_SCSI_VIRTIO=m
+CONFIG_SCSI_CHELSIO_FCOE=m
+CONFIG_SCSI_DH=y
+CONFIG_SCSI_DH_RDAC=y
+CONFIG_SCSI_DH_HP_SW=y
+CONFIG_SCSI_DH_EMC=y
+CONFIG_SCSI_DH_ALUA=y
+# end of SCSI device support
+
+CONFIG_ATA=m
+CONFIG_SATA_HOST=y
+CONFIG_PATA_TIMINGS=y
+CONFIG_ATA_VERBOSE_ERROR=y
+CONFIG_ATA_FORCE=y
+CONFIG_ATA_ACPI=y
+# CONFIG_SATA_ZPODD is not set
+CONFIG_SATA_PMP=y
+
+#
+# Controllers with non-SFF native interface
+#
+CONFIG_SATA_AHCI=m
+CONFIG_SATA_MOBILE_LPM_POLICY=0
+CONFIG_SATA_AHCI_PLATFORM=m
+# CONFIG_AHCI_DWC is not set
+# CONFIG_SATA_INIC162X is not set
+# CONFIG_SATA_ACARD_AHCI is not set
+# CONFIG_SATA_SIL24 is not set
+CONFIG_ATA_SFF=y
+
+#
+# SFF controllers with custom DMA interface
+#
+# CONFIG_PDC_ADMA is not set
+# CONFIG_SATA_QSTOR is not set
+# CONFIG_SATA_SX4 is not set
+CONFIG_ATA_BMDMA=y
+
+#
+# SATA SFF controllers with BMDMA
+#
+CONFIG_ATA_PIIX=m
+# CONFIG_SATA_DWC is not set
+# CONFIG_SATA_MV is not set
+# CONFIG_SATA_NV is not set
+# CONFIG_SATA_PROMISE is not set
+# CONFIG_SATA_SIL is not set
+# CONFIG_SATA_SIS is not set
+# CONFIG_SATA_SVW is not set
+# CONFIG_SATA_ULI is not set
+# CONFIG_SATA_VIA is not set
+# CONFIG_SATA_VITESSE is not set
+CONFIG_SATA_ZHAOXIN=m
+
+#
+# PATA SFF controllers with BMDMA
+#
+# CONFIG_PATA_ALI is not set
+# CONFIG_PATA_AMD is not set
+# CONFIG_PATA_ARTOP is not set
+# CONFIG_PATA_ATIIXP is not set
+# CONFIG_PATA_ATP867X is not set
+# CONFIG_PATA_CMD64X is not set
+# CONFIG_PATA_CYPRESS is not set
+# CONFIG_PATA_EFAR is not set
+# CONFIG_PATA_HPT366 is not set
+# CONFIG_PATA_HPT37X is not set
+# CONFIG_PATA_HPT3X2N is not set
+# CONFIG_PATA_HPT3X3 is not set
+# CONFIG_PATA_IT8213 is not set
+# CONFIG_PATA_IT821X is not set
+# CONFIG_PATA_JMICRON is not set
+# CONFIG_PATA_MARVELL is not set
+# CONFIG_PATA_NETCELL is not set
+# CONFIG_PATA_NINJA32 is not set
+# CONFIG_PATA_NS87415 is not set
+# CONFIG_PATA_OLDPIIX is not set
+# CONFIG_PATA_OPTIDMA is not set
+# CONFIG_PATA_PDC2027X is not set
+# CONFIG_PATA_PDC_OLD is not set
+# CONFIG_PATA_RADISYS is not set
+# CONFIG_PATA_RDC is not set
+# CONFIG_PATA_SCH is not set
+# CONFIG_PATA_SERVERWORKS is not set
+# CONFIG_PATA_SIL680 is not set
+# CONFIG_PATA_SIS is not set
+# CONFIG_PATA_TOSHIBA is not set
+# CONFIG_PATA_TRIFLEX is not set
+# CONFIG_PATA_VIA is not set
+# CONFIG_PATA_WINBOND is not set
+
+#
+# PIO-only SFF controllers
+#
+# CONFIG_PATA_CMD640_PCI is not set
+# CONFIG_PATA_MPIIX is not set
+# CONFIG_PATA_NS87410 is not set
+# CONFIG_PATA_OPTI is not set
+# CONFIG_PATA_RZ1000 is not set
+# CONFIG_PATA_PARPORT is not set
+
+#
+# Generic fallback / legacy drivers
+#
+# CONFIG_PATA_ACPI is not set
+CONFIG_ATA_GENERIC=m
+# CONFIG_PATA_LEGACY is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_BITMAP_FILE=y
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+# CONFIG_MD_MULTIPATH is not set
+CONFIG_MD_FAULTY=m
+CONFIG_MD_CLUSTER=m
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_DEBUG=y
+CONFIG_DM_BUFIO=m
+# CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set
+CONFIG_DM_BIO_PRISON=m
+CONFIG_DM_PERSISTENT_DATA=m
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_THIN_PROVISIONING=m
+CONFIG_DM_CACHE=m
+CONFIG_DM_CACHE_SMQ=m
+CONFIG_DM_WRITECACHE=m
+# CONFIG_DM_EBS is not set
+CONFIG_DM_ERA=m
+# CONFIG_DM_CLONE is not set
+CONFIG_DM_MIRROR=m
+CONFIG_DM_LOG_USERSPACE=m
+CONFIG_DM_RAID=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_DM_MULTIPATH_QL=m
+CONFIG_DM_MULTIPATH_ST=m
+# CONFIG_DM_MULTIPATH_HST is not set
+# CONFIG_DM_MULTIPATH_IOA is not set
+CONFIG_DM_DELAY=m
+# CONFIG_DM_DUST is not set
+CONFIG_DM_UEVENT=y
+CONFIG_DM_FLAKEY=m
+CONFIG_DM_VERITY=m
+# CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG is not set
+# CONFIG_DM_VERITY_FEC is not set
+CONFIG_DM_SWITCH=m
+CONFIG_DM_LOG_WRITES=m
+CONFIG_DM_INTEGRITY=m
+# CONFIG_DM_ZONED is not set
+CONFIG_DM_AUDIT=y
+CONFIG_TARGET_CORE=m
+CONFIG_TCM_IBLOCK=m
+CONFIG_TCM_FILEIO=m
+CONFIG_TCM_PSCSI=m
+CONFIG_TCM_USER2=m
+CONFIG_LOOPBACK_TARGET=m
+# CONFIG_TCM_FC is not set
+CONFIG_ISCSI_TARGET=m
+CONFIG_ISCSI_TARGET_CXGB4=m
+# CONFIG_SBP_TARGET is not set
+# CONFIG_REMOTE_TARGET is not set
+CONFIG_FUSION=y
+CONFIG_FUSION_SPI=m
+# CONFIG_FUSION_FC is not set
+CONFIG_FUSION_SAS=m
+CONFIG_FUSION_MAX_SGE=128
+# CONFIG_FUSION_CTL is not set
+CONFIG_FUSION_LOGGING=y
+
+#
+# IEEE 1394 (FireWire) support
+#
+CONFIG_FIREWIRE=m
+CONFIG_FIREWIRE_OHCI=m
+CONFIG_FIREWIRE_SBP2=m
+CONFIG_FIREWIRE_NET=m
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_MACINTOSH_DRIVERS=y
+CONFIG_MAC_EMUMOUSEBTN=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=m
+CONFIG_NET_CORE=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_WIREGUARD=m
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+CONFIG_NET_FC=y
+CONFIG_IFB=m
+CONFIG_NET_TEAM=m
+CONFIG_NET_TEAM_MODE_BROADCAST=m
+CONFIG_NET_TEAM_MODE_ROUNDROBIN=m
+CONFIG_NET_TEAM_MODE_RANDOM=m
+CONFIG_NET_TEAM_MODE_ACTIVEBACKUP=m
+CONFIG_NET_TEAM_MODE_LOADBALANCE=m
+CONFIG_MACVLAN=m
+CONFIG_MACVTAP=m
+CONFIG_IPVLAN_L3S=y
+CONFIG_IPVLAN=m
+CONFIG_IPVTAP=m
+CONFIG_VXLAN=m
+CONFIG_GENEVE=m
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_AMT is not set
+CONFIG_MACSEC=m
+CONFIG_NETCONSOLE=m
+CONFIG_NETCONSOLE_DYNAMIC=y
+# CONFIG_NETCONSOLE_EXTENDED_LOG is not set
+CONFIG_NETPOLL=y
+CONFIG_NET_POLL_CONTROLLER=y
+CONFIG_TUN=m
+CONFIG_TAP=m
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=m
+CONFIG_VIRTIO_NET=m
+CONFIG_NLMON=m
+CONFIG_NET_VRF=m
+CONFIG_VSOCKMON=m
+# CONFIG_ARCNET is not set
+# CONFIG_ATM_DRIVERS is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=m
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+CONFIG_NET_VENDOR_AMAZON=y
+CONFIG_ENA_ETHERNET=m
+# CONFIG_NET_VENDOR_AMD is not set
+CONFIG_NET_VENDOR_AQUANTIA=y
+CONFIG_AQTION=m
+# CONFIG_NET_VENDOR_ARC is not set
+CONFIG_NET_VENDOR_ASIX=y
+# CONFIG_SPI_AX88796C is not set
+CONFIG_NET_VENDOR_ATHEROS=y
+CONFIG_ATL2=m
+CONFIG_ATL1=m
+CONFIG_ATL1E=m
+CONFIG_ATL1C=m
+CONFIG_ALX=m
+# CONFIG_CX_ECAT is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCMGENET is not set
+CONFIG_BNX2=m
+CONFIG_CNIC=m
+CONFIG_TIGON3=m
+CONFIG_TIGON3_HWMON=y
+CONFIG_BNX2X=m
+CONFIG_BNX2X_SRIOV=y
+# CONFIG_SYSTEMPORT is not set
+CONFIG_BNXT=m
+CONFIG_BNXT_SRIOV=y
+CONFIG_BNXT_FLOWER_OFFLOAD=y
+CONFIG_BNXT_DCB=y
+CONFIG_BNXT_HWMON=y
+# CONFIG_NET_VENDOR_CADENCE is not set
+CONFIG_NET_VENDOR_CAVIUM=y
+# CONFIG_THUNDER_NIC_PF is not set
+# CONFIG_THUNDER_NIC_VF is not set
+# CONFIG_THUNDER_NIC_BGX is not set
+# CONFIG_THUNDER_NIC_RGX is not set
+CONFIG_CAVIUM_PTP=y
+CONFIG_LIQUIDIO_CORE=m
+CONFIG_LIQUIDIO=m
+CONFIG_LIQUIDIO_VF=m
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+# CONFIG_CHELSIO_T3 is not set
+CONFIG_CHELSIO_T4=m
+# CONFIG_CHELSIO_T4_DCB is not set
+CONFIG_CHELSIO_T4VF=m
+CONFIG_CHELSIO_LIB=m
+CONFIG_CHELSIO_INLINE_CRYPTO=y
+CONFIG_CHELSIO_IPSEC_INLINE=m
+# CONFIG_CHELSIO_TLS_DEVICE is not set
+CONFIG_NET_VENDOR_CISCO=y
+CONFIG_ENIC=m
+# CONFIG_NET_VENDOR_CORTINA is not set
+CONFIG_NET_VENDOR_DAVICOM=y
+# CONFIG_DM9051 is not set
+CONFIG_DNET=m
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+CONFIG_NET_VENDOR_EMULEX=y
+CONFIG_BE2NET=m
+CONFIG_BE2NET_HWMON=y
+# CONFIG_BE2NET_BE2 is not set
+# CONFIG_BE2NET_BE3 is not set
+CONFIG_BE2NET_LANCER=y
+CONFIG_BE2NET_SKYHAWK=y
+CONFIG_NET_VENDOR_ENGLEDER=y
+# CONFIG_TSNEP is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+CONFIG_NET_VENDOR_FUNGIBLE=y
+# CONFIG_FUN_ETH is not set
+CONFIG_NET_VENDOR_GOOGLE=y
+CONFIG_GVE=m
+CONFIG_NET_VENDOR_HUAWEI=y
+CONFIG_HINIC=m
+# CONFIG_NET_VENDOR_I825XX is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+CONFIG_E1000=m
+CONFIG_E1000E=m
+CONFIG_E1000E_HWTS=y
+CONFIG_IGB=m
+CONFIG_IGB_HWMON=y
+CONFIG_IGB_DCA=y
+CONFIG_IGBVF=m
+CONFIG_IXGBE=m
+CONFIG_IXGBE_HWMON=y
+CONFIG_IXGBE_DCA=y
+CONFIG_IXGBE_DCB=y
+CONFIG_IXGBE_IPSEC=y
+CONFIG_IXGBEVF=m
+CONFIG_IXGBEVF_IPSEC=y
+CONFIG_I40E=m
+CONFIG_I40E_DCB=y
+CONFIG_IAVF=m
+CONFIG_I40EVF=m
+CONFIG_ICE=m
+CONFIG_ICE_SWITCHDEV=y
+CONFIG_ICE_HWTS=y
+CONFIG_FM10K=m
+CONFIG_IGC=m
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_ADI=y
+# CONFIG_ADIN1110 is not set
+CONFIG_NET_VENDOR_LITEX=y
+# CONFIG_NET_VENDOR_MARVELL is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+CONFIG_MLX4_EN=m
+CONFIG_MLX4_EN_DCB=y
+CONFIG_MLX4_CORE=m
+CONFIG_MLX4_DEBUG=y
+# CONFIG_MLX4_CORE_GEN2 is not set
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_FPGA=y
+CONFIG_MLX5_CORE_EN=y
+CONFIG_MLX5_EN_ARFS=y
+CONFIG_MLX5_EN_RXNFC=y
+CONFIG_MLX5_MPFS=y
+CONFIG_MLX5_ESWITCH=y
+CONFIG_MLX5_BRIDGE=y
+CONFIG_MLX5_CLS_ACT=y
+CONFIG_MLX5_TC_CT=y
+CONFIG_MLX5_TC_SAMPLE=y
+CONFIG_MLX5_CORE_EN_DCB=y
+# CONFIG_MLX5_CORE_IPOIB is not set
+# CONFIG_MLX5_MACSEC is not set
+# CONFIG_MLX5_EN_IPSEC is not set
+# CONFIG_MLX5_EN_TLS is not set
+CONFIG_MLX5_SW_STEERING=y
+# CONFIG_MLX5_SF is not set
+CONFIG_MLXSW_CORE=m
+CONFIG_MLXSW_CORE_HWMON=y
+CONFIG_MLXSW_CORE_THERMAL=y
+CONFIG_MLXSW_PCI=m
+CONFIG_MLXSW_I2C=m
+CONFIG_MLXSW_SPECTRUM=m
+CONFIG_MLXSW_SPECTRUM_DCB=y
+CONFIG_MLXSW_MINIMAL=m
+CONFIG_MLXFW=m
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+CONFIG_NET_VENDOR_MICROSOFT=y
+# CONFIG_MICROSOFT_MANA is not set
+CONFIG_NET_VENDOR_MYRI=y
+CONFIG_MYRI10GE=m
+CONFIG_MYRI10GE_DCA=y
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+CONFIG_NET_VENDOR_NETRONOME=y
+CONFIG_NFP=m
+CONFIG_NFP_APP_FLOWER=y
+CONFIG_NFP_APP_ABM_NIC=y
+CONFIG_NFP_NET_IPSEC=y
+# CONFIG_NFP_DEBUG is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+CONFIG_NET_VENDOR_OKI=y
+CONFIG_ETHOC=m
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+CONFIG_QLA3XXX=m
+# CONFIG_QLCNIC is not set
+CONFIG_NETXEN_NIC=m
+CONFIG_QED=m
+CONFIG_QED_LL2=y
+CONFIG_QED_SRIOV=y
+CONFIG_QEDE=m
+CONFIG_QED_RDMA=y
+CONFIG_QED_ISCSI=y
+CONFIG_QED_FCOE=y
+CONFIG_QED_OOO=y
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_ATP is not set
+CONFIG_8139CP=m
+CONFIG_8139TOO=m
+# CONFIG_8139TOO_PIO is not set
+# CONFIG_8139TOO_TUNE_TWISTER is not set
+CONFIG_8139TOO_8129=y
+# CONFIG_8139_OLD_RX_RESET is not set
+CONFIG_R8169=m
+# CONFIG_NET_VENDOR_RENESAS is not set
+CONFIG_NET_VENDOR_ROCKER=y
+CONFIG_ROCKER=m
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+CONFIG_NET_VENDOR_SOLARFLARE=y
+CONFIG_SFC=m
+CONFIG_SFC_MTD=y
+CONFIG_SFC_MCDI_MON=y
+CONFIG_SFC_SRIOV=y
+CONFIG_SFC_MCDI_LOGGING=y
+# CONFIG_SFC_FALCON is not set
+# CONFIG_SFC_SIENA is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+CONFIG_NET_VENDOR_VERTEXCOM=y +# CONFIG_MSE102X is not set +# CONFIG_NET_VENDOR_VIA is not set +CONFIG_NET_VENDOR_WANGXUN=y +CONFIG_LIBWX=m +CONFIG_NGBE=m +CONFIG_TXGBE=m +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_NET_VENDOR_XILINX=y +# CONFIG_XILINX_EMACLITE is not set +# CONFIG_XILINX_AXI_EMAC is not set +# CONFIG_XILINX_LL_TEMAC is not set +# CONFIG_FDDI is not set +# CONFIG_HIPPI is not set +# CONFIG_NET_SB1000 is not set +CONFIG_PHYLINK=m +CONFIG_PHYLIB=y +CONFIG_SWPHY=y +CONFIG_LED_TRIGGER_PHY=y +CONFIG_FIXED_PHY=y +CONFIG_SFP=m + +# +# MII PHY device drivers +# +CONFIG_AMD_PHY=m +# CONFIG_ADIN_PHY is not set +# CONFIG_ADIN1100_PHY is not set +CONFIG_AQUANTIA_PHY=m +CONFIG_AX88796B_PHY=m +CONFIG_BROADCOM_PHY=m +# CONFIG_BCM54140_PHY is not set +CONFIG_BCM7XXX_PHY=m +# CONFIG_BCM84881_PHY is not set +CONFIG_BCM87XX_PHY=m +CONFIG_BCM_NET_PHYLIB=m +CONFIG_BCM_NET_PHYPTP=m +CONFIG_CICADA_PHY=m +CONFIG_CORTINA_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_LXT_PHY=m +CONFIG_INTEL_XWAY_PHY=m +CONFIG_LSI_ET1011C_PHY=m +CONFIG_MARVELL_PHY=m +CONFIG_MARVELL_10G_PHY=m +# CONFIG_MARVELL_88Q2XXX_PHY is not set +# CONFIG_MARVELL_88X2222_PHY is not set +# CONFIG_MAXLINEAR_GPHY is not set +# CONFIG_MEDIATEK_GE_PHY is not set +CONFIG_MICREL_PHY=m +# CONFIG_MICROCHIP_T1S_PHY is not set +CONFIG_MICROCHIP_PHY=m +CONFIG_MICROCHIP_T1_PHY=m +CONFIG_MICROSEMI_PHY=m +# CONFIG_MOTORCOMM_PHY is not set +CONFIG_NATIONAL_PHY=m +# CONFIG_NXP_CBTX_PHY is not set +# CONFIG_NXP_C45_TJA11XX_PHY is not set +# CONFIG_NXP_TJA11XX_PHY is not set +# CONFIG_NCN26000_PHY is not set +CONFIG_QSEMI_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_RENESAS_PHY=m +CONFIG_ROCKCHIP_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_STE10XP=m +CONFIG_TERANETICS_PHY=m +CONFIG_DP83822_PHY=m +CONFIG_DP83TC811_PHY=m +CONFIG_DP83848_PHY=m +CONFIG_DP83867_PHY=m +# CONFIG_DP83869_PHY is not set +# CONFIG_DP83TD510_PHY is not set +CONFIG_VITESSE_PHY=m +CONFIG_XILINX_GMII2RGMII=m +# CONFIG_MICREL_KS8995MA is not set +# CONFIG_PSE_CONTROLLER is not set +CONFIG_MDIO_DEVICE=y +CONFIG_MDIO_BUS=y +CONFIG_FWNODE_MDIO=y +CONFIG_ACPI_MDIO=y +CONFIG_MDIO_DEVRES=y +CONFIG_MDIO_BITBANG=m +CONFIG_MDIO_BCM_UNIMAC=m +CONFIG_MDIO_CAVIUM=m +# CONFIG_MDIO_GPIO is not set +CONFIG_MDIO_I2C=m +# CONFIG_MDIO_MVUSB is not set +CONFIG_MDIO_THUNDER=m + +# +# MDIO Multiplexers +# + +# +# PCS device drivers +# +CONFIG_PCS_XPCS=m +# end of PCS device drivers + +# CONFIG_PLIP is not set +CONFIG_PPP=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_FILTER=y +CONFIG_PPP_MPPE=m +CONFIG_PPP_MULTILINK=y +CONFIG_PPPOATM=m +CONFIG_PPPOE=m +# CONFIG_PPPOE_HASH_BITS_1 is not set +# CONFIG_PPPOE_HASH_BITS_2 is not set +CONFIG_PPPOE_HASH_BITS_4=y +# CONFIG_PPPOE_HASH_BITS_8 is not set +CONFIG_PPPOE_HASH_BITS=4 +CONFIG_PPTP=m +CONFIG_PPPOL2TP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_SLIP=m +CONFIG_SLHC=m +CONFIG_SLIP_COMPRESSED=y +CONFIG_SLIP_SMART=y +# CONFIG_SLIP_MODE_SLIP6 is not set +CONFIG_USB_NET_DRIVERS=y +CONFIG_USB_CATC=m +CONFIG_USB_KAWETH=m +CONFIG_USB_PEGASUS=m +CONFIG_USB_RTL8150=m +CONFIG_USB_RTL8152=m +CONFIG_USB_LAN78XX=m +CONFIG_USB_USBNET=m +CONFIG_USB_NET_AX8817X=m +CONFIG_USB_NET_AX88179_178A=m +CONFIG_USB_NET_CDCETHER=m +CONFIG_USB_NET_CDC_EEM=m +CONFIG_USB_NET_CDC_NCM=m +CONFIG_USB_NET_HUAWEI_CDC_NCM=m +CONFIG_USB_NET_CDC_MBIM=m +CONFIG_USB_NET_DM9601=m +# CONFIG_USB_NET_SR9700 is not set +# CONFIG_USB_NET_SR9800 is not set +CONFIG_USB_NET_SMSC75XX=m +CONFIG_USB_NET_SMSC95XX=m +CONFIG_USB_NET_GL620A=m +CONFIG_USB_NET_NET1080=m +CONFIG_USB_NET_PLUSB=m 
+CONFIG_USB_NET_MCS7830=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_NET_CDC_SUBSET_ENABLE=m
+CONFIG_USB_NET_CDC_SUBSET=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+CONFIG_USB_EPSON2888=y
+CONFIG_USB_KC2190=y
+CONFIG_USB_NET_ZAURUS=m
+CONFIG_USB_NET_CX82310_ETH=m
+CONFIG_USB_NET_KALMIA=m
+CONFIG_USB_NET_QMI_WWAN=m
+CONFIG_USB_HSO=m
+CONFIG_USB_NET_INT51X1=m
+CONFIG_USB_IPHETH=m
+CONFIG_USB_SIERRA_NET=m
+CONFIG_USB_VL600=m
+CONFIG_USB_NET_CH9200=m
+# CONFIG_USB_NET_AQC111 is not set
+CONFIG_USB_RTL8153_ECM=m
+CONFIG_WLAN=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=m
+CONFIG_WLAN_VENDOR_ATH=y
+# CONFIG_ATH_DEBUG is not set
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_COMMON_DEBUG=y
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+CONFIG_ATH9K_AHB=y
+CONFIG_ATH9K_DEBUGFS=y
+# CONFIG_ATH9K_STATION_STATISTICS is not set
+# CONFIG_ATH9K_DYNACK is not set
+CONFIG_ATH9K_WOW=y
+CONFIG_ATH9K_RFKILL=y
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+CONFIG_ATH9K_PCOEM=y
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
+CONFIG_ATH9K_HTC=m
+# CONFIG_ATH9K_HTC_DEBUGFS is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=m
+CONFIG_ATH10K_CE=y
+CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+# CONFIG_ATH10K_DEBUG is not set
+# CONFIG_ATH10K_DEBUGFS is not set
+# CONFIG_ATH10K_TRACING is not set
+# CONFIG_WCN36XX is not set
+# CONFIG_ATH11K is not set
+# CONFIG_ATH12K is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+CONFIG_WLAN_VENDOR_BROADCOM=y
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+CONFIG_BRCMUTIL=m
+CONFIG_BRCMSMAC=m
+CONFIG_BRCMSMAC_LEDS=y
+CONFIG_BRCMFMAC=m
+CONFIG_BRCMFMAC_PROTO_BCDC=y
+CONFIG_BRCMFMAC_PROTO_MSGBUF=y
+CONFIG_BRCMFMAC_SDIO=y
+CONFIG_BRCMFMAC_USB=y
+CONFIG_BRCMFMAC_PCIE=y
+# CONFIG_BRCM_TRACING is not set
+# CONFIG_BRCMDBG is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+CONFIG_WLAN_VENDOR_INTEL=y
+# CONFIG_IPW2100 is not set
+# CONFIG_IPW2200 is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+CONFIG_IWLWIFI=m
+CONFIG_IWLWIFI_LEDS=y
+CONFIG_IWLDVM=m
+CONFIG_IWLMVM=m
+CONFIG_IWLWIFI_OPMODE_MODULAR=y
+
+#
+# Debugging Options
+#
+# CONFIG_IWLWIFI_DEBUG is not set
+CONFIG_IWLWIFI_DEBUGFS=y
+# CONFIG_IWLWIFI_DEVICE_TRACING is not set
+# end of Debugging Options
+
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+CONFIG_WLAN_VENDOR_MARVELL=y
+# CONFIG_LIBERTAS is not set
+# CONFIG_LIBERTAS_THINFIRM is not set
+CONFIG_MWIFIEX=m
+CONFIG_MWIFIEX_SDIO=m
+CONFIG_MWIFIEX_PCIE=m
+CONFIG_MWIFIEX_USB=m
+# CONFIG_MWL8K is not set
+CONFIG_WLAN_VENDOR_MEDIATEK=y
+CONFIG_MT7601U=m
+CONFIG_MT76_CORE=m
+CONFIG_MT76_LEDS=y
+CONFIG_MT76_USB=m
+CONFIG_MT76x02_LIB=m
+CONFIG_MT76x02_USB=m
+CONFIG_MT76x0_COMMON=m
+CONFIG_MT76x0U=m
+# CONFIG_MT76x0E is not set
+CONFIG_MT76x2_COMMON=m
+# CONFIG_MT76x2E is not set
+CONFIG_MT76x2U=m
+# CONFIG_MT7603E is not set
+# CONFIG_MT7615E is not set
+# CONFIG_MT7663U is not set
+# CONFIG_MT7663S is not set
+# CONFIG_MT7915E is not set
+# CONFIG_MT7921E is not set
+# CONFIG_MT7921S is not set
+# CONFIG_MT7921U is not set
+# CONFIG_MT7996E is not set
+CONFIG_WLAN_VENDOR_MICROCHIP=y
+# CONFIG_WILC1000_SDIO is not set
+# CONFIG_WILC1000_SPI is not set
+CONFIG_WLAN_VENDOR_PURELIFI=y
+# CONFIG_PLFXLC is not set
+CONFIG_WLAN_VENDOR_RALINK=y
+CONFIG_RT2X00=m
+# CONFIG_RT2400PCI is not set
+# CONFIG_RT2500PCI is not set
+# CONFIG_RT61PCI is not set
+CONFIG_RT2800PCI=m
+CONFIG_RT2800PCI_RT33XX=y
+CONFIG_RT2800PCI_RT35XX=y
+CONFIG_RT2800PCI_RT53XX=y
+CONFIG_RT2800PCI_RT3290=y
+# CONFIG_RT2500USB is not set
+# CONFIG_RT73USB is not set
+CONFIG_RT2800USB=m
+CONFIG_RT2800USB_RT33XX=y
+CONFIG_RT2800USB_RT35XX=y
+CONFIG_RT2800USB_RT3573=y
+CONFIG_RT2800USB_RT53XX=y
+CONFIG_RT2800USB_RT55XX=y
+CONFIG_RT2800USB_UNKNOWN=y
+CONFIG_RT2800_LIB=m
+CONFIG_RT2800_LIB_MMIO=m
+CONFIG_RT2X00_LIB_MMIO=m
+CONFIG_RT2X00_LIB_PCI=m
+CONFIG_RT2X00_LIB_USB=m
+CONFIG_RT2X00_LIB=m
+CONFIG_RT2X00_LIB_FIRMWARE=y
+CONFIG_RT2X00_LIB_CRYPTO=y
+CONFIG_RT2X00_LIB_LEDS=y
+CONFIG_RT2X00_LIB_DEBUGFS=y
+# CONFIG_RT2X00_DEBUG is not set
+CONFIG_WLAN_VENDOR_REALTEK=y
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+CONFIG_RTL_CARDS=m
+CONFIG_RTL8192CE=m
+CONFIG_RTL8192SE=m
+CONFIG_RTL8192DE=m
+CONFIG_RTL8723AE=m
+CONFIG_RTL8723BE=m
+CONFIG_RTL8188EE=m
+CONFIG_RTL8192EE=m
+CONFIG_RTL8821AE=m
+CONFIG_RTL8192CU=m
+CONFIG_RTLWIFI=m
+CONFIG_RTLWIFI_PCI=m
+CONFIG_RTLWIFI_USB=m
+# CONFIG_RTLWIFI_DEBUG is not set
+CONFIG_RTL8192C_COMMON=m
+CONFIG_RTL8723_COMMON=m
+CONFIG_RTLBTCOEXIST=m
+CONFIG_RTL8XXXU=m
+# CONFIG_RTL8XXXU_UNTESTED is not set
+CONFIG_RTW88=m
+CONFIG_RTW88_CORE=m
+CONFIG_RTW88_PCI=m
+CONFIG_RTW88_8822B=m
+CONFIG_RTW88_8822C=m
+CONFIG_RTW88_8822BE=m
+# CONFIG_RTW88_8822BS is not set
+# CONFIG_RTW88_8822BU is not set
+CONFIG_RTW88_8822CE=m
+# CONFIG_RTW88_8822CS is not set
+# CONFIG_RTW88_8822CU is not set
+# CONFIG_RTW88_8723DE is not set
+# CONFIG_RTW88_8723DS is not set
+# CONFIG_RTW88_8723DU is not set
+# CONFIG_RTW88_8821CE is not set
+# CONFIG_RTW88_8821CS is not set
+# CONFIG_RTW88_8821CU is not set
+# CONFIG_RTW88_DEBUG is not set
+# CONFIG_RTW88_DEBUGFS is not set
+# CONFIG_RTW89 is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+CONFIG_WLAN_VENDOR_SILABS=y
+# CONFIG_WFX is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_WLAN_VENDOR_QUANTENNA=y
+# CONFIG_QTNFMAC_PCIE is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+CONFIG_MAC80211_HWSIM=m
+# CONFIG_VIRT_WIFI is not set
+CONFIG_WAN=y
+CONFIG_HDLC=m
+CONFIG_HDLC_RAW=m
+# CONFIG_HDLC_RAW_ETH is not set
+CONFIG_HDLC_CISCO=m
+CONFIG_HDLC_FR=m
+CONFIG_HDLC_PPP=m
+
+#
+# X.25/LAPB support is disabled
+#
+# CONFIG_PCI200SYN is not set
+# CONFIG_WANXL is not set
+# CONFIG_PC300TOO is not set
+# CONFIG_FARSYNC is not set
+CONFIG_IEEE802154_DRIVERS=m
+CONFIG_IEEE802154_FAKELB=m
+# CONFIG_IEEE802154_AT86RF230 is not set
+# CONFIG_IEEE802154_MRF24J40 is not set
+# CONFIG_IEEE802154_CC2520 is not set
+# CONFIG_IEEE802154_ATUSB is not set
+# CONFIG_IEEE802154_ADF7242 is not set
+# CONFIG_IEEE802154_CA8210 is not set
+# CONFIG_IEEE802154_MCR20A is not set
+# CONFIG_IEEE802154_HWSIM is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+CONFIG_XEN_NETDEV_FRONTEND=m
+CONFIG_VMXNET3=m
+CONFIG_FUJITSU_ES=m
+CONFIG_HYPERV_NET=m
+CONFIG_NETDEVSIM=m
+CONFIG_NET_FAILOVER=m
+CONFIG_ISDN=y
+CONFIG_ISDN_CAPI=y
+CONFIG_CAPI_TRACE=y
+CONFIG_ISDN_CAPI_MIDDLEWARE=y
+CONFIG_MISDN=m
+CONFIG_MISDN_DSP=m
+CONFIG_MISDN_L1OIP=m
+
+#
+# mISDN hardware drivers
+#
+CONFIG_MISDN_HFCPCI=m
+CONFIG_MISDN_HFCMULTI=m
+CONFIG_MISDN_HFCUSB=m
+CONFIG_MISDN_AVMFRITZ=m
+CONFIG_MISDN_SPEEDFAX=m
+CONFIG_MISDN_INFINEON=m
+CONFIG_MISDN_W6692=m
+CONFIG_MISDN_NETJET=m
+CONFIG_MISDN_HDLC=m
+CONFIG_MISDN_IPAC=m
+CONFIG_MISDN_ISAR=m
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+CONFIG_INPUT_FF_MEMLESS=m
+CONFIG_INPUT_SPARSEKMAP=m
+# CONFIG_INPUT_MATRIXKMAP is not set
+CONFIG_INPUT_VIVALDIFMAP=y
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=m
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+CONFIG_INPUT_JOYDEV=m
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADC is not set
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_APPLESPI is not set
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_GPIO is not set
+# CONFIG_KEYBOARD_GPIO_POLLED is not set
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MCS is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=m
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_BYD=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_SYNAPTICS_SMBUS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_LIFEBOOK=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+CONFIG_MOUSE_PS2_ELANTECH=y
+CONFIG_MOUSE_PS2_ELANTECH_SMBUS=y
+CONFIG_MOUSE_PS2_SENTELIC=y
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+CONFIG_MOUSE_PS2_FOCALTECH=y
+CONFIG_MOUSE_PS2_VMMOUSE=y
+CONFIG_MOUSE_PS2_SMBUS=y
+CONFIG_MOUSE_SERIAL=m
+CONFIG_MOUSE_APPLETOUCH=m
+CONFIG_MOUSE_BCM5974=m
+CONFIG_MOUSE_CYAPA=m
+CONFIG_MOUSE_ELAN_I2C=m
+CONFIG_MOUSE_ELAN_I2C_I2C=y
+# CONFIG_MOUSE_ELAN_I2C_SMBUS is not set
+CONFIG_MOUSE_VSXXXAA=m
+# CONFIG_MOUSE_GPIO is not set
+CONFIG_MOUSE_SYNAPTICS_I2C=m
+CONFIG_MOUSE_SYNAPTICS_USB=m
+# CONFIG_INPUT_JOYSTICK is not set
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=m
+CONFIG_TABLET_USB_AIPTEK=m
+# CONFIG_TABLET_USB_HANWANG is not set
+CONFIG_TABLET_USB_KBTAB=m
+# CONFIG_TABLET_USB_PEGASUS is not set
+CONFIG_TABLET_SERIAL_WACOM4=m
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_ADS7846 is not set
+# CONFIG_TOUCHSCREEN_AD7877 is not set
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_ADC is not set
+# CONFIG_TOUCHSCREEN_ATMEL_MXT is not set
+# CONFIG_TOUCHSCREEN_AUO_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_BU21013 is not set
+# CONFIG_TOUCHSCREEN_BU21029 is not set
+# CONFIG_TOUCHSCREEN_CHIPONE_ICN8505 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMA140 is not set
+# CONFIG_TOUCHSCREEN_CY8CTMG110 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP4_CORE is not set
+# CONFIG_TOUCHSCREEN_CYTTSP5 is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_EETI is not set
+# CONFIG_TOUCHSCREEN_EGALAX_SERIAL is not set
+# CONFIG_TOUCHSCREEN_EXC3000 is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GOODIX is not set
+# CONFIG_TOUCHSCREEN_HIDEEP is not set
+# CONFIG_TOUCHSCREEN_HYCON_HY46XX is not set
+# CONFIG_TOUCHSCREEN_HYNITRON_CSTXXX is not set
+# CONFIG_TOUCHSCREEN_ILI210X is not set
+# CONFIG_TOUCHSCREEN_ILITEK is not set
+# CONFIG_TOUCHSCREEN_S6SY761 is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_EKTF2127 is not set
+# CONFIG_TOUCHSCREEN_ELAN is not set
+CONFIG_TOUCHSCREEN_ELO=m
+CONFIG_TOUCHSCREEN_WACOM_W8001=m
+CONFIG_TOUCHSCREEN_WACOM_I2C=m
+# CONFIG_TOUCHSCREEN_MAX11801 is not set
+# CONFIG_TOUCHSCREEN_MCS5000 is not set
+# CONFIG_TOUCHSCREEN_MMS114 is not set
+# CONFIG_TOUCHSCREEN_MELFAS_MIP4 is not set
+# CONFIG_TOUCHSCREEN_MSG2638 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_NOVATEK_NVT_TS is not set
+# CONFIG_TOUCHSCREEN_IMAGIS is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_EDT_FT5X06 is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_PIXCIR is not set
+# CONFIG_TOUCHSCREEN_WDT87XX_I2C is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+# CONFIG_TOUCHSCREEN_TSC2004 is not set
+# CONFIG_TOUCHSCREEN_TSC2005 is not set
+# CONFIG_TOUCHSCREEN_TSC2007 is not set
+# CONFIG_TOUCHSCREEN_RM_TS is not set
+# CONFIG_TOUCHSCREEN_SILEAD is not set
+# CONFIG_TOUCHSCREEN_SIS_I2C is not set
+# CONFIG_TOUCHSCREEN_ST1232 is not set
+# CONFIG_TOUCHSCREEN_STMFTS is not set
+# CONFIG_TOUCHSCREEN_SURFACE3_SPI is not set
+# CONFIG_TOUCHSCREEN_SX8654 is not set
+# CONFIG_TOUCHSCREEN_TPS6507X is not set
+# CONFIG_TOUCHSCREEN_ZET6223 is not set
+# CONFIG_TOUCHSCREEN_ZFORCE is not set
+# CONFIG_TOUCHSCREEN_COLIBRI_VF50 is not set
+# CONFIG_TOUCHSCREEN_ROHM_BU21023 is not set
+# CONFIG_TOUCHSCREEN_IQS5XX is not set
+# CONFIG_TOUCHSCREEN_IQS7211 is not set
+# CONFIG_TOUCHSCREEN_ZINITIX is not set
+# CONFIG_TOUCHSCREEN_HIMAX_HX83112B is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+CONFIG_INPUT_PCSPKR=m
+# CONFIG_INPUT_MMA8450 is not set
+CONFIG_INPUT_APANEL=m
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+CONFIG_INPUT_ATLAS_BTNS=m
+CONFIG_INPUT_ATI_REMOTE2=m
+CONFIG_INPUT_KEYSPAN_REMOTE=m
+# CONFIG_INPUT_KXTJ9 is not set
+CONFIG_INPUT_POWERMATE=m
+CONFIG_INPUT_YEALINK=m
+CONFIG_INPUT_CM109=m
+CONFIG_INPUT_UINPUT=m
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_PWM_BEEPER is not set
+# CONFIG_INPUT_PWM_VIBRA is not set
+CONFIG_INPUT_GPIO_ROTARY_ENCODER=m
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+CONFIG_INPUT_XEN_KBDDEV_FRONTEND=m
+# CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+CONFIG_RMI4_CORE=m
+CONFIG_RMI4_I2C=m
+# CONFIG_RMI4_SPI is not set
+CONFIG_RMI4_SMB=m
+CONFIG_RMI4_F03=y
+CONFIG_RMI4_F03_SERIO=m
+CONFIG_RMI4_2D_SENSOR=y
+CONFIG_RMI4_F11=y
+CONFIG_RMI4_F12=y
+CONFIG_RMI4_F30=y
+# CONFIG_RMI4_F34 is not set
+# CONFIG_RMI4_F3A is not set
+CONFIG_RMI4_F55=y
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_CT82C710 is not set
+# CONFIG_SERIO_PARKBD is not set
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+CONFIG_SERIO_RAW=m
+CONFIG_SERIO_ALTERA_PS2=m
+# CONFIG_SERIO_PS2MULT is not set
+CONFIG_SERIO_ARC_PS2=m
+CONFIG_HYPERV_KEYBOARD=m
+# CONFIG_SERIO_GPIO_PS2 is not set
+# CONFIG_USERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_CONSOLE_SLEEP=y
+CONFIG_HW_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_LEGACY_TIOCSTI=y
+CONFIG_LDISC_AUTOLOAD=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_PNP=y
+# CONFIG_SERIAL_8250_16550A_VARIANTS is not set
+# CONFIG_SERIAL_8250_FINTEK is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_DMA=y
+CONFIG_SERIAL_8250_PCILIB=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_EXAR=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_MANY_PORTS=y
+# CONFIG_SERIAL_8250_PCI1XXXX is not set
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_SERIAL_8250_DETECT_IRQ is not set
+CONFIG_SERIAL_8250_RSA=y
+CONFIG_SERIAL_8250_DWLIB=y
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_RT288X is not set
+CONFIG_SERIAL_8250_LPSS=y
+CONFIG_SERIAL_8250_MID=y
+CONFIG_SERIAL_8250_PERICOM=y
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_KGDB_NMI is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+CONFIG_CONSOLE_POLL=y
+CONFIG_SERIAL_JSM=m
+# CONFIG_SERIAL_LANTIQ is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+CONFIG_SERIAL_ARC=m
+CONFIG_SERIAL_ARC_NR_PORTS=1
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+CONFIG_SERIAL_MCTRL_GPIO=y
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_MOXA_INTELLIO is not set
+# CONFIG_MOXA_SMARTIO is not set
+CONFIG_N_HDLC=m
+CONFIG_N_GSM=m
+CONFIG_NOZOMI=m
+# CONFIG_NULL_TTY is not set
+CONFIG_HVC_DRIVER=y
+CONFIG_HVC_IRQ=y
+CONFIG_HVC_XEN=y
+CONFIG_HVC_XEN_FRONTEND=y
+# CONFIG_SERIAL_DEV_BUS is not set
+CONFIG_PRINTER=m
+# CONFIG_LP_CONSOLE is not set
+CONFIG_PPDEV=m
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_IPMI_HANDLER=m
+CONFIG_IPMI_DMI_DECODE=y
+CONFIG_IPMI_PLAT_DATA=y
+CONFIG_IPMI_PANIC_EVENT=y
+CONFIG_IPMI_PANIC_STRING=y
+CONFIG_IPMI_DEVICE_INTERFACE=m
+CONFIG_IPMI_SI=m
+CONFIG_IPMI_SSIF=m
+CONFIG_IPMI_WATCHDOG=m
+CONFIG_IPMI_POWEROFF=m
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_TIMERIOMEM=m
+CONFIG_HW_RANDOM_INTEL=m
+CONFIG_HW_RANDOM_AMD=m
+# CONFIG_HW_RANDOM_BA431 is not set
+CONFIG_HW_RANDOM_VIA=m
+CONFIG_HW_RANDOM_ZHAOXIN=m
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_MWAVE is not set
+CONFIG_DEVMEM=y
+CONFIG_NVRAM=y
+CONFIG_DEVPORT=y
+CONFIG_HPET=y
+CONFIG_HPET_MMAP=y
+# CONFIG_HPET_MMAP_DEFAULT is not set
+CONFIG_HANGCHECK_TIMER=m
+CONFIG_UV_MMTIMER=m
+CONFIG_TCG_TPM=y
+CONFIG_HW_RANDOM_TPM=y
+CONFIG_TCG_TIS_CORE=y
+CONFIG_TCG_TIS=y
+# CONFIG_TCG_TIS_SPI is not set
+# CONFIG_TCG_TIS_I2C is not set
+# CONFIG_TCG_TIS_I2C_CR50 is not set
+CONFIG_TCG_TIS_I2C_ATMEL=m
+CONFIG_TCG_TIS_I2C_INFINEON=m
+CONFIG_TCG_TIS_I2C_NUVOTON=m
+CONFIG_TCG_NSC=m
+CONFIG_TCG_ATMEL=m
+CONFIG_TCG_INFINEON=m
+# CONFIG_TCG_XEN is not set
+CONFIG_TCG_CRB=y
+# CONFIG_TCG_VTPM_PROXY is not set
+CONFIG_TCG_TIS_ST33ZP24=m
+CONFIG_TCG_TIS_ST33ZP24_I2C=m
+# CONFIG_TCG_TIS_ST33ZP24_SPI is not set
+CONFIG_TELCLOCK=m
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# I2C support
+#
+CONFIG_I2C=m
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_COMPAT=y
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_MUX_GPIO is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_REG is not set
+CONFIG_I2C_MUX_MLXCPLD=m
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_SMBUS=m
+CONFIG_I2C_ALGOBIT=m
+CONFIG_I2C_ALGOPCA=m
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+CONFIG_I2C_AMD756=m
+CONFIG_I2C_AMD756_S4882=m
+CONFIG_I2C_AMD8111=m
+# CONFIG_I2C_AMD_MP2 is not set
+CONFIG_I2C_I801=m
+CONFIG_I2C_ISCH=m
+CONFIG_I2C_ISMT=m
+CONFIG_I2C_PIIX4=m
+CONFIG_I2C_NFORCE2=m
+CONFIG_I2C_NFORCE2_S4985=m
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+CONFIG_I2C_SIS96X=m
+CONFIG_I2C_VIA=m
+CONFIG_I2C_VIAPRO=m
+CONFIG_I2C_ZHAOXIN=m
+
+#
+# ACPI drivers
+#
+CONFIG_I2C_SCMI=m
+CONFIG_I2C_ZHAOXIN_SMBUS=m
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+# CONFIG_I2C_CBUS_GPIO is not set
+CONFIG_I2C_DESIGNWARE_CORE=m
+# CONFIG_I2C_DESIGNWARE_SLAVE is not set
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+# CONFIG_I2C_DESIGNWARE_AMDPSP is not set
+CONFIG_I2C_DESIGNWARE_BAYTRAIL=y
+# CONFIG_I2C_DESIGNWARE_PCI is not set
+# CONFIG_I2C_EMEV2 is not set
+# CONFIG_I2C_GPIO is not set
+# CONFIG_I2C_OCORES is not set
+CONFIG_I2C_PCA_PLATFORM=m
+CONFIG_I2C_SIMTEC=m
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+CONFIG_I2C_DIOLAN_U2C=m
+# CONFIG_I2C_CP2615 is not set
+CONFIG_I2C_PARPORT=m
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+CONFIG_I2C_TINY_USB=m
+CONFIG_I2C_VIPERBOARD=m
+
+#
+# Other I2C/SMBus bus drivers
+#
+CONFIG_I2C_MLXCPLD=m
+# CONFIG_I2C_VIRTIO is not set
+# end of I2C Hardware Bus support
+
+CONFIG_I2C_STUB=m
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+# CONFIG_SPI_MEM is not set
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_BUTTERFLY is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_LM70_LLP is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_LANTIQ_SSC is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+# CONFIG_SPI_PXA2XX is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+CONFIG_PPS=y
+# CONFIG_PPS_DEBUG is not set
+
+#
+# PPS clients support
+#
+# CONFIG_PPS_CLIENT_KTIMER is not set
+CONFIG_PPS_CLIENT_LDISC=m
+CONFIG_PPS_CLIENT_PARPORT=m
+CONFIG_PPS_CLIENT_GPIO=m
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+CONFIG_PTP_1588_CLOCK=y
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+CONFIG_DP83640_PHY=m
+# CONFIG_PTP_1588_CLOCK_INES is not set
+CONFIG_PTP_1588_CLOCK_KVM=m
+# CONFIG_PTP_1588_CLOCK_IDT82P33 is not set
+# CONFIG_PTP_1588_CLOCK_IDTCM is not set
+# CONFIG_PTP_1588_CLOCK_MOCK is not set
+# CONFIG_PTP_1588_CLOCK_VMW is not set
+# CONFIG_PTP_1588_CLOCK_OCP is not set
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AMD is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+
+#
+# Intel pinctrl drivers
+#
+CONFIG_PINCTRL_BAYTRAIL=y
+# CONFIG_PINCTRL_CHERRYVIEW is not set
+# CONFIG_PINCTRL_LYNXPOINT is not set
+CONFIG_PINCTRL_INTEL=y
+# CONFIG_PINCTRL_ALDERLAKE is not set
+CONFIG_PINCTRL_BROXTON=m
+CONFIG_PINCTRL_CANNONLAKE=m
+CONFIG_PINCTRL_CEDARFORK=m
+CONFIG_PINCTRL_DENVERTON=m
+# CONFIG_PINCTRL_ELKHARTLAKE is not set
+# CONFIG_PINCTRL_EMMITSBURG is not set
+CONFIG_PINCTRL_GEMINILAKE=m
+CONFIG_PINCTRL_ICELAKE=m
+# CONFIG_PINCTRL_JASPERLAKE is not set
+# CONFIG_PINCTRL_LAKEFIELD is not set
+CONFIG_PINCTRL_LEWISBURG=m
+# CONFIG_PINCTRL_METEORLAKE is not set
+CONFIG_PINCTRL_SUNRISEPOINT=m
+# CONFIG_PINCTRL_TIGERLAKE is not set
+# end of Intel pinctrl drivers
+
+CONFIG_PINCTRL_ZHAOXIN=m
+CONFIG_PINCTRL_KX7000=m
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_GPIO_ACPI=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_CDEV=y
+CONFIG_GPIO_CDEV_V1=y
+CONFIG_GPIO_GENERIC=m
+
+#
+# Memory mapped GPIO drivers
+#
+CONFIG_GPIO_AMDPT=m
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_EXAR is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+CONFIG_GPIO_ICH=m
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# Port-mapped I/O GPIO drivers
+#
+# CONFIG_GPIO_VX855 is not set
+# CONFIG_GPIO_F7188X is not set
+# CONFIG_GPIO_IT87 is not set
+# CONFIG_GPIO_SCH is not set
+# CONFIG_GPIO_SCH311X is not set
+# CONFIG_GPIO_WINBOND is not set
+# CONFIG_GPIO_WS16C48 is not set
+# end of Port-mapped I/O GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+# CONFIG_GPIO_PCA953X is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# CONFIG_GPIO_ELKHARTLAKE is not set
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_AMD8111 is not set
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_ML_IOH is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+CONFIG_GPIO_VIPERBOARD=m
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_VIRTIO is not set
+# CONFIG_GPIO_SIM is not set
+# end of Virtual GPIO drivers
+
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_RESTART is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+CONFIG_POWER_SUPPLY_HWMON=y
+# CONFIG_GENERIC_ADC_BATTERY is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17040 is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=m
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+CONFIG_SENSORS_ABITUGURU=m
+CONFIG_SENSORS_ABITUGURU3=m
+# CONFIG_SENSORS_AD7314 is not set
+CONFIG_SENSORS_AD7414=m
+CONFIG_SENSORS_AD7418=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+# CONFIG_SENSORS_ADM1177 is not set
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_ADT7X10=m
+# CONFIG_SENSORS_ADT7310 is not set
+CONFIG_SENSORS_ADT7410=m
+CONFIG_SENSORS_ADT7411=m
+CONFIG_SENSORS_ADT7462=m
+CONFIG_SENSORS_ADT7470=m
+CONFIG_SENSORS_ADT7475=m
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AQUACOMPUTER_D5NEXT is not set
+# CONFIG_SENSORS_AS370 is not set
+CONFIG_SENSORS_ASC7621=m
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_K8TEMP=m
+CONFIG_SENSORS_K10TEMP=m
+CONFIG_SENSORS_FAM15H_POWER=m
+CONFIG_SENSORS_APPLESMC=m
+CONFIG_SENSORS_ASB100=m
+CONFIG_SENSORS_ATXP1=m
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DRIVETEMP is not set
+CONFIG_SENSORS_DS620=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_DELL_SMM=m
+# CONFIG_I8K is not set
+CONFIG_SENSORS_I5K_AMB=m
+CONFIG_SENSORS_F71805F=m
+CONFIG_SENSORS_F71882FG=m
+CONFIG_SENSORS_F75375S=m
+CONFIG_SENSORS_FSCHMD=m
+# CONFIG_SENSORS_FTSTEUTATES is not set
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_G760A=m
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+CONFIG_SENSORS_IBMAEM=m
+CONFIG_SENSORS_IBMPEX=m
+# CONFIG_SENSORS_IIO_HWMON is not set
+CONFIG_SENSORS_I5500=m
+CONFIG_SENSORS_CORETEMP=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_JC42=m
+# CONFIG_SENSORS_POWR1220 is not set
+CONFIG_SENSORS_LINEAGE=m
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+CONFIG_SENSORS_LTC4151=m
+CONFIG_SENSORS_LTC4215=m
+# CONFIG_SENSORS_LTC4222 is not set
+CONFIG_SENSORS_LTC4245=m
+# CONFIG_SENSORS_LTC4260 is not set
+CONFIG_SENSORS_LTC4261=m
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX127 is not set
+CONFIG_SENSORS_MAX16065=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_MAX1668=m
+CONFIG_SENSORS_MAX197=m
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+CONFIG_SENSORS_MAX6639=m
+CONFIG_SENSORS_MAX6650=m
+CONFIG_SENSORS_MAX6697=m
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+CONFIG_SENSORS_MCP3021=m
+# CONFIG_SENSORS_MLXREG_FAN is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+CONFIG_SENSORS_LM63=m
+# CONFIG_SENSORS_LM70 is not set
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_LM93=m
+CONFIG_SENSORS_LM95234=m
+CONFIG_SENSORS_LM95241=m
+CONFIG_SENSORS_LM95245=m
+CONFIG_SENSORS_PC87360=m
+CONFIG_SENSORS_PC87427=m
+CONFIG_SENSORS_NTC_THERMISTOR=m
+# CONFIG_SENSORS_NCT6683 is not set
+CONFIG_SENSORS_NCT6775_CORE=m
+CONFIG_SENSORS_NCT6775=m
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NCT7904 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_NZXT_KRAKEN2 is not set
+# CONFIG_SENSORS_NZXT_SMART2 is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_OXP is not set
+CONFIG_SENSORS_PCF8591=m
+CONFIG_PMBUS=m
+CONFIG_SENSORS_PMBUS=m
+# CONFIG_SENSORS_ACBEL_FSG032 is not set
+# CONFIG_SENSORS_ADM1266 is not set
+CONFIG_SENSORS_ADM1275=m
+# CONFIG_SENSORS_BEL_PFE is not set
+# CONFIG_SENSORS_BPA_RS600 is not set
+# CONFIG_SENSORS_DELTA_AHE50DC_FAN is not set
+# CONFIG_SENSORS_FSP_3Y is not set
+# CONFIG_SENSORS_IBM_CFFPS is not set
+# CONFIG_SENSORS_DPS920AB is not set
+# CONFIG_SENSORS_INSPUR_IPSPS is not set
+# CONFIG_SENSORS_IR35221 is not set
+# CONFIG_SENSORS_IR36021 is not set
+# CONFIG_SENSORS_IR38064 is not set
+# CONFIG_SENSORS_IRPS5401 is not set
+# CONFIG_SENSORS_ISL68137 is not set
+CONFIG_SENSORS_LM25066=m
+# CONFIG_SENSORS_LT7182S is not set
+CONFIG_SENSORS_LTC2978=m
+# CONFIG_SENSORS_LTC3815 is not set
+# CONFIG_SENSORS_MAX15301 is not set
+CONFIG_SENSORS_MAX16064=m
+# CONFIG_SENSORS_MAX16601 is not set
+# CONFIG_SENSORS_MAX20730 is not set
+# CONFIG_SENSORS_MAX20751 is not set
+# CONFIG_SENSORS_MAX31785 is not set
+CONFIG_SENSORS_MAX34440=m
+CONFIG_SENSORS_MAX8688=m
+# CONFIG_SENSORS_MP2888 is not set
+# CONFIG_SENSORS_MP2975 is not set
+# CONFIG_SENSORS_MP5023 is not set
+# CONFIG_SENSORS_MPQ7932 is not set
+# CONFIG_SENSORS_PIM4328 is not set
+# CONFIG_SENSORS_PLI1209BC is not set
+# CONFIG_SENSORS_PM6764TR is not set
+# CONFIG_SENSORS_PXE1610 is not set
+# CONFIG_SENSORS_Q54SJ108A2 is not set
+# CONFIG_SENSORS_STPDDC60 is not set
+# CONFIG_SENSORS_TDA38640 is not set
+# CONFIG_SENSORS_TPS40422 is not set
+# CONFIG_SENSORS_TPS53679 is not set
+# CONFIG_SENSORS_TPS546D24 is not set
+CONFIG_SENSORS_UCD9000=m
+CONFIG_SENSORS_UCD9200=m
+# CONFIG_SENSORS_XDPE152 is not set
+# CONFIG_SENSORS_XDPE122 is not set
+CONFIG_SENSORS_ZL6100=m
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+CONFIG_SENSORS_SHT15=m
+CONFIG_SENSORS_SHT21=m
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+CONFIG_SENSORS_SIS5595=m
+CONFIG_SENSORS_DME1737=m
+CONFIG_SENSORS_EMC1403=m
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+CONFIG_SENSORS_EMC6W201=m
+CONFIG_SENSORS_SMSC47M1=m
+CONFIG_SENSORS_SMSC47M192=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_SCH56XX_COMMON=m
+CONFIG_SENSORS_SCH5627=m
+CONFIG_SENSORS_SCH5636=m
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+CONFIG_SENSORS_ADS7828=m
+# CONFIG_SENSORS_ADS7871 is not set
+CONFIG_SENSORS_AMC6821=m
+CONFIG_SENSORS_INA209=m
+CONFIG_SENSORS_INA2XX=m
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_TC74 is not set
+CONFIG_SENSORS_THMC50=m
+CONFIG_SENSORS_TMP102=m
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+CONFIG_SENSORS_TMP401=m
+CONFIG_SENSORS_TMP421=m
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+CONFIG_SENSORS_VIA_CPUTEMP=m
+CONFIG_SENSORS_ZHAOXIN_CPUTEMP=m
+CONFIG_SENSORS_VIA686A=m
+CONFIG_SENSORS_VT1211=m
+CONFIG_SENSORS_VT8231=m
+# CONFIG_SENSORS_W83773G is not set
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83795=m
+# CONFIG_SENSORS_W83795_FANCTRL is not set
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_SENSORS_W83L786NG=m
+CONFIG_SENSORS_W83627HF=m
+CONFIG_SENSORS_W83627EHF=m
+# CONFIG_SENSORS_XGENE is not set
+
+#
+# ACPI drivers
+#
+CONFIG_SENSORS_ACPI_POWER=m
+CONFIG_SENSORS_ATK0110=m
+# CONFIG_SENSORS_ASUS_WMI is not set
+# CONFIG_SENSORS_ASUS_EC is not set
+# CONFIG_SENSORS_HP_WMI is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+# CONFIG_THERMAL_STATISTICS is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+CONFIG_THERMAL_HWMON=y
+CONFIG_THERMAL_ACPI=y
+CONFIG_THERMAL_WRITABLE_TRIPS=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_THERMAL_EMULATION is not set
+
+#
+# Intel thermal drivers
+#
+CONFIG_INTEL_POWERCLAMP=m
+CONFIG_X86_THERMAL_VECTOR=y
+CONFIG_INTEL_TCC=y
+CONFIG_X86_PKG_TEMP_THERMAL=m
+CONFIG_INTEL_SOC_DTS_IOSF_CORE=m
+# CONFIG_INTEL_SOC_DTS_THERMAL is not set
+
+#
+# ACPI INT340X thermal drivers
+#
+CONFIG_INT340X_THERMAL=m
+CONFIG_ACPI_THERMAL_REL=m
+# CONFIG_INT3406_THERMAL is not set
+CONFIG_PROC_THERMAL_MMIO_RAPL=m
+# end of ACPI INT340X thermal drivers
+
+CONFIG_INTEL_PCH_THERMAL=m
+# CONFIG_INTEL_TCC_COOLING is not set
+# CONFIG_INTEL_HFI_THERMAL is not set
+# end of Intel thermal drivers
+
+# CONFIG_GENERIC_ADC_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_WATCHDOG_NOWAYOUT is not set
+CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED=y
+CONFIG_WATCHDOG_OPEN_TIMEOUT=0
+CONFIG_WATCHDOG_SYSFS=y
+# CONFIG_WATCHDOG_HRTIMER_PRETIMEOUT is not set
+
+#
+# Watchdog Pretimeout Governors
+#
+# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set
+
+#
+# Watchdog Device Drivers
+#
+CONFIG_SOFT_WATCHDOG=m
+CONFIG_WDAT_WDT=m
+# CONFIG_XILINX_WATCHDOG is not set
+# CONFIG_ZIIRAVE_WATCHDOG is not set
+# CONFIG_MLX_WDT is not set
+# CONFIG_CADENCE_WATCHDOG is not set
+# CONFIG_DW_WATCHDOG is not set
+# CONFIG_MAX63XX_WATCHDOG is not set
+# CONFIG_ACQUIRE_WDT is not set
+# CONFIG_ADVANTECH_WDT is not set
+# CONFIG_ADVANTECH_EC_WDT is not set
+CONFIG_ALIM1535_WDT=m
+CONFIG_ALIM7101_WDT=m
+# CONFIG_EBC_C384_WDT is not set
+# CONFIG_EXAR_WDT is not set
+CONFIG_F71808E_WDT=m
+CONFIG_SP5100_TCO=m
+CONFIG_SBC_FITPC2_WATCHDOG=m
+# CONFIG_EUROTECH_WDT is not set
+CONFIG_IB700_WDT=m
+CONFIG_IBMASR=m
+# CONFIG_WAFER_WDT is not set
+CONFIG_I6300ESB_WDT=m
+CONFIG_IE6XX_WDT=m
+CONFIG_ITCO_WDT=m
+CONFIG_ITCO_VENDOR_SUPPORT=y
+CONFIG_IT8712F_WDT=m
+CONFIG_IT87_WDT=m
+CONFIG_HP_WATCHDOG=m
+CONFIG_HPWDT_NMI_DECODING=y
+# CONFIG_SC1200_WDT is not set
+# CONFIG_PC87413_WDT is not set
+CONFIG_NV_TCO=m
+# CONFIG_60XX_WDT is not set
+# CONFIG_CPU5_WDT is not set
+CONFIG_SMSC_SCH311X_WDT=m
+# CONFIG_SMSC37B787_WDT is not set
+# CONFIG_TQMX86_WDT is not set
+CONFIG_VIA_WDT=m
+CONFIG_W83627HF_WDT=m
+CONFIG_W83877F_WDT=m
+CONFIG_W83977F_WDT=m
+CONFIG_MACHZ_WDT=m
+# CONFIG_SBC_EPX_C3_WATCHDOG is not set
+CONFIG_INTEL_MEI_WDT=m
+# CONFIG_NI903X_WDT is not set
+# CONFIG_NIC7018_WDT is not set
+# CONFIG_MEN_A21_WDT is not set
+CONFIG_XEN_WDT=m
+
+#
+# PCI-based Watchdog Cards
+#
+CONFIG_PCIPCWATCHDOG=m
+CONFIG_WDTPCI=m
+
+#
+# USB-based Watchdog Cards
+#
+CONFIG_USBPCWATCHDOG=m
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+CONFIG_BCMA=m
+CONFIG_BCMA_HOST_PCI_POSSIBLE=y
+CONFIG_BCMA_HOST_PCI=y
+# CONFIG_BCMA_HOST_SOC is not set
+CONFIG_BCMA_DRIVER_PCI=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
+# CONFIG_BCMA_DEBUG is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set
+CONFIG_LPC_ICH=m
+CONFIG_LPC_SCH=m
+CONFIG_MFD_INTEL_LPSS=m
+CONFIG_MFD_INTEL_LPSS_ACPI=m
+CONFIG_MFD_INTEL_LPSS_PCI=m
+# CONFIG_MFD_INTEL_PMC_BXT is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+CONFIG_MFD_VIPERBOARD=m
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_SI476X_CORE is not set
+CONFIG_MFD_SM501=m
+CONFIG_MFD_SM501_GPIO=y
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_TPS6594_SPI is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TQMX86 is not set
+CONFIG_MFD_VX855=m
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+CONFIG_RC_CORE=m
+# CONFIG_LIRC is not set
+CONFIG_RC_MAP=m
+CONFIG_RC_DECODERS=y
+CONFIG_IR_IMON_DECODER=m
+CONFIG_IR_JVC_DECODER=m
+CONFIG_IR_MCE_KBD_DECODER=m
+CONFIG_IR_NEC_DECODER=m
+CONFIG_IR_RC5_DECODER=m
+CONFIG_IR_RC6_DECODER=m
+# CONFIG_IR_RCMM_DECODER is not set
+CONFIG_IR_SANYO_DECODER=m
+# CONFIG_IR_SHARP_DECODER is not set
+CONFIG_IR_SONY_DECODER=m
+# CONFIG_IR_XMP_DECODER is not set
+CONFIG_RC_DEVICES=y
+CONFIG_IR_ENE=m
+CONFIG_IR_FINTEK=m
+# CONFIG_IR_IGORPLUGUSB is not set
+CONFIG_IR_IGUANA=m
+CONFIG_IR_IMON=m
+CONFIG_IR_IMON_RAW=m
+CONFIG_IR_ITE_CIR=m
+CONFIG_IR_MCEUSB=m
+CONFIG_IR_NUVOTON=m
+CONFIG_IR_REDRAT3=m
+CONFIG_IR_SERIAL=m
+# CONFIG_IR_SERIAL_TRANSMITTER is not set
+CONFIG_IR_STREAMZAP=m
+# CONFIG_IR_TOY is not set
+CONFIG_IR_TTUSBIR=m
+CONFIG_IR_WINBOND_CIR=m
+CONFIG_RC_ATI_REMOTE=m
+# CONFIG_RC_LOOPBACK is not set
+# CONFIG_RC_XBOX_DVD is not set
+CONFIG_CEC_CORE=m
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_RC is not set
+CONFIG_MEDIA_CEC_SUPPORT=y
+# CONFIG_CEC_CH7322 is not set
+# CONFIG_CEC_GPIO is not set
+# CONFIG_CEC_SECO is not set
+CONFIG_USB_PULSE8_CEC=m
+CONFIG_USB_RAINSHADOW_CEC=m
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=m
+CONFIG_MEDIA_SUPPORT_FILTER=y
+CONFIG_MEDIA_SUBDRV_AUTOSELECT=y
+
+#
+# Media device types
+#
+# CONFIG_MEDIA_CAMERA_SUPPORT is not set
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_PLATFORM_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+# end of Media device types
+
+#
+# Media drivers
+#
+
+#
+# Drivers filtered as selected at 'Filter media drivers'
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_MEDIA_PCI_SUPPORT=y
+# CONFIG_IPU_BRIDGE is not set
+# end of Media drivers
+
+CONFIG_MEDIA_HIDE_ANCILLARY_SUBDRV=y
+
+#
+# Media ancillary drivers
+#
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_APERTURE_HELPERS=y
+CONFIG_VIDEO_CMDLINE=y
+CONFIG_VIDEO_NOMODESET=y
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_PANEL is not set
+# CONFIG_AGP is not set
+CONFIG_INTEL_GTT=m
+CONFIG_VGA_SWITCHEROO=y
+CONFIG_DRM=m
+CONFIG_DRM_MIPI_DSI=y
+CONFIG_DRM_KMS_HELPER=m
+CONFIG_DRM_FBDEV_EMULATION=y
+CONFIG_DRM_FBDEV_OVERALLOC=100
+CONFIG_DRM_LOAD_EDID_FIRMWARE=y
+CONFIG_DRM_DISPLAY_HELPER=m
+CONFIG_DRM_DISPLAY_DP_HELPER=y
+CONFIG_DRM_DISPLAY_HDCP_HELPER=y
+CONFIG_DRM_DISPLAY_HDMI_HELPER=y
+# CONFIG_DRM_DP_AUX_CHARDEV is not set
+# CONFIG_DRM_DP_CEC is not set
+CONFIG_DRM_TTM=m
+CONFIG_DRM_EXEC=m
+CONFIG_DRM_BUDDY=m
+CONFIG_DRM_VRAM_HELPER=m
+CONFIG_DRM_TTM_HELPER=m
+CONFIG_DRM_GEM_SHMEM_HELPER=m
+CONFIG_DRM_SUBALLOC_HELPER=m
+CONFIG_DRM_SCHED=m
+
+#
+# I2C encoder or helper chips
+#
+CONFIG_DRM_I2C_CH7006=m
+CONFIG_DRM_I2C_SIL164=m
+# CONFIG_DRM_I2C_NXP_TDA998X is not set
+# CONFIG_DRM_I2C_NXP_TDA9950 is not set
+# end of I2C encoder or helper chips
+
+#
+# ARM devices
+#
+# end of ARM devices
+
+CONFIG_DRM_RADEON=m
+CONFIG_DRM_RADEON_USERPTR=y
+CONFIG_DRM_AMDGPU=m
+# CONFIG_DRM_AMDGPU_SI is not set
+# CONFIG_DRM_AMDGPU_CIK is not set
+# CONFIG_DRM_AMDGPU_USERPTR is not set
+
+#
+# ACP (Audio CoProcessor) Configuration
+#
+# CONFIG_DRM_AMD_ACP is not set
+# end of ACP (Audio CoProcessor) Configuration
+
+#
+# Display Engine Configuration
+#
+CONFIG_DRM_AMD_DC=y
+CONFIG_DRM_AMD_DC_FP=y
+# CONFIG_DEBUG_KERNEL_DC is not set
+# CONFIG_DRM_AMD_SECURE_DISPLAY is not set
+# end of Display Engine Configuration
+
+# CONFIG_HSA_AMD is not set
+CONFIG_DRM_NOUVEAU=m
+CONFIG_NOUVEAU_DEBUG=5
+CONFIG_NOUVEAU_DEBUG_DEFAULT=3
+# CONFIG_NOUVEAU_DEBUG_MMU is not set
+# CONFIG_NOUVEAU_DEBUG_PUSH is not set
+CONFIG_DRM_NOUVEAU_BACKLIGHT=y
+CONFIG_DRM_I915=m
+CONFIG_DRM_I915_FORCE_PROBE=""
+CONFIG_DRM_I915_CAPTURE_ERROR=y
+CONFIG_DRM_I915_COMPRESS_ERROR=y
+CONFIG_DRM_I915_USERPTR=y
+CONFIG_DRM_I915_GVT_KVMGT=m
+CONFIG_DRM_I915_REQUEST_TIMEOUT=20000
+CONFIG_DRM_I915_FENCE_TIMEOUT=10000
+CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND=250
+CONFIG_DRM_I915_HEARTBEAT_INTERVAL=2500
+CONFIG_DRM_I915_PREEMPT_TIMEOUT=640
+CONFIG_DRM_I915_PREEMPT_TIMEOUT_COMPUTE=7500
+CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT=8000
+CONFIG_DRM_I915_STOP_TIMEOUT=100
+CONFIG_DRM_I915_TIMESLICE_DURATION=1
+CONFIG_DRM_I915_GVT=y
+# CONFIG_DRM_VGEM is not set
+CONFIG_DRM_VKMS=m
+CONFIG_DRM_VMWGFX=m
+# CONFIG_DRM_VMWGFX_MKSSTATS is not set
+CONFIG_DRM_GMA500=m
+CONFIG_DRM_UDL=m
+CONFIG_DRM_AST=m
+CONFIG_DRM_MGAG200=m
+CONFIG_DRM_QXL=m
+CONFIG_DRM_VIRTIO_GPU=m
+CONFIG_DRM_VIRTIO_GPU_KMS=y
+CONFIG_DRM_PANEL=y
+
+#
+# Display Panels
+#
+# CONFIG_DRM_PANEL_AUO_A030JTN01 is not set
+# CONFIG_DRM_PANEL_ORISETECH_OTA5601A is not set
+# CONFIG_DRM_PANEL_RASPBERRYPI_TOUCHSCREEN is not set
+# CONFIG_DRM_PANEL_WIDECHIPS_WS2401 is not set
+# end of Display Panels
+
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PANEL_BRIDGE=y
+
+#
+# Display Interface Bridges
+#
+# CONFIG_DRM_ANALOGIX_ANX78XX is not set
+# end of Display Interface Bridges
+
+# CONFIG_DRM_LOONGSON is not set
+# CONFIG_DRM_ETNAVIV is not set
+CONFIG_DRM_BOCHS=m
+CONFIG_DRM_CIRRUS_QEMU=m
+# CONFIG_DRM_GM12U320 is not set
+# CONFIG_DRM_PANEL_MIPI_DBI is not set
+# CONFIG_DRM_SIMPLEDRM is not set
+# CONFIG_TINYDRM_HX8357D is not set
+# CONFIG_TINYDRM_ILI9163 is not set
+# CONFIG_TINYDRM_ILI9225 is not set
+# CONFIG_TINYDRM_ILI9341 is not set
+# CONFIG_TINYDRM_ILI9486 is not set
+# CONFIG_TINYDRM_MI0283QT is not set
+# CONFIG_TINYDRM_REPAPER is not set
+# CONFIG_TINYDRM_ST7586 is not set
+# CONFIG_TINYDRM_ST7735R is not set
+# CONFIG_DRM_XEN_FRONTEND is not set
+# CONFIG_DRM_VBOXVIDEO is not set
+# CONFIG_DRM_GUD is not set
+# CONFIG_DRM_SSD130X is not set
+# CONFIG_DRM_HYPERV is not set
+# CONFIG_DRM_LEGACY is not set
+CONFIG_DRM_PANEL_ORIENTATION_QUIRKS=y
+CONFIG_DRM_PRIVACY_SCREEN=y
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ARC is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_VGA16 is not set
+# CONFIG_FB_UVESA is not set
+CONFIG_FB_VESA=y
+CONFIG_FB_EFI=y
+# CONFIG_FB_N411 is not set
+# CONFIG_FB_HGA is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_LE80578 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_VIA is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SM501 is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_XEN_FBDEV_FRONTEND is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+CONFIG_FB_HYPERV=m
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+# CONFIG_FB_LS2K500 is not set
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_DEVICE=y
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYS_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_IOMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+# CONFIG_FB_MODE_HELPERS is not set
+CONFIG_FB_TILEBLITTING=y
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+CONFIG_LCD_CLASS_DEVICE=m
+# CONFIG_LCD_L4F00242T03 is not set
+# CONFIG_LCD_LMS283GF05 is not set
+# CONFIG_LCD_LTV350QV is not set
+# CONFIG_LCD_ILI922X is not set
+# CONFIG_LCD_ILI9320 is not set
+# CONFIG_LCD_TDO24M is not set
+# CONFIG_LCD_VGG2432A4 is not set
+CONFIG_LCD_PLATFORM=m
+# CONFIG_LCD_AMS369FG06 is not set
+# CONFIG_LCD_LMS501KF03 is not set
+# CONFIG_LCD_HX8357 is not set
+# CONFIG_LCD_OTM3225A is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_PWM is not set
+CONFIG_BACKLIGHT_APPLE=m
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_SAHARA is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3630A is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+CONFIG_BACKLIGHT_LP855X=m
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# end of Backlight & LCD device support
+
+CONFIG_HDMI=y
+
+#
+# Console display driver support
+#
+CONFIG_VGA_CONSOLE=y
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
+# end of Console display driver support
+
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_LOGO_LINUX_CLUT224=y
+# end of Graphics support
+
+# CONFIG_DRM_ACCEL is not set
+CONFIG_SOUND=m
+# CONFIG_SND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=m
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=m
+# CONFIG_HID_ACCUTOUCH is not set
+CONFIG_HID_ACRUX=m
+# CONFIG_HID_ACRUX_FF is not set
+CONFIG_HID_APPLE=m
+CONFIG_HID_APPLEIR=m
+CONFIG_HID_ASUS=m
+CONFIG_HID_AUREAL=m
+CONFIG_HID_BELKIN=m
+CONFIG_HID_BETOP_FF=m
+# CONFIG_HID_BIGBEN_FF is not set
+CONFIG_HID_CHERRY=m
+CONFIG_HID_CHICONY=m
+CONFIG_HID_CORSAIR=m
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+CONFIG_HID_CMEDIA=m
+# CONFIG_HID_CP2112 is not set
+# CONFIG_HID_CREATIVE_SB0540 is not set
+CONFIG_HID_CYPRESS=m
+CONFIG_HID_DRAGONRISE=m
+# CONFIG_DRAGONRISE_FF is not set
+# CONFIG_HID_EMS_FF is not set
+CONFIG_HID_ELAN=m
+CONFIG_HID_ELECOM=m
+CONFIG_HID_ELO=m
+# CONFIG_HID_EVISION is not set
+CONFIG_HID_EZKEY=m
+# CONFIG_HID_FT260 is not set
+CONFIG_HID_GEMBIRD=m
+CONFIG_HID_GFRM=m
+# CONFIG_HID_GLORIOUS is not set
+CONFIG_HID_HOLTEK=m
+# CONFIG_HOLTEK_FF is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+CONFIG_HID_GT683R=m
+CONFIG_HID_KEYTOUCH=m
+CONFIG_HID_KYE=m
+CONFIG_HID_UCLOGIC=m
+CONFIG_HID_WALTOP=m
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+CONFIG_HID_GYRATION=m
+CONFIG_HID_ICADE=m
+CONFIG_HID_ITE=m
+CONFIG_HID_JABRA=m
+CONFIG_HID_TWINHAN=m
+CONFIG_HID_KENSINGTON=m
+CONFIG_HID_LCPOWER=m
+CONFIG_HID_LED=m
+CONFIG_HID_LENOVO=m
+# CONFIG_HID_LETSKETCH is not set
+CONFIG_HID_LOGITECH=m
+CONFIG_HID_LOGITECH_DJ=m
+CONFIG_HID_LOGITECH_HIDPP=m
+# CONFIG_LOGITECH_FF is not set
+# CONFIG_LOGIRUMBLEPAD2_FF is not set
+# CONFIG_LOGIG940_FF is not set
+# CONFIG_LOGIWHEELS_FF is not set
+CONFIG_HID_MAGICMOUSE=y
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_MEGAWORLD_FF is not set
+# CONFIG_HID_REDRAGON is not set
+CONFIG_HID_MICROSOFT=m
+CONFIG_HID_MONTEREY=m
+CONFIG_HID_MULTITOUCH=m
+# CONFIG_HID_NINTENDO is not set
+CONFIG_HID_NTI=m
+CONFIG_HID_NTRIG=y
+# CONFIG_HID_NVIDIA_SHIELD is not set
+CONFIG_HID_ORTEK=m
+CONFIG_HID_PANTHERLORD=m
+# CONFIG_PANTHERLORD_FF is not set
+CONFIG_HID_PENMOUNT=m
+CONFIG_HID_PETALYNX=m
+CONFIG_HID_PICOLCD=m
+CONFIG_HID_PICOLCD_FB=y
+CONFIG_HID_PICOLCD_BACKLIGHT=y
+CONFIG_HID_PICOLCD_LCD=y
+CONFIG_HID_PICOLCD_LEDS=y
+CONFIG_HID_PICOLCD_CIR=y
+CONFIG_HID_PLANTRONICS=m
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+CONFIG_HID_PRIMAX=m
+# CONFIG_HID_RETRODE is not set
+CONFIG_HID_ROCCAT=m
+CONFIG_HID_SAITEK=m
+CONFIG_HID_SAMSUNG=m
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SIGMAMICRO is not set
+CONFIG_HID_SONY=m
+CONFIG_SONY_FF=y
+CONFIG_HID_SPEEDLINK=m
+# CONFIG_HID_STEAM is not set
+CONFIG_HID_STEELSERIES=m
+CONFIG_HID_SUNPLUS=m
+CONFIG_HID_RMI=m
+CONFIG_HID_GREENASIA=m
+# CONFIG_GREENASIA_FF is not set
+CONFIG_HID_HYPERV_MOUSE=m
+CONFIG_HID_SMARTJOYPLUS=m
+# CONFIG_SMARTJOYPLUS_FF is not set
+CONFIG_HID_TIVO=m
+CONFIG_HID_TOPSEED=m
+# CONFIG_HID_TOPRE is not set
+CONFIG_HID_THINGM=m
+CONFIG_HID_THRUSTMASTER=m
+# CONFIG_THRUSTMASTER_FF is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_U2FZERO is not set
+CONFIG_HID_WACOM=m
+CONFIG_HID_WIIMOTE=m
+CONFIG_HID_XINMO=m
+CONFIG_HID_ZEROPLUS=m
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=m
+CONFIG_HID_SENSOR_HUB=y
+CONFIG_HID_SENSOR_CUSTOM_SENSOR=m
+CONFIG_HID_ALPS=m
+# CONFIG_HID_MCP2221 is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# CONFIG_HID_BPF is not set
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+# end of USB HID support
+
+CONFIG_I2C_HID=m
+# CONFIG_I2C_HID_ACPI is not set
+# CONFIG_I2C_HID_OF is not set
+
+#
+# Intel ISH HID support
+#
+CONFIG_INTEL_ISH_HID=m
+# CONFIG_INTEL_ISH_FIRMWARE_DOWNLOADER is not set
+# end of Intel ISH HID support
+
+#
+# AMD SFH HID Support
+#
+# CONFIG_AMD_SFH_HID is not set
+# end of AMD SFH HID Support
+
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_LED_TRIG=y
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+CONFIG_USB_PCI=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+CONFIG_USB_LEDS_TRIGGER_USBPORT=m
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_MON=y
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_PCI=y
+# CONFIG_USB_XHCI_PCI_RENESAS is not set
+# CONFIG_USB_XHCI_PLATFORM is not set
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_FSL is not set
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PCI=y
+# CONFIG_USB_OHCI_HCD_PLATFORM is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_HCD_BCMA is not set
+# CONFIG_USB_HCD_TEST_MODE is not set
+# CONFIG_USB_XEN_HCD is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_WDM=m
+CONFIG_USB_TMC=m
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=m
+# CONFIG_USB_STORAGE_DEBUG is not set
+CONFIG_USB_STORAGE_REALTEK=m
+CONFIG_REALTEK_AUTOPM=y
+CONFIG_USB_STORAGE_DATAFAB=m
+CONFIG_USB_STORAGE_FREECOM=m
+CONFIG_USB_STORAGE_ISD200=m
+CONFIG_USB_STORAGE_USBAT=m
+CONFIG_USB_STORAGE_SDDR09=m
+CONFIG_USB_STORAGE_SDDR55=m
+CONFIG_USB_STORAGE_JUMPSHOT=m
+CONFIG_USB_STORAGE_ALAUDA=m
+CONFIG_USB_STORAGE_ONETOUCH=m
+CONFIG_USB_STORAGE_KARMA=m
+CONFIG_USB_STORAGE_CYPRESS_ATACB=m
+CONFIG_USB_STORAGE_ENE_UB6250=m
+CONFIG_USB_UAS=m
+
+#
+# USB Imaging devices
+#
+CONFIG_USB_MDC800=m
+CONFIG_USB_MICROTEK=m
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+# CONFIG_USB_SERIAL_SIMPLE is not set
+CONFIG_USB_SERIAL_AIRCABLE=m
+CONFIG_USB_SERIAL_ARK3116=m
+CONFIG_USB_SERIAL_BELKIN=m
+CONFIG_USB_SERIAL_CH341=m
+CONFIG_USB_SERIAL_WHITEHEAT=m
+CONFIG_USB_SERIAL_DIGI_ACCELEPORT=m
+CONFIG_USB_SERIAL_CP210X=m
+CONFIG_USB_SERIAL_CYPRESS_M8=m
+CONFIG_USB_SERIAL_EMPEG=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_SERIAL_VISOR=m
+CONFIG_USB_SERIAL_IPAQ=m
+CONFIG_USB_SERIAL_IR=m
+CONFIG_USB_SERIAL_EDGEPORT=m
+CONFIG_USB_SERIAL_EDGEPORT_TI=m
+# CONFIG_USB_SERIAL_F81232 is not set
+CONFIG_USB_SERIAL_F8153X=m
+CONFIG_USB_SERIAL_GARMIN=m
+CONFIG_USB_SERIAL_IPW=m
+CONFIG_USB_SERIAL_IUU=m
+CONFIG_USB_SERIAL_KEYSPAN_PDA=m
+CONFIG_USB_SERIAL_KEYSPAN=m
+CONFIG_USB_SERIAL_KLSI=m
+CONFIG_USB_SERIAL_KOBIL_SCT=m
+CONFIG_USB_SERIAL_MCT_U232=m
+# CONFIG_USB_SERIAL_METRO is not set
+CONFIG_USB_SERIAL_MOS7720=m
+CONFIG_USB_SERIAL_MOS7715_PARPORT=y
+CONFIG_USB_SERIAL_MOS7840=m
+CONFIG_USB_SERIAL_MXUPORT=m
+CONFIG_USB_SERIAL_NAVMAN=m
+CONFIG_USB_SERIAL_PL2303=m
+CONFIG_USB_SERIAL_OTI6858=m
+CONFIG_USB_SERIAL_QCAUX=m
+CONFIG_USB_SERIAL_QUALCOMM=m
+CONFIG_USB_SERIAL_SPCP8X5=m
+CONFIG_USB_SERIAL_SAFE=m
+CONFIG_USB_SERIAL_SAFE_PADDED=y
+CONFIG_USB_SERIAL_SIERRAWIRELESS=m
+CONFIG_USB_SERIAL_SYMBOL=m
+CONFIG_USB_SERIAL_TI=m
+CONFIG_USB_SERIAL_CYBERJACK=m
+CONFIG_USB_SERIAL_WWAN=m
+CONFIG_USB_SERIAL_OPTION=m
+CONFIG_USB_SERIAL_OMNINET=m
+CONFIG_USB_SERIAL_OPTICON=m
+CONFIG_USB_SERIAL_XSENS_MT=m
+# CONFIG_USB_SERIAL_WISHBONE is not set
+CONFIG_USB_SERIAL_SSU100=m
+CONFIG_USB_SERIAL_QT2=m
+CONFIG_USB_SERIAL_UPD78F0730=m
+# CONFIG_USB_SERIAL_XR is not set
+CONFIG_USB_SERIAL_DEBUG=m
+
+#
+# USB Miscellaneous drivers
+#
+CONFIG_USB_USS720=m
+CONFIG_USB_EMI62=m
+CONFIG_USB_EMI26=m
+CONFIG_USB_ADUTUX=m
+CONFIG_USB_SEVSEG=m
+CONFIG_USB_LEGOTOWER=m
+CONFIG_USB_LCD=m
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+CONFIG_USB_IDMOUSE=m
+CONFIG_USB_APPLEDISPLAY=m
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+CONFIG_USB_SISUSBVGA=m
+CONFIG_USB_LD=m
+# CONFIG_USB_TRANCEVIBRATOR is not set
+CONFIG_USB_IOWARRIOR=m
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+CONFIG_USB_ISIGHTFW=m
+# CONFIG_USB_YUREX is not set
+CONFIG_USB_EZUSB_FX2=m
+# CONFIG_USB_HUB_USB251XB is not set
+CONFIG_USB_HSIC_USB3503=m
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+CONFIG_USB_ATM=m
+CONFIG_USB_SPEEDTOUCH=m
+CONFIG_USB_CXACRU=m
+CONFIG_USB_UEAGLEATM=m
+CONFIG_USB_XUSBATM=m
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# end of USB Physical Layer drivers
+
+# CONFIG_USB_GADGET is not set
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+# CONFIG_TYPEC_TCPCI is not set
+CONFIG_TYPEC_FUSB302=m
+CONFIG_TYPEC_UCSI=y
+# CONFIG_UCSI_CCG is not set
+CONFIG_UCSI_ACPI=y
+# CONFIG_UCSI_STM32G0 is not set
+CONFIG_TYPEC_TPS6598X=m
+# CONFIG_TYPEC_ANX7411 is not set
+# CONFIG_TYPEC_RT1719 is not set
+# CONFIG_TYPEC_HD3SS3220 is not set
+# CONFIG_TYPEC_STUSB160X is not set
+# CONFIG_TYPEC_WUSB3801 is not set
+
+#
+# USB Type-C Multiplexer/DeMultiplexer Switch support
+#
+# CONFIG_TYPEC_MUX_FSA4480 is not set
+# CONFIG_TYPEC_MUX_GPIO_SBU is not set
+CONFIG_TYPEC_MUX_PI3USB30532=m
+# CONFIG_TYPEC_MUX_NB7VPQ904M is not set
+# end of USB Type-C Multiplexer/DeMultiplexer Switch support
+
+#
+# USB Type-C Alternate Mode drivers
+#
+CONFIG_TYPEC_DP_ALTMODE=m
+# CONFIG_TYPEC_NVIDIA_ALTMODE is not set
+# end of USB Type-C Alternate Mode drivers
+
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_USB_ROLES_INTEL_XHCI=y
+CONFIG_MMC=m
+CONFIG_MMC_BLOCK=m
+CONFIG_MMC_BLOCK_MINORS=8
+CONFIG_SDIO_UART=m
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+CONFIG_MMC_SDHCI=m
+CONFIG_MMC_SDHCI_IO_ACCESSORS=y
+CONFIG_MMC_SDHCI_PCI=m
+CONFIG_MMC_RICOH_MMC=y
+CONFIG_MMC_SDHCI_ACPI=m
+CONFIG_MMC_SDHCI_PLTFM=m
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_WBSD is not set
+CONFIG_MMC_TIFM_SD=m
+# CONFIG_MMC_SPI is not set
+CONFIG_MMC_CB710=m
+CONFIG_MMC_VIA_SDMMC=m
+CONFIG_MMC_VUB300=m
+CONFIG_MMC_USHC=m
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_REALTEK_PCI=m
+CONFIG_MMC_REALTEK_USB=m
+CONFIG_MMC_CQHCI=m
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +# CONFIG_MMC_SDHCI_XENON is not set +# CONFIG_SCSI_UFSHCD is not set +CONFIG_MEMSTICK=m +# CONFIG_MEMSTICK_DEBUG is not set + +# +# MemoryStick drivers +# +# CONFIG_MEMSTICK_UNSAFE_RESUME is not set +CONFIG_MSPRO_BLOCK=m +# CONFIG_MS_BLOCK is not set + +# +# MemoryStick Host Controller Drivers +# +CONFIG_MEMSTICK_TIFM_MS=m +CONFIG_MEMSTICK_JMICRON_38X=m +CONFIG_MEMSTICK_R592=m +CONFIG_MEMSTICK_REALTEK_PCI=m +CONFIG_MEMSTICK_REALTEK_USB=m +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +# CONFIG_LEDS_CLASS_FLASH is not set +# CONFIG_LEDS_CLASS_MULTICOLOR is not set +# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set + +# +# LED drivers +# +# CONFIG_LEDS_APU is not set +# CONFIG_LEDS_AW200XX is not set +CONFIG_LEDS_LM3530=m +# CONFIG_LEDS_LM3532 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_GPIO is not set +CONFIG_LEDS_LP3944=m +# CONFIG_LEDS_LP3952 is not set +# CONFIG_LEDS_LP50XX is not set +# CONFIG_LEDS_PCA955X is not set +# CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_PCA995X is not set +# CONFIG_LEDS_DAC124S085 is not set +# CONFIG_LEDS_PWM is not set +# CONFIG_LEDS_BD2606MVV is not set +# CONFIG_LEDS_BD2802 is not set +CONFIG_LEDS_INTEL_SS4200=m +# CONFIG_LEDS_LT3593 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set +# CONFIG_LEDS_IS31FL319X is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +CONFIG_LEDS_BLINKM=m +CONFIG_LEDS_MLXCPLD=m +# CONFIG_LEDS_MLXREG is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# Flash and Torch LED drivers +# + +# +# RGB LED drivers +# + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=m +CONFIG_LEDS_TRIGGER_ONESHOT=m +CONFIG_LEDS_TRIGGER_DISK=y +# CONFIG_LEDS_TRIGGER_MTD is not set +CONFIG_LEDS_TRIGGER_HEARTBEAT=m +CONFIG_LEDS_TRIGGER_BACKLIGHT=m +# CONFIG_LEDS_TRIGGER_CPU is not set +# CONFIG_LEDS_TRIGGER_ACTIVITY is not set +CONFIG_LEDS_TRIGGER_DEFAULT_ON=m + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=m +CONFIG_LEDS_TRIGGER_CAMERA=m +# CONFIG_LEDS_TRIGGER_PANIC is not set +# CONFIG_LEDS_TRIGGER_NETDEV is not set +# CONFIG_LEDS_TRIGGER_PATTERN is not set +CONFIG_LEDS_TRIGGER_AUDIO=m +# CONFIG_LEDS_TRIGGER_TTY is not set + +# +# Simple LED drivers +# +# CONFIG_ACCESSIBILITY is not set +CONFIG_INFINIBAND=m +CONFIG_INFINIBAND_USER_MAD=m +CONFIG_INFINIBAND_USER_ACCESS=m +CONFIG_INFINIBAND_USER_MEM=y +CONFIG_INFINIBAND_ON_DEMAND_PAGING=y +CONFIG_INFINIBAND_ADDR_TRANS=y +CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS=y +CONFIG_INFINIBAND_VIRT_DMA=y +CONFIG_INFINIBAND_BNXT_RE=m +CONFIG_INFINIBAND_CXGB4=m +# CONFIG_INFINIBAND_EFA is not set +CONFIG_INFINIBAND_ERDMA=m +CONFIG_INFINIBAND_HFI1=m +# CONFIG_HFI1_DEBUG_SDMA_ORDER is not set +# CONFIG_SDMA_VERBOSITY is not set +# CONFIG_INFINIBAND_IRDMA is not set +CONFIG_MLX4_INFINIBAND=m +CONFIG_MLX5_INFINIBAND=m +# CONFIG_INFINIBAND_MTHCA is not set +# CONFIG_INFINIBAND_OCRDMA is not set +CONFIG_INFINIBAND_QEDR=m +# CONFIG_INFINIBAND_QIB is not set +CONFIG_INFINIBAND_USNIC=m +CONFIG_INFINIBAND_VMWARE_PVRDMA=m +CONFIG_INFINIBAND_RDMAVT=m +CONFIG_RDMA_RXE=m +CONFIG_RDMA_SIW=m +CONFIG_INFINIBAND_IPOIB=m +CONFIG_INFINIBAND_IPOIB_CM=y +CONFIG_INFINIBAND_IPOIB_DEBUG=y +# CONFIG_INFINIBAND_IPOIB_DEBUG_DATA is not set +CONFIG_INFINIBAND_SRP=m +CONFIG_INFINIBAND_SRPT=m +CONFIG_INFINIBAND_ISER=m 
+CONFIG_INFINIBAND_ISERT=m +# CONFIG_INFINIBAND_RTRS_CLIENT is not set +# CONFIG_INFINIBAND_RTRS_SERVER is not set +CONFIG_INFINIBAND_OPA_VNIC=m +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +CONFIG_EDAC=y +CONFIG_EDAC_LEGACY_SYSFS=y +# CONFIG_EDAC_DEBUG is not set +CONFIG_EDAC_DECODE_MCE=m +CONFIG_EDAC_GHES=y +CONFIG_EDAC_AMD64=m +CONFIG_EDAC_E752X=m +CONFIG_EDAC_I82975X=m +CONFIG_EDAC_I3000=m +CONFIG_EDAC_I3200=m +CONFIG_EDAC_IE31200=m +CONFIG_EDAC_X38=m +CONFIG_EDAC_I5400=m +CONFIG_EDAC_I7CORE=m +CONFIG_EDAC_I5100=m +CONFIG_EDAC_I7300=m +CONFIG_EDAC_SBRIDGE=m +CONFIG_EDAC_SKX=m +CONFIG_EDAC_I10NM=m +CONFIG_EDAC_PND2=m +# CONFIG_EDAC_IGEN6 is not set +CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_HCTOSYS=y +CONFIG_RTC_HCTOSYS_DEVICE="rtc0" +CONFIG_RTC_SYSTOHC=y +CONFIG_RTC_SYSTOHC_DEVICE="rtc0" +# CONFIG_RTC_DEBUG is not set +CONFIG_RTC_NVMEM=y + +# +# RTC interfaces +# +CONFIG_RTC_INTF_SYSFS=y +CONFIG_RTC_INTF_PROC=y +CONFIG_RTC_INTF_DEV=y +# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set +# CONFIG_RTC_DRV_TEST is not set + +# +# I2C RTC drivers +# +# CONFIG_RTC_DRV_ABB5ZES3 is not set +# CONFIG_RTC_DRV_ABEOZ9 is not set +# CONFIG_RTC_DRV_ABX80X is not set +CONFIG_RTC_DRV_DS1307=m +# CONFIG_RTC_DRV_DS1307_CENTURY is not set +CONFIG_RTC_DRV_DS1374=m +# CONFIG_RTC_DRV_DS1374_WDT is not set +CONFIG_RTC_DRV_DS1672=m +CONFIG_RTC_DRV_MAX6900=m +CONFIG_RTC_DRV_RS5C372=m +CONFIG_RTC_DRV_ISL1208=m +CONFIG_RTC_DRV_ISL12022=m +CONFIG_RTC_DRV_X1205=m +CONFIG_RTC_DRV_PCF8523=m +# CONFIG_RTC_DRV_PCF85063 is not set +# CONFIG_RTC_DRV_PCF85363 is not set +CONFIG_RTC_DRV_PCF8563=m +CONFIG_RTC_DRV_PCF8583=m +CONFIG_RTC_DRV_M41T80=m +CONFIG_RTC_DRV_M41T80_WDT=y +CONFIG_RTC_DRV_BQ32K=m +# CONFIG_RTC_DRV_S35390A is not set +CONFIG_RTC_DRV_FM3130=m +# CONFIG_RTC_DRV_RX8010 is not set +CONFIG_RTC_DRV_RX8581=m +CONFIG_RTC_DRV_RX8025=m +CONFIG_RTC_DRV_EM3027=m +# CONFIG_RTC_DRV_RV3028 is not set +# CONFIG_RTC_DRV_RV3032 is not set +# CONFIG_RTC_DRV_RV8803 is not set +# CONFIG_RTC_DRV_SD3078 is not set + +# +# SPI RTC drivers +# +# CONFIG_RTC_DRV_M41T93 is not set +# CONFIG_RTC_DRV_M41T94 is not set +# CONFIG_RTC_DRV_DS1302 is not set +# CONFIG_RTC_DRV_DS1305 is not set +# CONFIG_RTC_DRV_DS1343 is not set +# CONFIG_RTC_DRV_DS1347 is not set +# CONFIG_RTC_DRV_DS1390 is not set +# CONFIG_RTC_DRV_MAX6916 is not set +# CONFIG_RTC_DRV_R9701 is not set +# CONFIG_RTC_DRV_RX4581 is not set +# CONFIG_RTC_DRV_RS5C348 is not set +# CONFIG_RTC_DRV_MAX6902 is not set +# CONFIG_RTC_DRV_PCF2123 is not set +# CONFIG_RTC_DRV_MCP795 is not set +CONFIG_RTC_I2C_AND_SPI=m + +# +# SPI and I2C RTC drivers +# +CONFIG_RTC_DRV_DS3232=m +CONFIG_RTC_DRV_DS3232_HWMON=y +# CONFIG_RTC_DRV_PCF2127 is not set +CONFIG_RTC_DRV_RV3029C2=m +# CONFIG_RTC_DRV_RV3029_HWMON is not set +# CONFIG_RTC_DRV_RX6110 is not set + +# +# Platform RTC drivers +# +CONFIG_RTC_DRV_CMOS=y +CONFIG_RTC_DRV_DS1286=m +CONFIG_RTC_DRV_DS1511=m +CONFIG_RTC_DRV_DS1553=m +# CONFIG_RTC_DRV_DS1685_FAMILY is not set +CONFIG_RTC_DRV_DS1742=m +CONFIG_RTC_DRV_DS2404=m +CONFIG_RTC_DRV_STK17TA8=m +# CONFIG_RTC_DRV_M48T86 is not set +CONFIG_RTC_DRV_M48T35=m +CONFIG_RTC_DRV_M48T59=m +CONFIG_RTC_DRV_MSM6242=m +CONFIG_RTC_DRV_RP5C01=m + +# +# on-CPU RTC drivers +# +# CONFIG_RTC_DRV_FTRTC010 is not set + +# +# HID Sensor RTC drivers +# +# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_DRV_GOLDFISH is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_VIRTUAL_CHANNELS=y 
+CONFIG_DMA_ACPI=y
+# CONFIG_ALTERA_MSGDMA is not set
+CONFIG_INTEL_IDMA64=m
+CONFIG_INTEL_IDXD_BUS=m
+CONFIG_INTEL_IDXD=m
+# CONFIG_INTEL_IDXD_COMPAT is not set
+CONFIG_INTEL_IDXD_SVM=y
+# CONFIG_INTEL_IDXD_PERFMON is not set
+CONFIG_INTEL_IOATDMA=m
+# CONFIG_PLX_DMA is not set
+# CONFIG_XILINX_DMA is not set
+# CONFIG_XILINX_XDMA is not set
+CONFIG_AMD_PTDMA=m
+# CONFIG_QCOM_HIDMA_MGMT is not set
+# CONFIG_QCOM_HIDMA is not set
+CONFIG_DW_DMAC_CORE=y
+CONFIG_DW_DMAC=m
+CONFIG_DW_DMAC_PCI=y
+# CONFIG_DW_EDMA is not set
+CONFIG_HSU_DMA=y
+# CONFIG_SF_PDMA is not set
+# CONFIG_INTEL_LDMA is not set
+
+#
+# DMA Clients
+#
+CONFIG_ASYNC_TX_DMA=y
+CONFIG_DMATEST=m
+CONFIG_DMA_ENGINE_RAID=y
+
+#
+# DMABUF options
+#
+CONFIG_SYNC_FILE=y
+# CONFIG_SW_SYNC is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+CONFIG_DCA=m
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_UIO_PDRV_GENIRQ=m
+# CONFIG_UIO_DMEM_GENIRQ is not set
+CONFIG_UIO_AEC=m
+CONFIG_UIO_SERCOS3=m
+CONFIG_UIO_PCI_GENERIC=m
+# CONFIG_UIO_NETX is not set
+# CONFIG_UIO_PRUSS is not set
+# CONFIG_UIO_MF624 is not set
+CONFIG_UIO_HV_GENERIC=m
+CONFIG_VFIO=m
+CONFIG_VFIO_GROUP=y
+CONFIG_VFIO_CONTAINER=y
+CONFIG_VFIO_IOMMU_TYPE1=m
+CONFIG_VFIO_NOIOMMU=y
+CONFIG_VFIO_VIRQFD=y
+
+#
+# VFIO support for PCI devices
+#
+CONFIG_VFIO_PCI_CORE=m
+CONFIG_VFIO_PCI_MMAP=y
+CONFIG_VFIO_PCI_INTX=y
+CONFIG_VFIO_PCI=m
+# CONFIG_VFIO_PCI_VGA is not set
+# CONFIG_VFIO_PCI_IGD is not set
+# CONFIG_MLX5_VFIO_PCI is not set
+# end of VFIO support for PCI devices
+
+CONFIG_VFIO_MDEV=m
+CONFIG_IRQ_BYPASS_MANAGER=m
+CONFIG_VIRT_DRIVERS=y
+CONFIG_VMGENID=y
+# CONFIG_VBOXGUEST is not set
+# CONFIG_NITRO_ENCLAVES is not set
+CONFIG_EFI_SECRET=m
+CONFIG_SEV_GUEST=m
+CONFIG_TDX_GUEST_DRIVER=m
+CONFIG_CSV_GUEST=m
+CONFIG_VIRTIO_ANCHOR=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_PCI_LIB=y
+CONFIG_VIRTIO_PCI_LIB_LEGACY=y
+CONFIG_VIRTIO_MENU=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_PCI_LEGACY=y
+CONFIG_VIRTIO_PMEM=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VIRTIO_MEM=m
+CONFIG_VIRTIO_INPUT=m
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_VIRTIO_DMA_SHARED_BUFFER=m
+# CONFIG_VDPA is not set
+CONFIG_VHOST_IOTLB=m
+CONFIG_VHOST_TASK=y
+CONFIG_VHOST=m
+CONFIG_VHOST_MENU=y
+CONFIG_VHOST_NET=m
+CONFIG_VHOST_SCSI=m
+CONFIG_VHOST_VSOCK=m
+# CONFIG_VHOST_CROSS_ENDIAN_LEGACY is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_HYPERV=m
+# CONFIG_HYPERV_VTL_MODE is not set
+CONFIG_HYPERV_TIMER=y
+CONFIG_HYPERV_UTILS=m
+CONFIG_HYPERV_BALLOON=m
+# end of Microsoft Hyper-V guest support
+
+#
+# Xen driver support
+#
+# CONFIG_XEN_BALLOON is not set
+CONFIG_XEN_DEV_EVTCHN=m
+# CONFIG_XEN_BACKEND is not set
+CONFIG_XENFS=m
+CONFIG_XEN_COMPAT_XENFS=y
+CONFIG_XEN_SYS_HYPERVISOR=y
+CONFIG_XEN_XENBUS_FRONTEND=y
+# CONFIG_XEN_GNTDEV is not set
+# CONFIG_XEN_GRANT_DEV_ALLOC is not set
+# CONFIG_XEN_GRANT_DMA_ALLOC is not set
+# CONFIG_XEN_PVCALLS_FRONTEND is not set
+CONFIG_XEN_PRIVCMD=m
+CONFIG_XEN_EFI=y
+CONFIG_XEN_AUTO_XLATE=y
+CONFIG_XEN_ACPI=y
+# CONFIG_XEN_UNPOPULATED_ALLOC is not set
+# CONFIG_XEN_VIRTIO is not set
+# end of Xen driver support
+
+# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_STAGING is not set
+# CONFIG_CHROME_PLATFORMS is not set
+CONFIG_MELLANOX_PLATFORM=y
+CONFIG_MLXREG_HOTPLUG=m
+# CONFIG_MLXREG_IO is not set
+# CONFIG_MLXREG_LC is not set
+# CONFIG_NVSW_SN2201 is not set
+CONFIG_SURFACE_PLATFORMS=y
+# CONFIG_SURFACE3_WMI is not set
+# CONFIG_SURFACE_3_POWER_OPREGION is not set
+# CONFIG_SURFACE_GPE is not set
+# CONFIG_SURFACE_HOTPLUG is not set
+# CONFIG_SURFACE_PRO3_BUTTON is not set
+CONFIG_X86_PLATFORM_DEVICES=y
+CONFIG_ACPI_WMI=m
+CONFIG_WMI_BMOF=m
+# CONFIG_HUAWEI_WMI is not set
+# CONFIG_UV_SYSFS is not set
+CONFIG_MXM_WMI=m
+# CONFIG_NVIDIA_WMI_EC_BACKLIGHT is not set
+# CONFIG_XIAOMI_WMI is not set
+# CONFIG_GIGABYTE_WMI is not set
+# CONFIG_YOGABOOK is not set
+CONFIG_ACERHDF=m
+# CONFIG_ACER_WIRELESS is not set
+CONFIG_ACER_WMI=m
+# CONFIG_AMD_PMF is not set
+# CONFIG_AMD_PMC is not set
+# CONFIG_AMD_HSMP is not set
+# CONFIG_ADV_SWBUTTON is not set
+CONFIG_APPLE_GMUX=m
+CONFIG_ASUS_LAPTOP=m
+# CONFIG_ASUS_WIRELESS is not set
+CONFIG_ASUS_WMI=m
+CONFIG_ASUS_NB_WMI=m
+# CONFIG_ASUS_TF103C_DOCK is not set
+# CONFIG_MERAKI_MX100 is not set
+CONFIG_EEEPC_LAPTOP=m
+CONFIG_EEEPC_WMI=m
+# CONFIG_X86_PLATFORM_DRIVERS_DELL is not set
+CONFIG_AMILO_RFKILL=m
+CONFIG_FUJITSU_LAPTOP=m
+CONFIG_FUJITSU_TABLET=m
+# CONFIG_GPD_POCKET_FAN is not set
+# CONFIG_X86_PLATFORM_DRIVERS_HP is not set
+# CONFIG_WIRELESS_HOTKEY is not set
+# CONFIG_IBM_RTL is not set
+CONFIG_IDEAPAD_LAPTOP=m
+# CONFIG_LENOVO_YMC is not set
+CONFIG_SENSORS_HDAPS=m
+CONFIG_THINKPAD_ACPI=m
+# CONFIG_THINKPAD_ACPI_DEBUGFACILITIES is not set
+# CONFIG_THINKPAD_ACPI_DEBUG is not set
+# CONFIG_THINKPAD_ACPI_UNSAFE_LEDS is not set
+CONFIG_THINKPAD_ACPI_VIDEO=y
+CONFIG_THINKPAD_ACPI_HOTKEY_POLL=y
+# CONFIG_THINKPAD_LMI is not set
+# CONFIG_INTEL_ATOMISP2_PM is not set
+CONFIG_INTEL_IFS=m
+# CONFIG_INTEL_SAR_INT1092 is not set
+CONFIG_INTEL_PMC_CORE=m
+CONFIG_INTEL_PMT_CLASS=m
+CONFIG_INTEL_PMT_TELEMETRY=m
+CONFIG_INTEL_PMT_CRASHLOG=m
+
+#
+# Intel Speed Select Technology interface support
+#
+CONFIG_INTEL_SPEED_SELECT_TPMI=m
+CONFIG_INTEL_SPEED_SELECT_INTERFACE=m
+# end of Intel Speed Select Technology interface support
+
+CONFIG_INTEL_WMI=y
+# CONFIG_INTEL_WMI_SBL_FW_UPDATE is not set
+CONFIG_INTEL_WMI_THUNDERBOLT=m
+
+#
+# Intel Uncore Frequency Control
+#
+# CONFIG_INTEL_UNCORE_FREQ_CONTROL is not set
+# end of Intel Uncore Frequency Control
+
+CONFIG_INTEL_HID_EVENT=m
+CONFIG_INTEL_VBTN=m
+# CONFIG_INTEL_INT0002_VGPIO is not set
+CONFIG_INTEL_OAKTRAIL=m
+# CONFIG_INTEL_ISHTP_ECLITE is not set
+# CONFIG_INTEL_PUNIT_IPC is not set
+CONFIG_INTEL_RST=m
+# CONFIG_INTEL_SDSI is not set
+# CONFIG_INTEL_SMARTCONNECT is not set
+CONFIG_INTEL_TPMI=m
+CONFIG_INTEL_TURBO_MAX_3=y
+CONFIG_INTEL_VSEC=y
+# CONFIG_MSI_EC is not set
+CONFIG_MSI_LAPTOP=m
+CONFIG_MSI_WMI=m
+# CONFIG_PCENGINES_APU2 is not set
+# CONFIG_BARCO_P50_GPIO is not set
+CONFIG_SAMSUNG_LAPTOP=m
+CONFIG_SAMSUNG_Q10=m
+# CONFIG_ACPI_TOSHIBA is not set
+CONFIG_TOSHIBA_BT_RFKILL=m
+# CONFIG_TOSHIBA_HAPS is not set
+# CONFIG_TOSHIBA_WMI is not set
+CONFIG_ACPI_CMPC=m
+CONFIG_COMPAL_LAPTOP=m
+# CONFIG_LG_LAPTOP is not set
+CONFIG_PANASONIC_LAPTOP=m
+CONFIG_SONY_LAPTOP=m
+CONFIG_SONYPI_COMPAT=y
+# CONFIG_SYSTEM76_ACPI is not set
+CONFIG_TOPSTAR_LAPTOP=m
+# CONFIG_SERIAL_MULTI_INSTANTIATE is not set
+CONFIG_MLX_PLATFORM=m
+CONFIG_INTEL_IPS=m
+# CONFIG_INTEL_SCU_PCI is not set
+# CONFIG_INTEL_SCU_PLATFORM is not set
+# CONFIG_SIEMENS_SIMATIC_IPC is not set
+# CONFIG_WINMATE_FM07_KEYS is not set
+# CONFIG_SEL3350_PLATFORM is not set
+CONFIG_P2SB=y
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_PWM is not set
+# CONFIG_XILINX_VCU is not set
+CONFIG_HWSPINLOCK=y
+
+#
+# Clock Source drivers
+#
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# end of Clock Source drivers
+
+CONFIG_MAILBOX=y
+CONFIG_PCC=y
+# CONFIG_ALTERA_MBOX is not set
+CONFIG_IOMMU_IOVA=y
+CONFIG_IOMMU_API=y
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Generic IOMMU Pagetable Support
+#
+CONFIG_IOMMU_IO_PGTABLE=y
+# end of Generic IOMMU Pagetable Support
+
+# CONFIG_IOMMU_DEBUGFS is not set
+# CONFIG_IOMMU_DEFAULT_DMA_STRICT is not set
+# CONFIG_IOMMU_DEFAULT_DMA_LAZY is not set
+CONFIG_IOMMU_DEFAULT_PASSTHROUGH=y
+CONFIG_IOMMU_DMA=y
+CONFIG_IOMMU_SVA=y
+CONFIG_AMD_IOMMU=y
+CONFIG_AMD_IOMMU_V2=m
+CONFIG_DMAR_TABLE=y
+CONFIG_INTEL_IOMMU=y
+CONFIG_INTEL_IOMMU_SVM=y
+# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set
+CONFIG_INTEL_IOMMU_FLOPPY_WA=y
+CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON=y
+CONFIG_INTEL_IOMMU_PERF_EVENTS=y
+# CONFIG_IOMMUFD is not set
+CONFIG_IRQ_REMAP=y
+CONFIG_HYPERV_IOMMU=y
+# CONFIG_VIRTIO_IOMMU is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_QCOM_GLINK_RPM is not set
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+# CONFIG_SOUNDWIRE is not set
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# end of Enable LiteX SoC Builder specific drivers
+
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+CONFIG_IIO=m
+CONFIG_IIO_BUFFER=y
+# CONFIG_IIO_BUFFER_CB is not set
+# CONFIG_IIO_BUFFER_DMA is not set
+# CONFIG_IIO_BUFFER_DMAENGINE is not set
+# CONFIG_IIO_BUFFER_HW_CONSUMER is not set
+CONFIG_IIO_KFIFO_BUF=m
+CONFIG_IIO_TRIGGERED_BUFFER=m
+# CONFIG_IIO_CONFIGFS is not set
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_CONSUMERS_PER_TRIGGER=2
+# CONFIG_IIO_SW_DEVICE is not set
+# CONFIG_IIO_SW_TRIGGER is not set
+# CONFIG_IIO_TRIGGERED_EVENT is not set
+
+#
+# Accelerometers
+#
+# CONFIG_ADIS16201 is not set
+# CONFIG_ADIS16209 is not set
+# CONFIG_ADXL313_I2C is not set
+# CONFIG_ADXL313_SPI is not set
+# CONFIG_ADXL345_I2C is not set
+# CONFIG_ADXL345_SPI is not set
+# CONFIG_ADXL355_I2C is not set
+# CONFIG_ADXL355_SPI is not set
+# CONFIG_ADXL367_SPI is not set
+# CONFIG_ADXL367_I2C is not set
+# CONFIG_ADXL372_SPI is not set
+# CONFIG_ADXL372_I2C is not set
+# CONFIG_BMA180 is not set
+# CONFIG_BMA220 is not set
+# CONFIG_BMA400 is not set
+# CONFIG_BMC150_ACCEL is not set
+# CONFIG_BMI088_ACCEL is not set
+# CONFIG_DA280 is not set
+# CONFIG_DA311 is not set
+# CONFIG_DMARD06 is not set
+# CONFIG_DMARD09 is not set
+# CONFIG_DMARD10 is not set
+# CONFIG_FXLS8962AF_I2C is not set
+# CONFIG_FXLS8962AF_SPI is not set
+CONFIG_HID_SENSOR_ACCEL_3D=m
+# CONFIG_IIO_ST_ACCEL_3AXIS is not set
+# CONFIG_IIO_KX022A_SPI is not set
+# CONFIG_IIO_KX022A_I2C is not set
+# CONFIG_KXSD9 is not set
+# CONFIG_KXCJK1013 is not set
+# CONFIG_MC3230 is not set
+# CONFIG_MMA7455_I2C is not set
+# CONFIG_MMA7455_SPI is not set
+# CONFIG_MMA7660 is not set
+# CONFIG_MMA8452 is not set
+# CONFIG_MMA9551 is not set
+# CONFIG_MMA9553 is not set
+# CONFIG_MSA311 is not set
+# CONFIG_MXC4005 is not set
+# CONFIG_MXC6255 is not set
+# CONFIG_SCA3000 is not set
+# CONFIG_SCA3300 is not set
+# CONFIG_STK8312 is not set
+# CONFIG_STK8BA50 is not set
+# end of Accelerometers
+
+#
+# Analog to digital converters
+#
+# CONFIG_AD4130 is not set
+# CONFIG_AD7091R5 is not set
+# CONFIG_AD7124 is not set
+# CONFIG_AD7192 is not set
+# CONFIG_AD7266 is not set
+# CONFIG_AD7280 is not set
+# CONFIG_AD7291 is not set
+# CONFIG_AD7292 is not set
+# CONFIG_AD7298 is not set
+# CONFIG_AD7476 is not set
+# CONFIG_AD7606_IFACE_PARALLEL is not set
+# CONFIG_AD7606_IFACE_SPI is not set
+# CONFIG_AD7766 is not set
+# CONFIG_AD7768_1 is not set
+# CONFIG_AD7780 is not set
+# CONFIG_AD7791 is not set
+# CONFIG_AD7793 is not set
+# CONFIG_AD7887 is not set
+# CONFIG_AD7923 is not set
+# CONFIG_AD7949 is not set
+# CONFIG_AD799X is not set
+# CONFIG_ENVELOPE_DETECTOR is not set
+# CONFIG_HI8435 is not set
+# CONFIG_HX711 is not set
+# CONFIG_INA2XX_ADC is not set
+# CONFIG_LTC2471 is not set
+# CONFIG_LTC2485 is not set
+# CONFIG_LTC2496 is not set
+# CONFIG_LTC2497 is not set
+# CONFIG_MAX1027 is not set
+# CONFIG_MAX11100 is not set
+# CONFIG_MAX1118 is not set
+# CONFIG_MAX11205 is not set
+# CONFIG_MAX11410 is not set
+# CONFIG_MAX1241 is not set
+# CONFIG_MAX1363 is not set
+# CONFIG_MAX9611 is not set
+# CONFIG_MCP320X is not set
+# CONFIG_MCP3422 is not set
+# CONFIG_MCP3911 is not set
+# CONFIG_NAU7802 is not set
+# CONFIG_RICHTEK_RTQ6056 is not set
+# CONFIG_SD_ADC_MODULATOR is not set
+# CONFIG_TI_ADC081C is not set
+# CONFIG_TI_ADC0832 is not set
+# CONFIG_TI_ADC084S021 is not set
+# CONFIG_TI_ADC12138 is not set
+# CONFIG_TI_ADC108S102 is not set
+# CONFIG_TI_ADC128S052 is not set
+# CONFIG_TI_ADC161S626 is not set
+# CONFIG_TI_ADS1015 is not set
+# CONFIG_TI_ADS7924 is not set
+# CONFIG_TI_ADS1100 is not set
+# CONFIG_TI_ADS7950 is not set
+# CONFIG_TI_ADS8344 is not set
+# CONFIG_TI_ADS8688 is not set
+# CONFIG_TI_ADS124S08 is not set
+# CONFIG_TI_ADS131E08 is not set
+# CONFIG_TI_LMP92064 is not set
+# CONFIG_TI_TLC4541 is not set
+# CONFIG_TI_TSC2046 is not set
+# CONFIG_VF610_ADC is not set
+# CONFIG_VIPERBOARD_ADC is not set
+# CONFIG_XILINX_XADC is not set
+# end of Analog to digital converters
+
+#
+# Analog to digital and digital to analog converters
+#
+# CONFIG_AD74115 is not set
+# CONFIG_AD74413R is not set
+# end of Analog to digital and digital to analog converters
+
+#
+# Analog Front Ends
+#
+# CONFIG_IIO_RESCALE is not set
+# end of Analog Front Ends
+
+#
+# Amplifiers
+#
+# CONFIG_AD8366 is not set
+# CONFIG_ADA4250 is not set
+# CONFIG_HMC425 is not set
+# end of Amplifiers
+
+#
+# Capacitance to digital converters
+#
+# CONFIG_AD7150 is not set
+# CONFIG_AD7746 is not set
+# end of Capacitance to digital converters
+
+#
+# Chemical Sensors
+#
+# CONFIG_ATLAS_PH_SENSOR is not set
+# CONFIG_ATLAS_EZO_SENSOR is not set
+# CONFIG_BME680 is not set
+# CONFIG_CCS811 is not set
+# CONFIG_IAQCORE is not set
+# CONFIG_SCD30_CORE is not set
+# CONFIG_SCD4X is not set
+# CONFIG_SENSIRION_SGP30 is not set
+# CONFIG_SENSIRION_SGP40 is not set
+# CONFIG_SPS30_I2C is not set
+# CONFIG_SENSEAIR_SUNRISE_CO2 is not set
+# CONFIG_VZ89X is not set
+# end of Chemical Sensors
+
+#
+# Hid Sensor IIO Common
+#
+CONFIG_HID_SENSOR_IIO_COMMON=m
+CONFIG_HID_SENSOR_IIO_TRIGGER=m
+# end of Hid Sensor IIO Common
+
+#
+# IIO SCMI Sensors
+#
+# end of IIO SCMI Sensors
+
+#
+# SSP Sensor Common
+#
+# CONFIG_IIO_SSP_SENSORHUB is not set
+# end of SSP Sensor Common
+
+#
+# Digital to analog converters
+#
+# CONFIG_AD3552R is not set
+# CONFIG_AD5064 is not set
+# CONFIG_AD5360 is not set
+# CONFIG_AD5380 is not set
+# CONFIG_AD5421 is not set
+# CONFIG_AD5446 is not set
+# CONFIG_AD5449 is not set
+# CONFIG_AD5592R is not set
+# CONFIG_AD5593R is not set
+# CONFIG_AD5504 is not set
+# CONFIG_AD5624R_SPI is not set
+# CONFIG_LTC2688 is not set
+# CONFIG_AD5686_SPI is not set
+# CONFIG_AD5696_I2C is not set
+# CONFIG_AD5755 is not set
+# CONFIG_AD5758 is not set
+# CONFIG_AD5761 is not set
+# CONFIG_AD5764 is not set
+# CONFIG_AD5766 is not set
+# CONFIG_AD5770R is not set
+# CONFIG_AD5791 is not set
+# CONFIG_AD7293 is not set
+# CONFIG_AD7303 is not set
+# CONFIG_AD8801 is not set
+# CONFIG_DPOT_DAC is not set
+# CONFIG_DS4424 is not set
+# CONFIG_LTC1660 is not set
+# CONFIG_LTC2632 is not set
+# CONFIG_M62332 is not set
+# CONFIG_MAX517 is not set
+# CONFIG_MAX5522 is not set
+# CONFIG_MAX5821 is not set
+# CONFIG_MCP4725 is not set
+# CONFIG_MCP4728 is not set
+# CONFIG_MCP4922 is not set
+# CONFIG_TI_DAC082S085 is not set
+# CONFIG_TI_DAC5571 is not set
+# CONFIG_TI_DAC7311 is not set
+# CONFIG_TI_DAC7612 is not set
+# CONFIG_VF610_DAC is not set
+# end of Digital to analog converters
+
+#
+# IIO dummy driver
+#
+# end of IIO dummy driver
+
+#
+# Filters
+#
+# CONFIG_ADMV8818 is not set
+# end of Filters
+
+#
+# Frequency Synthesizers DDS/PLL
+#
+
+#
+# Clock Generator/Distribution
+#
+# CONFIG_AD9523 is not set
+# end of Clock Generator/Distribution
+
+#
+# Phase-Locked Loop (PLL) frequency synthesizers
+#
+# CONFIG_ADF4350 is not set
+# CONFIG_ADF4371 is not set
+# CONFIG_ADF4377 is not set
+# CONFIG_ADMV1013 is not set
+# CONFIG_ADMV1014 is not set
+# CONFIG_ADMV4420 is not set
+# CONFIG_ADRF6780 is not set
+# end of Phase-Locked Loop (PLL) frequency synthesizers
+# end of Frequency Synthesizers DDS/PLL
+
+#
+# Digital gyroscope sensors
+#
+# CONFIG_ADIS16080 is not set
+# CONFIG_ADIS16130 is not set
+# CONFIG_ADIS16136 is not set
+# CONFIG_ADIS16260 is not set
+# CONFIG_ADXRS290 is not set
+# CONFIG_ADXRS450 is not set
+# CONFIG_BMG160 is not set
+# CONFIG_FXAS21002C is not set
+CONFIG_HID_SENSOR_GYRO_3D=m
+# CONFIG_MPU3050_I2C is not set
+# CONFIG_IIO_ST_GYRO_3AXIS is not set
+# CONFIG_ITG3200 is not set
+# end of Digital gyroscope sensors
+
+#
+# Health Sensors
+#
+
+#
+# Heart Rate Monitors
+#
+# CONFIG_AFE4403 is not set
+# CONFIG_AFE4404 is not set
+# CONFIG_MAX30100 is not set
+# CONFIG_MAX30102 is not set
+# end of Heart Rate Monitors
+# end of Health Sensors
+
+#
+# Humidity sensors
+#
+# CONFIG_AM2315 is not set
+# CONFIG_DHT11 is not set
+# CONFIG_HDC100X is not set
+# CONFIG_HDC2010 is not set
+CONFIG_HID_SENSOR_HUMIDITY=m
+# CONFIG_HTS221 is not set
+# CONFIG_HTU21 is not set
+# CONFIG_SI7005 is not set
+# CONFIG_SI7020 is not set
+# end of Humidity sensors
+
+#
+# Inertial measurement units
+#
+# CONFIG_ADIS16400 is not set
+# CONFIG_ADIS16460 is not set
+# CONFIG_ADIS16475 is not set
+# CONFIG_ADIS16480 is not set
+# CONFIG_BMI160_I2C is not set
+# CONFIG_BMI160_SPI is not set
+# CONFIG_BOSCH_BNO055_I2C is not set
+# CONFIG_FXOS8700_I2C is not set
+# CONFIG_FXOS8700_SPI is not set
+# CONFIG_KMX61 is not set
+# CONFIG_INV_ICM42600_I2C is not set
+# CONFIG_INV_ICM42600_SPI is not set
+# CONFIG_INV_MPU6050_I2C is not set
+# CONFIG_INV_MPU6050_SPI is not set
+# CONFIG_IIO_ST_LSM6DSX is not set
+# CONFIG_IIO_ST_LSM9DS0 is not set
+# end of Inertial measurement units
+
+#
+# Light sensors
+#
+# CONFIG_ACPI_ALS is not set
+# CONFIG_ADJD_S311 is not set
+# CONFIG_ADUX1020 is not set
+# CONFIG_AL3010 is not set
+# CONFIG_AL3320A is not set
+# CONFIG_APDS9300 is not set
+# CONFIG_APDS9960 is not set
+# CONFIG_AS73211 is not set
+# CONFIG_BH1750 is not set
+# CONFIG_BH1780 is not set
+# CONFIG_CM32181 is not set
+# CONFIG_CM3232 is not set
+# CONFIG_CM3323 is not set
+# CONFIG_CM3605 is not set
+# CONFIG_CM36651 is not set
+# CONFIG_GP2AP002 is not set
+# CONFIG_GP2AP020A00F is not set
+# CONFIG_SENSORS_ISL29018 is not set
+# CONFIG_SENSORS_ISL29028 is not set
+# CONFIG_ISL29125 is not set
+CONFIG_HID_SENSOR_ALS=m
+CONFIG_HID_SENSOR_PROX=m
+# CONFIG_JSA1212 is not set
+# CONFIG_ROHM_BU27008 is not set
+# CONFIG_ROHM_BU27034 is not set
+# CONFIG_RPR0521 is not set
+# CONFIG_LTR501 is not set
+# CONFIG_LTRF216A is not set
+# CONFIG_LV0104CS is not set
+# CONFIG_MAX44000 is not set
+# CONFIG_MAX44009 is not set
+# CONFIG_NOA1305 is not set
+# CONFIG_OPT3001 is not set
+# CONFIG_OPT4001 is not set
+# CONFIG_PA12203001 is not set
+# CONFIG_SI1133 is not set
+# CONFIG_SI1145 is not set
+# CONFIG_STK3310 is not set
+# CONFIG_ST_UVIS25 is not set
+# CONFIG_TCS3414 is not set
+# CONFIG_TCS3472 is not set
+# CONFIG_SENSORS_TSL2563 is not set
+# CONFIG_TSL2583 is not set
+# CONFIG_TSL2591 is not set
+# CONFIG_TSL2772 is not set
+# CONFIG_TSL4531 is not set
+# CONFIG_US5182D is not set
+# CONFIG_VCNL4000 is not set
+# CONFIG_VCNL4035 is not set
+# CONFIG_VEML6030 is not set
+# CONFIG_VEML6070 is not set
+# CONFIG_VL6180 is not set
+# CONFIG_ZOPT2201 is not set
+# end of Light sensors
+
+#
+# Magnetometer sensors
+#
+# CONFIG_AK8974 is not set
+# CONFIG_AK8975 is not set
+# CONFIG_AK09911 is not set
+# CONFIG_BMC150_MAGN_I2C is not set
+# CONFIG_BMC150_MAGN_SPI is not set
+# CONFIG_MAG3110 is not set
+CONFIG_HID_SENSOR_MAGNETOMETER_3D=m
+# CONFIG_MMC35240 is not set
+# CONFIG_IIO_ST_MAGN_3AXIS is not set
+# CONFIG_SENSORS_HMC5843_I2C is not set
+# CONFIG_SENSORS_HMC5843_SPI is not set
+# CONFIG_SENSORS_RM3100_I2C is not set
+# CONFIG_SENSORS_RM3100_SPI is not set
+# CONFIG_TI_TMAG5273 is not set
+# CONFIG_YAMAHA_YAS530 is not set
+# end of Magnetometer sensors
+
+#
+# Multiplexers
+#
+# CONFIG_IIO_MUX is not set
+# end of Multiplexers
+
+#
+# Inclinometer sensors
+#
+CONFIG_HID_SENSOR_INCLINOMETER_3D=m
+CONFIG_HID_SENSOR_DEVICE_ROTATION=m
+# end of Inclinometer sensors
+
+#
+# Triggers - standalone
+#
+# CONFIG_IIO_INTERRUPT_TRIGGER is not set
+# CONFIG_IIO_SYSFS_TRIGGER is not set
+# end of Triggers - standalone
+
+#
+# Linear and angular position sensors
+#
+# CONFIG_HID_SENSOR_CUSTOM_INTEL_HINGE is not set
+# end of Linear and angular position sensors
+
+#
+# Digital potentiometers
+#
+# CONFIG_AD5110 is not set
+# CONFIG_AD5272 is not set
+# CONFIG_DS1803 is not set
+# CONFIG_MAX5432 is not set
+# CONFIG_MAX5481 is not set
+# CONFIG_MAX5487 is not set
+# CONFIG_MCP4018 is not set
+# CONFIG_MCP4131 is not set
+# CONFIG_MCP4531 is not set
+# CONFIG_MCP41010 is not set
+# CONFIG_TPL0102 is not set
+# CONFIG_X9250 is not set
+# end of Digital potentiometers
+
+#
+# Digital potentiostats
+#
+# CONFIG_LMP91000 is not set
+# end of Digital potentiostats
+
+#
+# Pressure sensors
+#
+# CONFIG_ABP060MG is not set
+# CONFIG_BMP280 is not set
+# CONFIG_DLHL60D is not set
+# CONFIG_DPS310 is not set
+CONFIG_HID_SENSOR_PRESS=m
+# CONFIG_HP03 is not set
+# CONFIG_ICP10100 is not set
+# CONFIG_MPL115_I2C is not set
+# CONFIG_MPL115_SPI is not set
+# CONFIG_MPL3115 is not set
+# CONFIG_MPRLS0025PA is not set
+# CONFIG_MS5611 is not set
+# CONFIG_MS5637 is not set
+# CONFIG_IIO_ST_PRESS is not set
+# CONFIG_T5403 is not set
+# CONFIG_HP206C is not set
+# CONFIG_ZPA2326 is not set
+# end of Pressure sensors
+
+#
+# Lightning sensors
+#
+# CONFIG_AS3935 is not set
+# end of Lightning sensors
+
+#
+# Proximity and distance sensors
+#
+# CONFIG_IRSD200 is not set
+# CONFIG_ISL29501 is not set
+# CONFIG_LIDAR_LITE_V2 is not set
+# CONFIG_MB1232 is not set
+# CONFIG_PING is not set
+# CONFIG_RFD77402 is not set
+# CONFIG_SRF04 is not set
+# CONFIG_SX9310 is not set
+# CONFIG_SX9324 is not set
+# CONFIG_SX9360 is not set
+# CONFIG_SX9500 is not set
+# CONFIG_SRF08 is not set
+# CONFIG_VCNL3020 is not set
+# CONFIG_VL53L0X_I2C is not set
+# end of Proximity and distance sensors
+
+#
+# Resolver to digital converters
+#
+# CONFIG_AD2S90 is not set
+# CONFIG_AD2S1200 is not set
+# end of Resolver to digital converters
+
+#
+# Temperature sensors
+#
+# CONFIG_LTC2983 is not set
+# CONFIG_MAXIM_THERMOCOUPLE is not set
+CONFIG_HID_SENSOR_TEMP=m
+# CONFIG_MLX90614 is not set
+# CONFIG_MLX90632 is not set
+# CONFIG_TMP006 is not set
+# CONFIG_TMP007 is not set
+# CONFIG_TMP117 is not set
+# CONFIG_TSYS01 is not set
+# CONFIG_TSYS02D is not set
+# CONFIG_MAX30208 is not set
+# CONFIG_MAX31856 is not set
+# CONFIG_MAX31865 is not set
+# end of Temperature sensors
+
+CONFIG_NTB=m
+# CONFIG_NTB_MSI is not set
+# CONFIG_NTB_AMD is not set
+# CONFIG_NTB_IDT is not set
+# CONFIG_NTB_INTEL is not set
+# CONFIG_NTB_EPF is not set
+# CONFIG_NTB_SWITCHTEC is not set
+# CONFIG_NTB_PINGPONG is not set
+# CONFIG_NTB_TOOL is not set
+# CONFIG_NTB_PERF is not set
+# CONFIG_NTB_TRANSPORT is not set
+CONFIG_PWM=y
+CONFIG_PWM_SYSFS=y
+# CONFIG_PWM_DEBUG is not set
+# CONFIG_PWM_CLK is not set
+# CONFIG_PWM_DWC is not set
+CONFIG_PWM_LPSS=m
+CONFIG_PWM_LPSS_PCI=m
+CONFIG_PWM_LPSS_PLATFORM=m
+# CONFIG_PWM_PCA9685 is not set
+
+#
+# IRQ chip support
+#
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+# CONFIG_GENERIC_PHY is not set
+# CONFIG_USB_LGM_PHY is not set
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_CPCAP_USB is not set
+# CONFIG_PHY_INTEL_LGM_EMMC is not set
+# end of PHY Subsystem
+
+CONFIG_POWERCAP=y
+CONFIG_INTEL_RAPL_CORE=m
+CONFIG_INTEL_RAPL=m
+# CONFIG_INTEL_RAPL_TPMI is not set
+CONFIG_IDLE_INJECT=y
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_DWC_PCIE_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
+CONFIG_RAS_CEC=y
+# CONFIG_RAS_CEC_DEBUG is not set
+# CONFIG_USB4 is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+CONFIG_LIBNVDIMM=y
+CONFIG_BLK_DEV_PMEM=y
+CONFIG_ND_CLAIM=y
+CONFIG_ND_BTT=y
+CONFIG_BTT=y
+CONFIG_ND_PFN=y
+CONFIG_NVDIMM_PFN=y
+CONFIG_NVDIMM_DAX=y
+CONFIG_NVDIMM_KEYS=y
+# CONFIG_NVDIMM_SECURITY_TEST is not set
+CONFIG_DAX=y +CONFIG_DEV_DAX=y +CONFIG_DEV_DAX_PMEM=y +CONFIG_DEV_DAX_HMEM=y +CONFIG_DEV_DAX_CXL=m +CONFIG_DEV_DAX_HMEM_DEVICES=y +CONFIG_DEV_DAX_KMEM=y +CONFIG_NVMEM=y +CONFIG_NVMEM_SYSFS=y + +# +# Layout Types +# +# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set +# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set +# end of Layout Types + +# CONFIG_NVMEM_RMEM is not set + +# +# HW tracing support +# +CONFIG_STM=m +CONFIG_STM_PROTO_BASIC=m +CONFIG_STM_PROTO_SYS_T=m +CONFIG_STM_DUMMY=m +CONFIG_STM_SOURCE_CONSOLE=m +CONFIG_STM_SOURCE_HEARTBEAT=m +CONFIG_STM_SOURCE_FTRACE=m +CONFIG_INTEL_TH=m +CONFIG_INTEL_TH_PCI=m +CONFIG_INTEL_TH_ACPI=m +CONFIG_INTEL_TH_GTH=m +CONFIG_INTEL_TH_STH=m +CONFIG_INTEL_TH_MSU=m +CONFIG_INTEL_TH_PTI=m +# CONFIG_INTEL_TH_DEBUG is not set +# end of HW tracing support + +# CONFIG_FPGA is not set +CONFIG_TEE=m +CONFIG_AMDTEE=m +# CONFIG_SIOX is not set +# CONFIG_SLIMBUS is not set +# CONFIG_INTERCONNECT is not set +# CONFIG_COUNTER is not set +# CONFIG_MOST is not set +# CONFIG_PECI is not set +# CONFIG_HTE is not set +# end of Device Drivers + +# +# File systems +# +CONFIG_DCACHE_WORD_ACCESS=y +# CONFIG_VALIDATE_FS_PARSER is not set +CONFIG_FS_IOMAP=y +CONFIG_BUFFER_HEAD=y +CONFIG_LEGACY_DIRECT_IO=y +# CONFIG_EXT2_FS is not set +CONFIG_EXT3_FS=m +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set +CONFIG_EXT4_FS=y +CONFIG_EXT4_USE_FOR_EXT2=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y +# CONFIG_EXT4_DEBUG is not set +CONFIG_JBD2=y +# CONFIG_JBD2_DEBUG is not set +CONFIG_FS_MBCACHE=y +# CONFIG_REISERFS_FS is not set +# CONFIG_JFS_FS is not set +CONFIG_XFS_FS=m +CONFIG_XFS_SUPPORT_V4=y +CONFIG_XFS_SUPPORT_ASCII_CI=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +# CONFIG_XFS_RT is not set +# CONFIG_XFS_ONLINE_SCRUB is not set +# CONFIG_XFS_WARN is not set +# CONFIG_XFS_DEBUG is not set +CONFIG_GFS2_FS=m +CONFIG_GFS2_FS_LOCKING_DLM=y +# CONFIG_OCFS2_FS is not set +CONFIG_BTRFS_FS=m +# CONFIG_BTRFS_FS_POSIX_ACL is not set +# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set +# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set +# CONFIG_BTRFS_DEBUG is not set +# CONFIG_BTRFS_ASSERT is not set +# CONFIG_BTRFS_FS_REF_VERIFY is not set +# CONFIG_NILFS2_FS is not set +# CONFIG_F2FS_FS is not set +# CONFIG_ZONEFS_FS is not set +CONFIG_FS_DAX=y +CONFIG_FS_DAX_PMD=y +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y +CONFIG_EXPORTFS_BLOCK_OPS=y +CONFIG_FILE_LOCKING=y +# CONFIG_FS_ENCRYPTION is not set +# CONFIG_FS_VERITY is not set +CONFIG_FSNOTIFY=y +CONFIG_DNOTIFY=y +CONFIG_INOTIFY_USER=y +CONFIG_FANOTIFY=y +CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +CONFIG_QUOTA=y +CONFIG_QUOTA_NETLINK_INTERFACE=y +# CONFIG_QUOTA_DEBUG is not set +CONFIG_QUOTA_TREE=y +# CONFIG_QFMT_V1 is not set +CONFIG_QFMT_V2=y +CONFIG_QUOTACTL=y +CONFIG_AUTOFS_FS=y +CONFIG_FUSE_FS=m +CONFIG_CUSE=m +CONFIG_VIRTIO_FS=m +CONFIG_FUSE_DAX=y +CONFIG_VIRT_FUSE=m +CONFIG_OVERLAY_FS=m +CONFIG_OVERLAY_FS_REDIRECT_DIR=y +CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW=y +CONFIG_OVERLAY_FS_INDEX=y +# CONFIG_OVERLAY_FS_NFS_EXPORT is not set +# CONFIG_OVERLAY_FS_XINO_AUTO is not set +# CONFIG_OVERLAY_FS_METACOPY is not set +# CONFIG_OVERLAY_FS_DEBUG is not set + +# +# Caches +# +CONFIG_NETFS_SUPPORT=m +CONFIG_NETFS_STATS=y +CONFIG_FSCACHE=m +CONFIG_FSCACHE_STATS=y +# CONFIG_FSCACHE_DEBUG is not set +CONFIG_CACHEFILES=m +# CONFIG_CACHEFILES_DEBUG is not set +# CONFIG_CACHEFILES_ERROR_INJECTION is not set +CONFIG_CACHEFILES_ONDEMAND=y +# end of Caches + +# +# CD-ROM/DVD Filesystems +# +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y 
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
+# CONFIG_EXFAT_FS is not set
+# CONFIG_NTFS_FS is not set
+CONFIG_NTFS3_FS=m
+# CONFIG_NTFS3_64BIT_CLUSTER is not set
+# CONFIG_NTFS3_LZX_XPRESS is not set
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_VMCORE=y
+CONFIG_PROC_VMCORE_DEVICE_DUMP=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_PROC_CHILDREN=y
+CONFIG_PROC_PID_ARCH_STATUS=y
+CONFIG_PROC_CPU_RESCTRL=y
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_HUGETLBFS=y
+CONFIG_HUGETLB_PAGE=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP=y
+CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON=y
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_EFIVAR_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ORANGEFS_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+# CONFIG_HFS_FS is not set
+# CONFIG_HFSPLUS_FS is not set
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+# CONFIG_UBIFS_FS is not set
+CONFIG_CRAMFS=m
+CONFIG_CRAMFS_BLOCKDEV=y
+# CONFIG_CRAMFS_MTD is not set
+CONFIG_SQUASHFS=m
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+CONFIG_SQUASHFS_XATTR=y
+CONFIG_SQUASHFS_ZLIB=y
+# CONFIG_SQUASHFS_LZ4 is not set
+CONFIG_SQUASHFS_LZO=y
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+CONFIG_PSTORE_COMPRESS=y
+CONFIG_PSTORE_CONSOLE=y
+# CONFIG_PSTORE_PMSG is not set
+# CONFIG_PSTORE_FTRACE is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_PSTORE_BLK is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+CONFIG_EROFS_FS=m
+# CONFIG_EROFS_FS_DEBUG is not set
+CONFIG_EROFS_FS_XATTR=y
+CONFIG_EROFS_FS_POSIX_ACL=y
+CONFIG_EROFS_FS_SECURITY=y
+CONFIG_EROFS_FS_ZIP=y
+CONFIG_EROFS_FS_ZIP_LZMA=y
+CONFIG_EROFS_FS_ZIP_DEFLATE=y
+CONFIG_EROFS_FS_ONDEMAND=y
+# CONFIG_EROFS_FS_PCPU_KTHREAD is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=m
+# CONFIG_NFS_V2 is not set
+CONFIG_NFS_V3=m
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=m
+# CONFIG_NFS_SWAP is not set
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_PNFS_FILE_LAYOUT=m
+CONFIG_PNFS_BLOCK=m
+CONFIG_PNFS_FLEXFILE_LAYOUT=m
+CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org"
+# CONFIG_NFS_V4_1_MIGRATION is not set
+CONFIG_NFS_V4_SECURITY_LABEL=y
+CONFIG_NFS_FSCACHE=y
+# CONFIG_NFS_USE_LEGACY_DNS is not set
+CONFIG_NFS_USE_KERNEL_DNS=y
+CONFIG_NFS_DEBUG=y
+CONFIG_NFS_DISABLE_UDP_SUPPORT=y
+# CONFIG_NFS_V4_2_READ_PLUS is not set
+CONFIG_NFSD=m
+# CONFIG_NFSD_V2 is not set
+CONFIG_NFSD_V3_ACL=y
+CONFIG_NFSD_V4=y
+CONFIG_NFSD_PNFS=y
+# CONFIG_NFSD_BLOCKLAYOUT is not set
+CONFIG_NFSD_SCSILAYOUT=y
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
+# CONFIG_NFSD_V4_2_INTER_SSC is not set
+CONFIG_NFSD_V4_SECURITY_LABEL=y
+CONFIG_GRACE_PERIOD=m
+CONFIG_LOCKD=m
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_ACL_SUPPORT=m
+CONFIG_NFS_COMMON=y
+CONFIG_NFS_V4_2_SSC_HELPER=y
+CONFIG_SUNRPC=m
+CONFIG_SUNRPC_GSS=m
+CONFIG_SUNRPC_BACKCHANNEL=y
+CONFIG_RPCSEC_GSS_KRB5=m
+CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA is not set
+# CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 is not set
+CONFIG_SUNRPC_DEBUG=y
+CONFIG_SUNRPC_XPRT_RDMA=m
+CONFIG_CEPH_FS=m
+# CONFIG_CEPH_FSCACHE is not set
+CONFIG_CEPH_FS_POSIX_ACL=y
+# CONFIG_CEPH_FS_SECURITY_LABEL is not set
+CONFIG_CIFS=m
+# CONFIG_CIFS_STATS2 is not set
+CONFIG_CIFS_ALLOW_INSECURE_LEGACY=y
+CONFIG_CIFS_UPCALL=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_CIFS_DEBUG=y
+# CONFIG_CIFS_DEBUG2 is not set
+# CONFIG_CIFS_DEBUG_DUMP_KEYS is not set
+CONFIG_CIFS_DFS_UPCALL=y
+# CONFIG_CIFS_SWN_UPCALL is not set
+# CONFIG_CIFS_SMB_DIRECT is not set
+# CONFIG_CIFS_FSCACHE is not set
+# CONFIG_SMB_SERVER is not set
+CONFIG_SMBFS=m
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_MAC_ROMAN=m
+CONFIG_NLS_MAC_CELTIC=m
+CONFIG_NLS_MAC_CENTEURO=m
+CONFIG_NLS_MAC_CROATIAN=m
+CONFIG_NLS_MAC_CYRILLIC=m
+CONFIG_NLS_MAC_GAELIC=m
+CONFIG_NLS_MAC_GREEK=m
+CONFIG_NLS_MAC_ICELAND=m
+CONFIG_NLS_MAC_INUIT=m
+CONFIG_NLS_MAC_ROMANIAN=m
+CONFIG_NLS_MAC_TURKISH=m
+CONFIG_NLS_UTF8=m
+CONFIG_NLS_UCS2_UTILS=m
+CONFIG_DLM=m
+CONFIG_DLM_DEBUG=y
+# CONFIG_UNICODE is not set
+CONFIG_IO_WQ=y
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_TRUSTED_KEYS=y
+CONFIG_TRUSTED_KEYS_TPM=y
+CONFIG_ENCRYPTED_KEYS=y
+# CONFIG_USER_DECRYPTED_DATA is not set
+# CONFIG_KEY_DH_OPERATIONS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_INFINIBAND=y
+CONFIG_SECURITY_NETWORK_XFRM=y
+CONFIG_SECURITY_PATH=y
+CONFIG_INTEL_TXT=y
+CONFIG_LSM_MMAP_MIN_ADDR=65535
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_FORTIFY_SOURCE=y
+# CONFIG_STATIC_USERMODEHELPER is not set
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SELINUX_BOOTPARAM=y
+CONFIG_SECURITY_SELINUX_DEVELOP=y
+CONFIG_SECURITY_SELINUX_AVC_STATS=y
+CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS=9 +CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE=256 +# CONFIG_SECURITY_SELINUX_DEBUG is not set +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +# CONFIG_SECURITY_SAFESETID is not set +# CONFIG_SECURITY_LOCKDOWN_LSM is not set +# CONFIG_SECURITY_LANDLOCK is not set +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +CONFIG_INTEGRITY_PLATFORM_KEYRING=y +# CONFIG_INTEGRITY_MACHINE_KEYRING is not set +CONFIG_LOAD_UEFI_KEYS=y +CONFIG_INTEGRITY_AUDIT=y +CONFIG_IMA=y +# CONFIG_IMA_KEXEC is not set +CONFIG_IMA_MEASURE_PCR_IDX=10 +CONFIG_IMA_LSM_RULES=y +# CONFIG_IMA_NG_TEMPLATE is not set +CONFIG_IMA_SIG_TEMPLATE=y +CONFIG_IMA_DEFAULT_TEMPLATE="ima-sig" +# CONFIG_IMA_DEFAULT_HASH_SHA1 is not set +CONFIG_IMA_DEFAULT_HASH_SHA256=y +# CONFIG_IMA_DEFAULT_HASH_SHA512 is not set +# CONFIG_IMA_DEFAULT_HASH_SM3 is not set +CONFIG_IMA_DEFAULT_HASH="sha256" +CONFIG_IMA_WRITE_POLICY=y +CONFIG_IMA_READ_POLICY=y +CONFIG_IMA_APPRAISE=y +# CONFIG_IMA_ARCH_POLICY is not set +CONFIG_IMA_APPRAISE_BUILD_POLICY=y +# CONFIG_IMA_APPRAISE_REQUIRE_FIRMWARE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_KEXEC_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_MODULE_SIGS is not set +# CONFIG_IMA_APPRAISE_REQUIRE_POLICY_SIGS is not set +CONFIG_IMA_APPRAISE_BOOTPARAM=y +# CONFIG_IMA_APPRAISE_MODSIG is not set +CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY=y +CONFIG_IMA_BLACKLIST_KEYRING=y +CONFIG_IMA_LOAD_X509=y +CONFIG_IMA_X509_PATH="/etc/keys/x509_ima.der" +# CONFIG_IMA_APPRAISE_SIGNED_INIT is not set +CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS=y +CONFIG_IMA_QUEUE_EARLY_BOOT_KEYS=y +# CONFIG_IMA_SECURE_AND_OR_TRUSTED_BOOT is not set +# CONFIG_IMA_DISABLE_HTABLE is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_ADD_XATTRS is not set +CONFIG_EVM_LOAD_X509=y +CONFIG_EVM_X509_PATH="/etc/keys/x509_evm.der" +CONFIG_DEFAULT_SECURITY_SELINUX=y +# CONFIG_DEFAULT_SECURITY_DAC is not set +CONFIG_LSM="integrity,selinux,smack,tomoyo,apparmor" + +# +# Kernel hardening options +# + +# +# Memory initialization +# +CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y +CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y +CONFIG_INIT_STACK_NONE=y +# CONFIG_INIT_STACK_ALL_PATTERN is not set +# CONFIG_INIT_STACK_ALL_ZERO is not set +# CONFIG_GCC_PLUGIN_STACKLEAK is not set +# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set +# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set +CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y +# CONFIG_ZERO_CALL_USED_REGS is not set +# end of Memory initialization + +# +# Hardening of kernel data structures +# +CONFIG_LIST_HARDENED=y +CONFIG_BUG_ON_DATA_CORRUPTION=y +# end of Hardening of kernel data structures + +CONFIG_CC_HAS_RANDSTRUCT=y +CONFIG_RANDSTRUCT_NONE=y +# CONFIG_RANDSTRUCT_FULL is not set +# CONFIG_RANDSTRUCT_PERFORMANCE is not set +# end of Kernel hardening options +# end of Security options + +CONFIG_XOR_BLOCKS=m +CONFIG_ASYNC_CORE=m +CONFIG_ASYNC_MEMCPY=m +CONFIG_ASYNC_XOR=m +CONFIG_ASYNC_PQ=m +CONFIG_ASYNC_RAID6_RECOV=m +CONFIG_CRYPTO=y + +# +# Crypto core or helper +# +CONFIG_CRYPTO_FIPS=y +CONFIG_CRYPTO_FIPS_NAME="Linux Kernel Cryptographic API" +# CONFIG_CRYPTO_FIPS_CUSTOM_VERSION is not set +CONFIG_CRYPTO_ALGAPI=y +CONFIG_CRYPTO_ALGAPI2=y +CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD2=y +CONFIG_CRYPTO_SIG2=y +CONFIG_CRYPTO_SKCIPHER=y +CONFIG_CRYPTO_SKCIPHER2=y 
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=m
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=m
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+CONFIG_CRYPTO_USER=m
+# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+# CONFIG_CRYPTO_MANAGER_EXTRA_TESTS is not set
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+CONFIG_CRYPTO_PCRYPT=m
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=m
+CONFIG_CRYPTO_TEST=m
+CONFIG_CRYPTO_SIMD=y
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+CONFIG_CRYPTO_DH=m
+# CONFIG_CRYPTO_DH_RFC7919_GROUPS is not set
+CONFIG_CRYPTO_ECC=m
+CONFIG_CRYPTO_ECDH=m
+# CONFIG_CRYPTO_ECDSA is not set
+# CONFIG_CRYPTO_ECRDSA is not set
+CONFIG_CRYPTO_SM2=y
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
+CONFIG_CRYPTO_ANUBIS=m
+# CONFIG_CRYPTO_ARIA is not set
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_BLOWFISH_COMMON=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST_COMMON=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_DES=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SEED=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_SM4=m
+CONFIG_CRYPTO_SM4_GENERIC=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_TWOFISH_COMMON=m
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
+CONFIG_CRYPTO_ARC4=m
+CONFIG_CRYPTO_CHACHA20=m
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CFB=y
+CONFIG_CRYPTO_CTR=y
+CONFIG_CRYPTO_CTS=y
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_OFB=y
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_XTS=y
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
+CONFIG_CRYPTO_CHACHA20POLY1305=m
+CONFIG_CRYPTO_CCM=m
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GENIV=m
+CONFIG_CRYPTO_SEQIV=m
+CONFIG_CRYPTO_ECHAINIV=m
+CONFIG_CRYPTO_ESSIV=m
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+CONFIG_CRYPTO_BLAKE2B=m
+CONFIG_CRYPTO_CMAC=m
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_POLY1305=m
+CONFIG_CRYPTO_RMD160=m
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+CONFIG_CRYPTO_SM3=y
+CONFIG_CRYPTO_SM3_GENERIC=y
+# CONFIG_CRYPTO_STREEBOG is not set
+CONFIG_CRYPTO_VMAC=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_XXHASH=m
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+CONFIG_CRYPTO_CRC32=m
+CONFIG_CRYPTO_CRCT10DIF=y
+CONFIG_CRYPTO_CRC64_ROCKSOFT=m
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_842 is not set
+CONFIG_CRYPTO_LZ4=m
+CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ZSTD=m
+# end of Compression
+
+#
+# Random number generation
+#
+CONFIG_CRYPTO_ANSI_CPRNG=m
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+CONFIG_CRYPTO_DRBG_HASH=y
+CONFIG_CRYPTO_DRBG_CTR=y
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+# CONFIG_CRYPTO_JITTERENTROPY_TESTINTERFACE is not set
+# end of Random number generation
+
+#
+# Userspace interface
+#
+CONFIG_CRYPTO_USER_API=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_USER_API_SKCIPHER=y
+CONFIG_CRYPTO_USER_API_RNG=y
+# CONFIG_CRYPTO_USER_API_RNG_CAVP is not set
+CONFIG_CRYPTO_USER_API_AEAD=y
+CONFIG_CRYPTO_USER_API_ENABLE_OBSOLETE=y
+# CONFIG_CRYPTO_STATS is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (x86)
+#
+CONFIG_CRYPTO_CURVE25519_X86=m
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_BLOWFISH_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_CAST5_AVX_X86_64=m
+CONFIG_CRYPTO_CAST6_AVX_X86_64=m
+CONFIG_CRYPTO_DES3_EDE_X86_64=m
+CONFIG_CRYPTO_SERPENT_SSE2_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX_X86_64=m
+CONFIG_CRYPTO_SERPENT_AVX2_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX_X86_64=m
+CONFIG_CRYPTO_SM4_AESNI_AVX2_X86_64=m
+CONFIG_CRYPTO_SM4_ZHAOXIN_GMI=m
+CONFIG_CRYPTO_TWOFISH_X86_64=m
+CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=m
+CONFIG_CRYPTO_TWOFISH_AVX_X86_64=m
+# CONFIG_CRYPTO_ARIA_AESNI_AVX_X86_64 is not set
+# CONFIG_CRYPTO_ARIA_AESNI_AVX2_X86_64 is not set
+# CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64 is not set
+CONFIG_CRYPTO_CHACHA20_X86_64=m
+# CONFIG_CRYPTO_AEGIS128_AESNI_SSE2 is not set
+# CONFIG_CRYPTO_NHPOLY1305_SSE2 is not set
+# CONFIG_CRYPTO_NHPOLY1305_AVX2 is not set
+CONFIG_CRYPTO_BLAKE2S_X86=y
+# CONFIG_CRYPTO_POLYVAL_CLMUL_NI is not set
+CONFIG_CRYPTO_POLY1305_X86_64=m
+CONFIG_CRYPTO_SHA1_SSSE3=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_CRYPTO_SHA512_SSSE3=y
+CONFIG_CRYPTO_SM3_AVX_X86_64=m
+CONFIG_CRYPTO_SM3_ZHAOXIN_GMI=m
+CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=m
+CONFIG_CRYPTO_CRC32C_INTEL=m
+CONFIG_CRYPTO_CRC32_PCLMUL=m
+CONFIG_CRYPTO_CRCT10DIF_PCLMUL=m
+CONFIG_CRYPTO_SM2_ZHAOXIN_GMI=m
+# end of Accelerated Cryptographic Algorithms for CPU (x86)
+
+CONFIG_CRYPTO_HW=y
+CONFIG_CRYPTO_DEV_PADLOCK=m
+CONFIG_CRYPTO_DEV_PADLOCK_AES=m
+CONFIG_CRYPTO_DEV_PADLOCK_SHA=m
+CONFIG_CRYPTO_DEV_ZHAOXIN=m
+CONFIG_CRYPTO_DEV_ZHAOXIN_AES=m
+CONFIG_CRYPTO_DEV_ZHAOXIN_SHA=m
+# CONFIG_CRYPTO_DEV_ATMEL_ECC is not set
+# CONFIG_CRYPTO_DEV_ATMEL_SHA204A is not set
+CONFIG_CRYPTO_DEV_CCP=y
+CONFIG_CRYPTO_DEV_CCP_DD=m
+CONFIG_CRYPTO_DEV_SP_CCP=y
+CONFIG_CRYPTO_DEV_CCP_CRYPTO=m
+CONFIG_CRYPTO_DEV_SP_PSP=y
+CONFIG_HYGON_PSP2CPU_CMD=y
+CONFIG_TCG_HYGON=m
+CONFIG_TCM_HYGON=m
+CONFIG_TDM_DEV_HYGON=y
+CONFIG_TDM_KERNEL_GUARD=m
+CONFIG_HYGON_GM=y
+# CONFIG_CRYPTO_DEV_CCP_DEBUGFS is not set
+CONFIG_CRYPTO_DEV_NITROX=m
+CONFIG_CRYPTO_DEV_NITROX_CNN55XX=m
+CONFIG_CRYPTO_DEV_QAT=m
+CONFIG_CRYPTO_DEV_QAT_DH895xCC=m
+CONFIG_CRYPTO_DEV_QAT_C3XXX=m
+CONFIG_CRYPTO_DEV_QAT_C62X=m
+CONFIG_CRYPTO_DEV_QAT_4XXX=m
+# CONFIG_CRYPTO_DEV_QAT_420XX is not set
+CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=m
+CONFIG_CRYPTO_DEV_QAT_C3XXXVF=m
+CONFIG_CRYPTO_DEV_QAT_C62XVF=m
+# CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION is not set
+CONFIG_CRYPTO_DEV_CHELSIO=m
+# CONFIG_CRYPTO_DEV_VIRTIO is not set
+# CONFIG_CRYPTO_DEV_SAFEXCEL is not set
+# CONFIG_CRYPTO_DEV_AMLOGIC_GXL is not set
+CONFIG_CRYPTO_DEV_TSSE=m
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_PKCS7_TEST_KEY is not set
+CONFIG_SIGNED_PE_FILE_VERIFICATION=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_MODULE_SIG_KEY="certs/signing_key.pem"
+CONFIG_MODULE_SIG_KEY_TYPE_RSA=y
+# CONFIG_MODULE_SIG_KEY_TYPE_ECDSA is not set
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+CONFIG_SYSTEM_EXTRA_CERTIFICATE=y
+CONFIG_SYSTEM_EXTRA_CERTIFICATE_SIZE=8192
+CONFIG_SECONDARY_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_KEYRING=y
+CONFIG_SYSTEM_BLACKLIST_HASH_LIST=""
+# CONFIG_SYSTEM_REVOCATION_LIST is not set
+# CONFIG_SYSTEM_BLACKLIST_AUTH_UPDATE is not set
+# end of Certificates for signature checking
+
+CONFIG_BINARY_PRINTF=y
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=m
+CONFIG_RAID6_PQ_BENCHMARK=y
+# CONFIG_PACKING is not set
+CONFIG_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+CONFIG_CORDIC=m
+# CONFIG_PRIME_NUMBERS is not set
+CONFIG_RATIONAL=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IOMAP=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
+
+#
+# Crypto library routines
+#
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_ARC4=m
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=m
+CONFIG_CRYPTO_LIB_CHACHA_GENERIC=m
+CONFIG_CRYPTO_LIB_CHACHA=m
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=m
+CONFIG_CRYPTO_LIB_CURVE25519=m
+CONFIG_CRYPTO_LIB_DES=m
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=11
+CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=m
+CONFIG_CRYPTO_LIB_POLY1305_GENERIC=m
+CONFIG_CRYPTO_LIB_POLY1305=m
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=m
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+# end of Crypto library routines
+
+CONFIG_CRC_CCITT=y
+CONFIG_CRC16=y
+CONFIG_CRC_T10DIF=y
+CONFIG_CRC64_ROCKSOFT=m
+CONFIG_CRC_ITU_T=m
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+CONFIG_CRC64=m
+# CONFIG_CRC4 is not set
+CONFIG_CRC7=m
+CONFIG_LIBCRC32C=m
+CONFIG_CRC8=m
+CONFIG_XXHASH=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_LZ4_COMPRESS=m
+CONFIG_LZ4HC_COMPRESS=m
+CONFIG_LZ4_DECOMPRESS=y
+CONFIG_ZSTD_COMMON=y
+CONFIG_ZSTD_COMPRESS=m
+CONFIG_ZSTD_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+CONFIG_XZ_DEC_X86=y
+CONFIG_XZ_DEC_POWERPC=y
+CONFIG_XZ_DEC_IA64=y
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+CONFIG_XZ_DEC_SPARC=y
+CONFIG_XZ_DEC_MICROLZMA=y
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_DECOMPRESS_BZIP2=y
+CONFIG_DECOMPRESS_LZMA=y
+CONFIG_DECOMPRESS_XZ=y
+CONFIG_DECOMPRESS_LZO=y
+CONFIG_DECOMPRESS_LZ4=y
+CONFIG_DECOMPRESS_ZSTD=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=m
+CONFIG_TEXTSEARCH_BM=m
+CONFIG_TEXTSEARCH_FSM=m
+CONFIG_BTREE=y
+CONFIG_INTERVAL_TREE=y
+CONFIG_XARRAY_MULTI=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_DMA_OPS=y
+CONFIG_NEED_SG_DMA_FLAGS=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
+CONFIG_SWIOTLB=y
+# CONFIG_SWIOTLB_DYNAMIC is not set
+CONFIG_DMA_COHERENT_POOL=y
+CONFIG_DMA_CMA=y
+# CONFIG_DMA_NUMA_CMA is not set
+
+#
+# Default contiguous memory area size:
+#
+CONFIG_CMA_SIZE_MBYTES=0
+CONFIG_CMA_SIZE_SEL_MBYTES=y
+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set
+# CONFIG_CMA_SIZE_SEL_MIN is not set
+# CONFIG_CMA_SIZE_SEL_MAX is not set
+CONFIG_CMA_ALIGNMENT=8
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_MAP_BENCHMARK is not set
+CONFIG_SGL_ALLOC=y
+CONFIG_CHECK_SIGNATURE=y
+CONFIG_CPUMASK_OFFSTACK=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_GLOB=y
+# CONFIG_GLOB_SELFTEST is not set
+CONFIG_NLATTR=y
+CONFIG_CLZ_TAB=y
+CONFIG_IRQ_POLL=y
+CONFIG_MPILIB=y
+CONFIG_SIGNATURE=y
+CONFIG_DIMLIB=y
+CONFIG_OID_REGISTRY=y
+CONFIG_UCS2_STRING=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_VDSO_TIME_NS=y
+CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
+CONFIG_FONT_8x16=y
+CONFIG_SG_POOL=y
+CONFIG_ARCH_HAS_PMEM_API=y
+CONFIG_MEMREGION=y
+CONFIG_ARCH_HAS_CPU_CACHE_INVALIDATE_MEMREGION=y
+CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE=y
+CONFIG_ARCH_HAS_COPY_MC=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_STACKDEPOT=y
+CONFIG_SBITMAP=y
+CONFIG_PARMAN=m
+CONFIG_OBJAGG=m
+# end of Library routines
+
+CONFIG_PLDMFW=y
+CONFIG_ASN1_ENCODER=y
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+# CONFIG_PRINTK_CALLER is not set
+# CONFIG_STACKTRACE_BUILD_ID is not set
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
+CONFIG_CONSOLE_LOGLEVEL_QUIET=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+CONFIG_BOOT_PRINTK_DELAY=y
+CONFIG_DYNAMIC_DEBUG=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_SYMBOLIC_ERRNAME=y
+CONFIG_DEBUG_BUGVERBOSE=y
+# end of printk and dmesg options
+
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_MISC=y
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_DEBUG_INFO=y
+CONFIG_AS_HAS_NON_CONST_LEB128=y
+# CONFIG_DEBUG_INFO_NONE is not set
+CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_DEBUG_INFO_DWARF5 is not set
+# CONFIG_DEBUG_INFO_REDUCED is not set
+CONFIG_DEBUG_INFO_COMPRESSED_NONE=y
+# CONFIG_DEBUG_INFO_COMPRESSED_ZLIB is not set
+# CONFIG_DEBUG_INFO_COMPRESSED_ZSTD is not set
+# CONFIG_DEBUG_INFO_SPLIT is not set
+CONFIG_DEBUG_INFO_BTF=y
+# CONFIG_GDB_SCRIPTS is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_STRIP_ASM_SYMS=y
+# CONFIG_READABLE_ASM is not set
+# CONFIG_HEADERS_INSTALL is not set
+CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+CONFIG_OBJTOOL=y
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# end of Compile-time checks and compiler options
+
+#
+# Generic Kernel Debugging Instruments
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
+CONFIG_HAVE_ARCH_KGDB=y
+CONFIG_KGDB=y
+CONFIG_KGDB_HONOUR_BLOCKLIST=y
+CONFIG_KGDB_SERIAL_CONSOLE=y
+CONFIG_KGDB_TESTS=y
+# CONFIG_KGDB_TESTS_ON_BOOT is not set
+CONFIG_KGDB_LOW_LEVEL_TRAP=y
+CONFIG_KGDB_KDB=y
+CONFIG_KDB_DEFAULT_ENABLE=0x1
+CONFIG_KDB_KEYBOARD=y
+CONFIG_KDB_CONTINUE_CATASTROPHIC=0
+CONFIG_ARCH_HAS_EARLY_DEBUG=y
+CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y
+# CONFIG_UBSAN is not set
+CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
+# end of Generic Kernel Debugging Instruments
+
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# end of Networking Debugging
+
+#
+# Memory Debugging
+#
+# CONFIG_PAGE_EXTENSION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_TABLE_CHECK is not set
+# CONFIG_PAGE_POISONING is not set
+# CONFIG_DEBUG_PAGE_REF is not set
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_ARCH_HAS_DEBUG_WX=y
+# CONFIG_DEBUG_WX is not set
+CONFIG_GENERIC_PTDUMP=y
+# CONFIG_PTDUMP_DEBUGFS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+# CONFIG_PER_VMA_LOCK_STATS is not set
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_SHRINKER_DEBUG is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_SCHED_STACK_END_CHECK is not set
+CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_VM_PGTABLE is not set
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+# CONFIG_DEBUG_VIRTUAL is not set
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+CONFIG_ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP=y
+# CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP is not set
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
+CONFIG_CC_HAS_KASAN_GENERIC=y
+CONFIG_CC_HAS_KASAN_SW_TAGS=y
+CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
+# CONFIG_KASAN is not set
+CONFIG_HAVE_ARCH_KFENCE=y
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=0
+CONFIG_KFENCE_NUM_OBJECTS=255
+CONFIG_KFENCE_DEFERRABLE=y
+CONFIG_KFENCE_STRESS_TEST_FAULTS=0
+CONFIG_HAVE_ARCH_KMSAN=y
+# end of Memory Debugging
+
+CONFIG_DEBUG_SHIRQ=y
+
+#
+# Debug Oops, Lockups and Hangs
+#
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_ON_OOPS_VALUE=1
+CONFIG_PANIC_TIMEOUT=1
+CONFIG_LOCKUP_DETECTOR=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set
+CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y
+# CONFIG_SDEI_WATCHDOG is not set
+CONFIG_HARDLOCKUP_DETECTOR=y
+# CONFIG_HARDLOCKUP_DETECTOR_PREFER_BUDDY is not set
+CONFIG_HARDLOCKUP_DETECTOR_PERF=y
+# CONFIG_HARDLOCKUP_DETECTOR_BUDDY is not set
+# CONFIG_HARDLOCKUP_DETECTOR_ARCH is not set
+CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER=y
+CONFIG_HARDLOCKUP_CHECK_TIMESTAMP=y
+CONFIG_BOOTPARAM_HARDLOCKUP_PANIC=y
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set
+# CONFIG_WQ_WATCHDOG is not set
+# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set
+# CONFIG_TEST_LOCKUP is not set
+# end of Debug Oops, Lockups and Hangs
+
+#
+# Scheduler Debugging
+#
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHED_INFO=y
+CONFIG_SCHEDSTATS=y
+CONFIG_SCHED_ACPU=y
+# end of Scheduler Debugging
+
+# CONFIG_DEBUG_TIMEKEEPING is not set
+# CONFIG_DEBUG_PREEMPT is not set
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+# CONFIG_WW_MUTEX_SELFTEST is not set
+# CONFIG_SCF_TORTURE_TEST is not set
+# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
+# end of Lock Debugging (spinlocks, mutexes, etc...)
+ +# CONFIG_NMI_CHECK_CPU is not set +# CONFIG_DEBUG_IRQFLAGS is not set +CONFIG_STACKTRACE=y +# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set +# CONFIG_DEBUG_KOBJECT is not set + +# +# Debug kernel data structures +# +CONFIG_DEBUG_LIST=y +# CONFIG_DEBUG_PLIST is not set +# CONFIG_DEBUG_SG is not set +# CONFIG_DEBUG_NOTIFIERS is not set +# CONFIG_DEBUG_MAPLE_TREE is not set +# end of Debug kernel data structures + +# CONFIG_DEBUG_CREDENTIALS is not set + +# +# RCU Debugging +# +# CONFIG_RCU_SCALE_TEST is not set +# CONFIG_RCU_TORTURE_TEST is not set +# CONFIG_RCU_REF_SCALE_TEST is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0 +# CONFIG_RCU_CPU_STALL_CPUTIME is not set +# CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set +# end of RCU Debugging + +# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set +# CONFIG_CPU_HOTPLUG_STATE_CONTROL is not set +# CONFIG_LATENCYTOP is not set +# CONFIG_DEBUG_CGROUP_REF is not set +CONFIG_USER_STACKTRACE_SUPPORT=y +CONFIG_NOP_TRACER=y +CONFIG_HAVE_RETHOOK=y +CONFIG_RETHOOK=y +CONFIG_HAVE_FUNCTION_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y +CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y +CONFIG_HAVE_DYNAMIC_FTRACE=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y +CONFIG_HAVE_DYNAMIC_FTRACE_NO_PATCHABLE=y +CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y +CONFIG_HAVE_SYSCALL_TRACEPOINTS=y +CONFIG_HAVE_FENTRY=y +CONFIG_HAVE_OBJTOOL_MCOUNT=y +CONFIG_HAVE_OBJTOOL_NOP_MCOUNT=y +CONFIG_HAVE_C_RECORDMCOUNT=y +CONFIG_HAVE_BUILDTIME_MCOUNT_SORT=y +CONFIG_BUILDTIME_MCOUNT_SORT=y +CONFIG_TRACER_MAX_TRACE=y +CONFIG_TRACE_CLOCK=y +CONFIG_RING_BUFFER=y +CONFIG_EVENT_TRACING=y +CONFIG_CONTEXT_SWITCH_TRACER=y +CONFIG_TRACING=y +CONFIG_GENERIC_TRACER=y +CONFIG_TRACING_SUPPORT=y +CONFIG_FTRACE=y +# CONFIG_BOOTTIME_TRACING is not set +CONFIG_FUNCTION_TRACER=y +CONFIG_FUNCTION_GRAPH_TRACER=y +# CONFIG_FUNCTION_GRAPH_RETVAL is not set +CONFIG_DYNAMIC_FTRACE=y +CONFIG_DYNAMIC_FTRACE_WITH_REGS=y +CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS=y +CONFIG_DYNAMIC_FTRACE_WITH_ARGS=y +# CONFIG_FPROBE is not set +CONFIG_FUNCTION_PROFILER=y +CONFIG_STACK_TRACER=y +# CONFIG_IRQSOFF_TRACER is not set +# CONFIG_PREEMPT_TRACER is not set +CONFIG_SCHED_TRACER=y +CONFIG_HWLAT_TRACER=y +CONFIG_OSNOISE_TRACER=y +CONFIG_TIMERLAT_TRACER=y +# CONFIG_MMIOTRACE is not set +CONFIG_FTRACE_SYSCALLS=y +CONFIG_TRACER_SNAPSHOT=y +# CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP is not set +CONFIG_BRANCH_PROFILE_NONE=y +# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set +CONFIG_BLK_DEV_IO_TRACE=y +CONFIG_PROBE_EVENTS_BTF_ARGS=y +CONFIG_KPROBE_EVENTS=y +# CONFIG_KPROBE_EVENTS_ON_NOTRACE is not set +CONFIG_UPROBE_EVENTS=y +CONFIG_BPF_EVENTS=y +CONFIG_DYNAMIC_EVENTS=y +CONFIG_PROBE_EVENTS=y +# CONFIG_BPF_KPROBE_OVERRIDE is not set +CONFIG_FTRACE_MCOUNT_RECORD=y +CONFIG_FTRACE_MCOUNT_USE_CC=y +CONFIG_TRACING_MAP=y +CONFIG_SYNTH_EVENTS=y +# CONFIG_USER_EVENTS is not set +CONFIG_HIST_TRIGGERS=y +# CONFIG_TRACE_EVENT_INJECT is not set +# CONFIG_TRACEPOINT_BENCHMARK is not set +CONFIG_RING_BUFFER_BENCHMARK=m +# CONFIG_TRACE_EVAL_MAP_FILE is not set +# CONFIG_FTRACE_RECORD_RECURSION is not set +# CONFIG_FTRACE_STARTUP_TEST is not set +# CONFIG_FTRACE_SORT_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_STARTUP_TEST is not set +# CONFIG_RING_BUFFER_VALIDATE_TIME_DELTAS is not set +# CONFIG_PREEMPTIRQ_DELAY_TEST is not set +# CONFIG_SYNTH_EVENT_GEN_TEST is not set +# CONFIG_KPROBE_EVENT_GEN_TEST is not set +# CONFIG_HIST_TRIGGERS_DEBUG is not set +# 
CONFIG_RV is not set +CONFIG_PROVIDE_OHCI1394_DMA_INIT=y +# CONFIG_SAMPLES is not set +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y +CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y +CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y +CONFIG_STRICT_DEVMEM=y +# CONFIG_IO_STRICT_DEVMEM is not set + +# +# x86 Debugging +# +CONFIG_EARLY_PRINTK_USB=y +# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_EARLY_PRINTK=y +CONFIG_EARLY_PRINTK_DBGP=y +CONFIG_EARLY_PRINTK_USB_XDBC=y +# CONFIG_EFI_PGT_DUMP is not set +# CONFIG_DEBUG_TLBFLUSH is not set +CONFIG_HAVE_MMIOTRACE_SUPPORT=y +CONFIG_X86_DECODER_SELFTEST=y +CONFIG_IO_DELAY_0X80=y +# CONFIG_IO_DELAY_0XED is not set +# CONFIG_IO_DELAY_UDELAY is not set +# CONFIG_IO_DELAY_NONE is not set +CONFIG_DEBUG_BOOT_PARAMS=y +# CONFIG_CPA_DEBUG is not set +# CONFIG_DEBUG_ENTRY is not set +# CONFIG_DEBUG_NMI_SELFTEST is not set +# CONFIG_X86_DEBUG_FPU is not set +# CONFIG_PUNIT_ATOM_DEBUG is not set +CONFIG_UNWINDER_ORC=y +# CONFIG_UNWINDER_FRAME_POINTER is not set +# end of x86 Debugging + +# +# Kernel Testing and Coverage +# +# CONFIG_KUNIT is not set +# CONFIG_NOTIFIER_ERROR_INJECTION is not set +CONFIG_FUNCTION_ERROR_INJECTION=y +# CONFIG_FAULT_INJECTION is not set +CONFIG_ARCH_HAS_KCOV=y +CONFIG_CC_HAS_SANCOV_TRACE_PC=y +# CONFIG_KCOV is not set +CONFIG_RUNTIME_TESTING_MENU=y +# CONFIG_TEST_DHRY is not set +# CONFIG_LKDTM is not set +# CONFIG_TEST_MIN_HEAP is not set +# CONFIG_TEST_DIV64 is not set +# CONFIG_BACKTRACE_SELF_TEST is not set +# CONFIG_TEST_REF_TRACKER is not set +# CONFIG_RBTREE_TEST is not set +# CONFIG_REED_SOLOMON_TEST is not set +# CONFIG_INTERVAL_TREE_TEST is not set +# CONFIG_PERCPU_TEST is not set +CONFIG_ATOMIC64_SELFTEST=y +CONFIG_ASYNC_RAID6_TEST=m +# CONFIG_TEST_HEXDUMP is not set +# CONFIG_STRING_SELFTEST is not set +# CONFIG_TEST_STRING_HELPERS is not set +CONFIG_TEST_KSTRTOX=y +# CONFIG_TEST_PRINTF is not set +# CONFIG_TEST_SCANF is not set +# CONFIG_TEST_BITMAP is not set +# CONFIG_TEST_UUID is not set +# CONFIG_TEST_XARRAY is not set +# CONFIG_TEST_MAPLE_TREE is not set +# CONFIG_TEST_RHASHTABLE is not set +# CONFIG_TEST_IDA is not set +# CONFIG_TEST_PARMAN is not set +# CONFIG_TEST_LKM is not set +# CONFIG_TEST_BITOPS is not set +# CONFIG_TEST_VMALLOC is not set +# CONFIG_TEST_USER_COPY is not set +CONFIG_TEST_BPF=m +# CONFIG_TEST_BLACKHOLE_DEV is not set +# CONFIG_FIND_BIT_BENCHMARK is not set +# CONFIG_TEST_FIRMWARE is not set +# CONFIG_TEST_SYSCTL is not set +# CONFIG_TEST_UDELAY is not set +# CONFIG_TEST_STATIC_KEYS is not set +# CONFIG_TEST_DYNAMIC_DEBUG is not set +# CONFIG_TEST_KMOD is not set +# CONFIG_TEST_MEMCAT_P is not set +CONFIG_TEST_LIVEPATCH=m +# CONFIG_TEST_OBJAGG is not set +# CONFIG_TEST_MEMINIT is not set +# CONFIG_TEST_HMM is not set +# CONFIG_TEST_FREE_PAGES is not set +# CONFIG_TEST_FPU is not set +# CONFIG_TEST_CLOCKSOURCE_WATCHDOG is not set +CONFIG_ARCH_USE_MEMTEST=y +# CONFIG_MEMTEST is not set +# CONFIG_HYPERV_TESTING is not set +# end of Kernel Testing and Coverage + +# +# Rust hacking +# +# end of Rust hacking +# end of Kernel hacking diff --git a/arch/x86/crypto/Kconfig b/arch/x86/crypto/Kconfig index 9bbfd01cfa2f13fde9124ca521fa76c33f7661de..2d2d807e3b00c162b57ab1a830ad962f824901ad 100644 --- a/arch/x86/crypto/Kconfig +++ b/arch/x86/crypto/Kconfig @@ -231,6 +231,26 @@ config CRYPTO_SM4_AESNI_AVX2_X86_64 If unsure, say N. 
+config CRYPTO_SM4_ZHAOXIN_GMI
+	tristate "Ciphers: SM4 with modes: ECB, CBC, CTR, CFB, OFB (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_SKCIPHER
+	select CRYPTO_SIMD
+	select CRYPTO_ALGAPI
+	select CRYPTO_SM4
+	help
+	  SM4 cipher algorithms (Zhaoxin GMI Instruction).
+
+	  SM4 (GBT.32907-2016) is a cryptographic standard issued by the
+	  Organization of State Commercial Administration of China (OSCCA)
+	  as an authorized cryptographic algorithm for use within China.
+
+	  This is an SM4 block cipher implementation optimized with the
+	  Zhaoxin GMI instruction set.
+
+	  If unsure, say N.
+
 config CRYPTO_TWOFISH_586
 	tristate "Ciphers: Twofish (32-bit)"
 	depends on (X86 || UML_X86) && !64BIT
@@ -477,6 +497,20 @@ config CRYPTO_SM3_AVX_X86_64
 	  If unsure, say N.
 
+config CRYPTO_SM3_ZHAOXIN_GMI
+	tristate "Hash functions: SM3 (Zhaoxin GMI)"
+	depends on X86 && CRYPTO
+	default m
+	select CRYPTO_HASH
+	select CRYPTO_SM3
+	help
+	  SM3 secure hash algorithm (Zhaoxin GMI Instruction).
+
+	  SM3 is the secure hash function defined by OSCCA GM/T 0004-2012.
+	  It is part of the Chinese Commercial Cryptography suite.
+
+	  If unsure, say N.
+
 config CRYPTO_GHASH_CLMUL_NI_INTEL
 	tristate "Hash functions: GHASH (CLMUL-NI)"
 	depends on X86 && 64BIT
@@ -519,4 +553,15 @@ config CRYPTO_CRCT10DIF_PCLMUL
 	  Architecture: x86_64 using:
 	  - PCLMULQDQ (carry-less multiplication)
 
+config CRYPTO_SM2_ZHAOXIN_GMI
+	tristate "SM2 Cipher algorithm (Zhaoxin GMI Instruction)"
+	depends on X86 && (CPU_SUP_CENTAUR || CPU_SUP_ZHAOXIN)
+	select CRYPTO_AKCIPHER
+	select CRYPTO_MANAGER
+	help
+	  SM2 (ShangMi 2) public-key algorithm accelerated by the Zhaoxin
+	  GMI instruction.
+
+	  Published by the State Encryption Management Bureau, China,
+	  as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012.
+
 endmenu
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index 9aa46093c91b619da0d1641cefa73afa7d97d2e6..e5480c50a8d9bf7b5f79943d3f93f8fc321cfc8e 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -109,6 +109,10 @@ aria-aesni-avx2-x86_64-y := aria-aesni-avx2-asm_64.o aria_aesni_avx2_glue.o
 obj-$(CONFIG_CRYPTO_ARIA_GFNI_AVX512_X86_64) += aria-gfni-avx512-x86_64.o
 aria-gfni-avx512-x86_64-y := aria-gfni-avx512-asm_64.o aria_gfni_avx512_glue.o
 
+obj-$(CONFIG_CRYPTO_SM2_ZHAOXIN_GMI) += sm2-zhaoxin-gmi.o
+obj-$(CONFIG_CRYPTO_SM3_ZHAOXIN_GMI) += sm3-zhaoxin-gmi.o
+obj-$(CONFIG_CRYPTO_SM4_ZHAOXIN_GMI) += sm4-zhaoxin-gmi.o
+
 quiet_cmd_perlasm = PERLASM $@
       cmd_perlasm = $(PERL) $< > $@
 $(obj)/%.S: $(src)/%.pl FORCE
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index feccb5254c7e5ec59179d21072f15cfacf6a1fb1..91d318b08fb70b14abc6c77026fc527ff5bd5f8d 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -224,6 +224,11 @@ static int __init crc32c_intel_mod_init(void)
 {
 	if (!x86_match_cpu(crc32c_cpu_id))
 		return -ENODEV;
+
+	/* Don't use the low-performance CRC32C instruction on these CPUs */
+	if (boot_cpu_has(X86_FEATURE_CRC32C_LOW_PERF))
+		return -ENODEV;
+
 #ifdef CONFIG_X86_64
 	if (boot_cpu_has(X86_FEATURE_PCLMULQDQ)) {
 		alg.update = crc32c_pcl_intel_update;
diff --git a/arch/x86/crypto/sm2-zhaoxin-gmi.c b/arch/x86/crypto/sm2-zhaoxin-gmi.c
new file mode 100644
index 0000000000000000000000000000000000000000..a0430c6611fcfac4b921bda40af3d24afc05724c
--- /dev/null
+++ b/arch/x86/crypto/sm2-zhaoxin-gmi.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SM2 asymmetric public-key algorithm
+ * as specified by OSCCA GM/T 0003.1-2012 -- 0003.5-2012 SM2
and
+ * described at https://tools.ietf.org/html/draft-shen-sm2-ecdsa-02
+ *
+ * Copyright (c) 2023 Shanghai Zhaoxin Semiconductor LTD.
+ * Authors: YunShen
+ */
+
+/*
+ * The header names in the original include list were garbled; the set
+ * below is a best-guess reconstruction from what the code actually uses.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/scatterlist.h>
+#include <crypto/akcipher.h>
+#include <crypto/internal/akcipher.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+
+#define SCRATCH_SIZE (4 * 2048)
+
+#define SM2_CWORD_VERIFY 0x8
+#define SM2_VERIFY_PASS 1
+
+struct sm2_cipher_data {
+	u8 pub_key[65]; /* public key */
+};
+
+/* Load supported features of the CPU to see if SM2 is available. */
+static int zhaoxin_gmi_available(void)
+{
+	if (!boot_cpu_has(X86_FEATURE_SM2_EN)) {
+		pr_err("can't enable hardware SM2 if Zhaoxin GMI SM2 is not enabled\n");
+		return -ENODEV;
+	}
+	return 0;
+}
+
+/* Zhaoxin SM2 verify primitive (GMI instruction, opcode 0xf2 0x0f 0xa6 0xc0) */
+static inline size_t zhaoxin_gmi_sm2_verify(unsigned char *key, unsigned char *hash,
+					    unsigned char *sig, unsigned char *scratch)
+{
+	size_t result;
+
+	asm volatile(
+		".byte 0xf2, 0x0f, 0xa6, 0xc0"
+		: "=c"(result)
+		: "a"(hash), "b"(key), "d"(SM2_CWORD_VERIFY), "S"(scratch), "D"(sig)
+		: "memory");
+
+	return result;
+}
+
+/* Wrapper around the verify primitive; allocates the scratch buffer. */
+static int _zhaoxin_sm2_verify(struct sm2_cipher_data *ec, unsigned char *hash, unsigned char *sig)
+{
+	unsigned char *scratch = kzalloc(SCRATCH_SIZE, GFP_KERNEL);
+	int ret = -EKEYREJECTED;
+	size_t result;
+
+	if (!scratch)
+		return -ENOMEM;
+
+	result = zhaoxin_gmi_sm2_verify(ec->pub_key, hash, sig, scratch);
+	if (result == SM2_VERIFY_PASS)
+		ret = 0;
+
+	kfree(scratch);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_verify(struct akcipher_request *req)
+{
+	struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+	unsigned char *buffer;
+	int ret, buf_len;
+
+	buf_len = req->src_len + req->dst_len;
+	buffer = kmalloc(buf_len, GFP_KERNEL);
+	if (!buffer)
+		return -ENOMEM;
+
+	sg_pcopy_to_buffer(req->src, sg_nents_for_len(req->src, buf_len), buffer, buf_len, 0);
+	ret = _zhaoxin_sm2_verify(ec, buffer + req->src_len, buffer);
+
+	kfree(buffer);
+
+	return ret;
+}
+
+static int zhaoxin_sm2_set_pub_key(struct crypto_akcipher *tfm, const void *key,
+				   unsigned int keylen)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	/* Reject keys that would overflow the 65-byte buffer. */
+	if (keylen > sizeof(ec->pub_key))
+		return -EINVAL;
+
+	memcpy(ec->pub_key, key, keylen);
+
+	return 0;
+}
+
+static unsigned int zhaoxin_sm2_max_size(struct crypto_akcipher *tfm)
+{
+	/* Cap the reported size at one page */
+	return PAGE_SIZE;
+}
+
+static int zhaoxin_sm2_init_tfm(struct crypto_akcipher *tfm)
+{
+	return zhaoxin_gmi_available();
+}
+
+static void zhaoxin_sm2_exit_tfm(struct crypto_akcipher *tfm)
+{
+	struct sm2_cipher_data *ec = akcipher_tfm_ctx(tfm);
+
+	memset(ec, 0, sizeof(*ec));
+}
+
+static struct akcipher_alg zhaoxin_sm2 = {
+	.verify = zhaoxin_sm2_verify,
+	.set_pub_key = zhaoxin_sm2_set_pub_key,
+	.max_size = zhaoxin_sm2_max_size,
+	.init = zhaoxin_sm2_init_tfm,
+	.exit = zhaoxin_sm2_exit_tfm,
+	.base = {
+		.cra_name = "sm2",
+		.cra_driver_name = "zhaoxin-gmi-sm2",
+		.cra_priority = 150,
+		.cra_module = THIS_MODULE,
+		.cra_ctxsize = sizeof(struct sm2_cipher_data),
+	},
+};
+
+static const struct x86_cpu_id zhaoxin_sm2_cpu_ids[] = {
+	X86_MATCH_FEATURE(X86_FEATURE_SM2, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sm2_cpu_ids);
+
+static int __init zhaoxin_sm2_init(void)
+{
+	if (!x86_match_cpu(zhaoxin_sm2_cpu_ids))
+		return -ENODEV;
+
+	return crypto_register_akcipher(&zhaoxin_sm2);
+}
+
+static void __exit zhaoxin_sm2_exit(void)
+{
+	crypto_unregister_akcipher(&zhaoxin_sm2);
+}
+
+module_init(zhaoxin_sm2_init);
+module_exit(zhaoxin_sm2_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("YunShen ");
+MODULE_DESCRIPTION("SM2
Zhaoxin GMI Algorithm"); +MODULE_ALIAS_CRYPTO("zhaoxin-gmi-sm2"); diff --git a/arch/x86/crypto/sm3-zhaoxin-gmi.c b/arch/x86/crypto/sm3-zhaoxin-gmi.c new file mode 100644 index 0000000000000000000000000000000000000000..e393133d572d719e00647ba0e8600fdaaace53cf --- /dev/null +++ b/arch/x86/crypto/sm3-zhaoxin-gmi.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * sm3_zhaoxin_gmi.c - wrapper code for Zhaoxin GMI. + * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const u8 zx_sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B +}; +EXPORT_SYMBOL_GPL(zx_sm3_zero_message_hash); + +/* + * Load supported features of the CPU to see if the SM3/SM4 is available. + */ +static int gmi_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || + ((c->x86 == 6) && (c->x86_model == 0x09)) || + (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_notice("GMI SM3 detected by CPUID\n"); + return 0; + } + pr_notice("GMI SM3 is available\n"); + return 0; + } + return -ENODEV; +} + +void sm3_generic_block_fn(struct sm3_state *sst, const u8 *inp, int blockcnt) +{ + unsigned long in, out, cnt; + + if (!blockcnt) + return; + + in = (unsigned long)inp; + out = (unsigned long)(sst->state); + cnt = (unsigned long)blockcnt; + + __asm__ __volatile__( + #ifdef __x86_64__ + "pushq %%rbp\n" + "pushq %%rbx\n" + "pushq %%rsi\n" + "pushq %%rdi\n" + "movq $-1, %%rax\n" + "movq $0x20, %%rbx\n" + #else + "pushl %%ebp\n" + "pushl %%ebx\n" + "pushl %%esi\n" + "pushl %%edi\n" + "movl $-1, %%eax\n" + "movl $0x20, %%ebx\n" + #endif + ".byte 0xf3,0x0f,0xa6,0xe8\n" + #ifdef __x86_64__ + "popq %%rdi\n" + "popq %%rsi\n" + "popq %%rbx\n" + "popq %%rbp\n" + #else + "popl %%edi\n" + "popl %%esi\n" + "popl %%ebx\n" + "popl %%ebp\n" + #endif + : + : "S"(in), "D"(out), "c"(cnt) + : + ); +} + +static inline int zx_sm3_init(struct shash_desc *desc) +{ + struct sm3_state *sctx; + + if (!desc) + return -EINVAL; + + sctx = shash_desc_ctx(desc); + + sctx->state[0] = 0x6f168073UL; + sctx->state[1] = 0xb9b21449UL; + sctx->state[2] = 0xd7422417UL; + sctx->state[3] = 0x00068adaUL; + sctx->state[4] = 0xbc306fa9UL; + sctx->state[5] = 0xaa383116UL; + sctx->state[6] = 0x4dee8de3UL; + sctx->state[7] = 0x4e0efbb0UL; + + sctx->count = 0; + + return 0; +} + +static inline int zx_sm3_base_finish(struct shash_desc *desc, u8 *out) +{ + struct sm3_state *sctx = shash_desc_ctx(desc); + __be32 *digest = (__be32 *)out; + + memcpy(digest, sctx->state, SM3_DIGEST_SIZE); + + *sctx = (struct sm3_state){}; + return 0; +} + +int zx_sm3_update(struct shash_desc *desc, const u8 *data, unsigned int len) +{ + return sm3_base_do_update(desc, data, len, sm3_generic_block_fn); +} +EXPORT_SYMBOL(zx_sm3_update); + +static int zx_sm3_final(struct shash_desc *desc, u8 *out) +{ + sm3_base_do_finalize(desc, 
sm3_generic_block_fn); + + return zx_sm3_base_finish(desc, out); +} + +int zx_sm3_finup(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *hash) +{ + sm3_base_do_update(desc, data, len, sm3_generic_block_fn); + + return zx_sm3_final(desc, hash); +} +EXPORT_SYMBOL(zx_sm3_finup); + +static struct shash_alg zx_sm3_alg = { + .digestsize = SM3_DIGEST_SIZE, + .init = zx_sm3_init, + .update = zx_sm3_update, + .final = zx_sm3_final, + .finup = zx_sm3_finup, + .descsize = sizeof(struct sm3_state), + .base = { + .cra_name = "sm3", + .cra_driver_name = "sm3-zhaoxin-gmi", + .cra_priority = 300, + .cra_blocksize = SM3_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static int __init zx_sm3_generic_mod_init(void) +{ + if (gmi_available() == 0) + return crypto_register_shash(&zx_sm3_alg); + + pr_warn("GMI is unavailable on this platform."); + return -ENODEV; +} + +static void __exit zx_sm3_generic_mod_fini(void) +{ + crypto_unregister_shash(&zx_sm3_alg); +} + +module_init(zx_sm3_generic_mod_init); +module_exit(zx_sm3_generic_mod_fini); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SM3 Secure Hash Algorithm"); + +MODULE_ALIAS_CRYPTO("sm3-zhaoxin"); +MODULE_ALIAS_CRYPTO("sm3-zhaoxin-gmi"); diff --git a/arch/x86/crypto/sm4-zhaoxin-gmi.c b/arch/x86/crypto/sm4-zhaoxin-gmi.c new file mode 100644 index 0000000000000000000000000000000000000000..ec57b4ca4644626ce8e9ef4c1bf19d741a050264 --- /dev/null +++ b/arch/x86/crypto/sm4-zhaoxin-gmi.c @@ -0,0 +1,858 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin-gmi-sm4.c - wrapper code for Zhaoxin GMI. + * + * Copyright (C) 2023 Shanghai Zhaoxin Semiconductor LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#define SM4_ECB (1<<6) +#define SM4_CBC (1<<7) +#define SM4_CFB (1<<8) +#define SM4_OFB (1<<9) +#define SM4_CTR (1<<10) + +#define ZX_GMI_ALIGNMENT 16 + +#define GETU16(p) ((u16)(p)[0]<<8 | (u16)(p)[1]) + +/* Control word. 
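+ *
+ * (Field layout inferred from the bitfield below, not from documentation:
+ * bit 0 of the word handed to REP XCRYPT selects decrypt when set and
+ * encrypt when clear (see rep_xcrypt()), bits 1-5 form the function
+ * field, and bits 6-10 carry the one-hot mode flags SM4_ECB..SM4_CTR.
+ * Callers compose the word as, e.g., "cw.cword.pad |= 0x20 | SM4_CBC".)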
*/ +struct sm4_cipher_data { + u8 iv[SM4_BLOCK_SIZE]; /* Initialization vector */ + union { + u32 pad; + struct { + u32 encdec:1; + u32 func:5; + u32 mode:5; + u32 digest:1; + } b; + } cword; /* Control word */ + struct sm4_ctx keys; /* Encryption key */ +}; + +static u8 *rep_xcrypt(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, u64 count) +{ + unsigned long rax = sm4_data->cword.pad; + + // Set the flag for encryption or decryption + if (sm4_data->cword.b.encdec == 1) + rax &= ~0x01; + else + rax |= 0x01; + + __asm__ __volatile__( + #ifdef __x86_64__ + "pushq %%rbp\n\n" + "pushq %%rbx\n\n" + "pushq %%rcx\n\n" + "pushq %%rsi\n\n" + "pushq %%rdi\n\n" + #else + "pushl %%ebp\n\n" + "pushl %%ebx\n\n" + "pushl %%ecx\n\n" + "pushl %%esi\n\n" + "pushl %%edi\n\n" + #endif + ".byte 0xf3,0x0f,0xa7,0xf0\n" + #ifdef __x86_64__ + "popq %%rdi\n\n" + "popq %%rsi\n\n" + "popq %%rcx\n\n" + "popq %%rbx\n\n" + "popq %%rbp\n\n" + #else + "popl %%edi\n\n" + "popl %%esi\n\n" + "popl %%ecx\n\n" + "popl %%ebx\n\n" + "popl %%ebp\n\n" + #endif + : + : "S"(input), "D"(output), "a"(rax), "b"(key), "c"((unsigned long)count), "d"(iv)); + return iv; +} + +static u8 *rep_xcrypt_ctr(const u8 *input, u8 *output, void *key, u8 *iv, + struct sm4_cipher_data *sm4_data, u64 count) +{ + u8 oiv[SM4_BLOCK_SIZE] = {0}; + u16 cnt_tmp; + u32 i; + u8 *in_tmp = (u8 *)input, *out_tmp = output; + + //Backup the original IV if it is not NULL. + if (iv) + memcpy(oiv, iv, SM4_BLOCK_SIZE); + + // Get the current counter. + cnt_tmp = GETU16(&iv[14]); + + // Get the available counter space before overflow. + cnt_tmp = 0x10000 - cnt_tmp; + + // + // Check there is enough counter space for the required blocks. + // + if (cnt_tmp < count) { + + // Process the first part of data blocks. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); + // Only increase the counter by SW when overflow occurs. + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + for (i = 0; i < cnt_tmp; i++) + crypto_inc(iv, SM4_BLOCK_SIZE); + + out_tmp = output + cnt_tmp * SM4_BLOCK_SIZE; + in_tmp = (u8 *)(input + cnt_tmp * SM4_BLOCK_SIZE); + + // Get the number of data blocks that have not been encrypted. + cnt_tmp = count - cnt_tmp; + // Process the remaining part of data blocks. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, cnt_tmp); + } else { + // Counter space is big enough, the counter will not overflow. + rep_xcrypt(in_tmp, out_tmp, key, iv, sm4_data, count); + } + + // Restore the iv if not null + if (iv) + memcpy(iv, oiv, SM4_BLOCK_SIZE); + + return iv; +} + +static u8 *rep_xcrypt_ecb_ONE(const u8 *input, u8 *output, void *key, + u8 *iv, struct sm4_cipher_data *sm4_data, u64 count) +{ + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + return rep_xcrypt(input, output, key, iv, &cw, 1); +} + +/** + * gmi_sm4_set_key - Set the sm4 key. + * @tfm: The %crypto_skcipher that is used in the context. + * @in_key: The input key. + * @key_len:The size of the key. + */ +int gmi_sm4_set_key(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + + if (key_len != SM4_KEY_SIZE) { + pr_warn("The key_len must be 16 bytes. 
please check\n"); + return -EINVAL; + } + + memcpy(ctx->rkey_enc, in_key, key_len); + memcpy(ctx->rkey_dec, in_key, key_len); + + return 0; +} +EXPORT_SYMBOL_GPL(gmi_sm4_set_key); + + +static int sm4_cipher_common(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u8 *iv; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + iv = rep_xcrypt(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + + +static int ecb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int ecb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_ECB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +static int cbc_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CBC; + + err = sm4_cipher_common(req, &cw); + + return err; +} + + +/* + * sm4_cipher_ctr is used for ZX-E and newer + */ +static int sm4_cipher_ctr(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks, nbytes; + int err; + u8 *iv, *dst, *src; + u8 keystream[SM4_BLOCK_SIZE]; + u32 i; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + blocks = nbytes/SM4_BLOCK_SIZE; + iv = rep_xcrypt_ctr(walk.src.virt.addr, walk.dst.virt.addr, ctx->rkey_enc, + walk.iv, cw, blocks); + + for (i = 0; i < blocks; i++) + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + dst += blocks * SM4_BLOCK_SIZE; + src += blocks * SM4_BLOCK_SIZE; + nbytes -= blocks * SM4_BLOCK_SIZE; + } + + if (walk.nbytes == walk.total && nbytes > 0) { + rep_xcrypt_ecb_ONE(walk.iv, keystream, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, keystream, src, nbytes); + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt is used for ZX-E and newer + */ +static int ctr_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * ctr_decrypt is used for ZX-E and newer + */ +static int ctr_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_cipher_ctr(req, &cw); + + return err; +} + +/* + * sm4_ctr_zxc is used for ZXC+ + */ +static int sm4_ctr_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = 
crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + u8 *iv = NULL, *dst, *src; + u8 en_iv[SM4_BLOCK_SIZE] = {0}; + + err = skcipher_walk_virt(&walk, req, true); + + while ((nbytes = walk.nbytes) > 0) { + + src = walk.src.virt.addr; + dst = walk.dst.virt.addr; + + while (nbytes >= SM4_BLOCK_SIZE) { + + iv = rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_inc(walk.iv, SM4_BLOCK_SIZE); + + crypto_xor_cpy(dst, en_iv, src, SM4_BLOCK_SIZE); + + dst += SM4_BLOCK_SIZE; + src += SM4_BLOCK_SIZE; + nbytes -= SM4_BLOCK_SIZE; + } + + // tail + if (walk.nbytes == walk.total && nbytes > 0) { + + rep_xcrypt_ecb_ONE(walk.iv, en_iv, ctx->rkey_enc, walk.iv, cw, 1); + crypto_xor_cpy(dst, en_iv, src, nbytes); + + dst += nbytes; + src += nbytes; + nbytes = 0; + } + + err = skcipher_walk_done(&walk, nbytes); + } + + return err; +} + +/* + * ctr_encrypt_zxc is used for ZX-C+ + */ +static int ctr_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ctr_decrypt_zxc is used for ZX-C+ + */ +static int ctr_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CTR; + + err = sm4_ctr_zxc(req, &cw); + + return err; +} + +/* + * ofb_encrypt is used for ZX-E and newer + */ +static int ofb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * ofb_decrypt is used for ZX-E and newer + */ +static int ofb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * sm4_ofb_zxc is used for ZX-C+ + */ +static int sm4_ofb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + + u32 n; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ + *(size_t *)(walk.src.virt.addr + n); + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * ofb_encrypt_zxc is used for ZX-C+ + */ +static int ofb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + +/* + * ofb_decrypt_zxc is used for ZX-C+ + */ +static int ofb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_OFB; + + err = sm4_ofb_zxc(req, &cw); + + return err; +} + + +/* + * cfb_encrypt is used for ZX-E and newer. 
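+ * The hardware REP XCRYPT CFB path consumes whole blocks directly;
+ * ZX-C+ parts are rerouted at init time to sm4_cfb_zxc() below, which
+ * emulates CFB one block at a time via single-block ECB encryption of
+ * the chaining value.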
+ */ +static int cfb_encrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; +} + +/* + * cfb_decrypt is used for ZX-E and newer. + */ + +static int cfb_decrypt(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cipher_common(req, &cw); + + return err; + +} + +/* + * sm4_cfb_zxc is used for ZX-C+ + */ +static int sm4_cfb_zxc(struct skcipher_request *req, struct sm4_cipher_data *cw) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct sm4_ctx *ctx = crypto_skcipher_ctx(tfm); + struct skcipher_walk walk; + unsigned int blocks; + int err; + u32 n; + size_t t; + + err = skcipher_walk_virt(&walk, req, true); + + while ((blocks = (walk.nbytes / SM4_BLOCK_SIZE))) { + while (blocks--) { + rep_xcrypt_ecb_ONE(walk.iv, walk.iv, ctx->rkey_enc, NULL, cw, 1); + + if (cw->cword.b.encdec) + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^= + *(size_t *)(walk.src.virt.addr + n); + + else + for (n = 0; n < SM4_BLOCK_SIZE; n += sizeof(size_t)) { + t = *(size_t *)(walk.src.virt.addr + n); + *(size_t *)(walk.dst.virt.addr + n) = + *(size_t *)(walk.iv + n) ^ t; + *(size_t *)(walk.iv + n) = t; + } + + walk.src.virt.addr += SM4_BLOCK_SIZE; + walk.dst.virt.addr += SM4_BLOCK_SIZE; + } + + err = skcipher_walk_done(&walk, walk.nbytes % SM4_BLOCK_SIZE); + } + + return err; +} + +/* + * cfb_encrypt_zxc is used for ZX-C+ + */ +static int cfb_encrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 1; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + +/* + * cfb_decrypt_zxc is used for ZX-C+ + */ +static int cfb_decrypt_zxc(struct skcipher_request *req) +{ + int err; + struct sm4_cipher_data cw; + + cw.cword.pad = 0; + cw.cword.b.encdec = 0; + cw.cword.pad |= 0x20|SM4_CFB; + + err = sm4_cfb_zxc(req, &cw); + + return err; +} + + +static struct skcipher_alg sm4_algs[] = { + { + .base = { + .cra_name = "__ecb(sm4)", + .cra_driver_name = "__ecb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ecb_encrypt, + .decrypt = ecb_decrypt, + }, + + { + .base = { + .cra_name = "__cbc(sm4)", + .cra_driver_name = "__cbc-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cbc_encrypt, + .decrypt = cbc_decrypt, + }, + + { + .base = { + .cra_name = "__ctr(sm4)", + .cra_driver_name = "__ctr-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = 1, //SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * 
SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ctr_encrypt, + .decrypt = ctr_decrypt, + }, + + { + .base = { + .cra_name = "__ofb(sm4)", + .cra_driver_name = "__ofb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = ofb_encrypt, + .decrypt = ofb_decrypt, + }, + + { + .base = { + .cra_name = "__cfb(sm4)", + .cra_driver_name = "__cfb-sm4-gmi", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_INTERNAL, + .cra_blocksize = SM4_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct sm4_ctx), + .cra_module = THIS_MODULE, + }, + .min_keysize = SM4_KEY_SIZE, + .max_keysize = SM4_KEY_SIZE, + .ivsize = SM4_BLOCK_SIZE, + .chunksize = SM4_BLOCK_SIZE, + .walksize = 8 * SM4_BLOCK_SIZE, + .setkey = gmi_sm4_set_key, + .encrypt = cfb_encrypt, + .decrypt = cfb_decrypt, + } +}; + +static struct simd_skcipher_alg *sm4_simd_algs[ARRAY_SIZE(sm4_algs)]; + +static int gmi_zxc_check(void) +{ + int f_zxc = 0; + + struct cpuinfo_x86 *c = &cpu_data(0); + + if ((c->x86 > 6)) { + f_zxc = 0; + } else if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + ) { + f_zxc = 1; + } + + return f_zxc; +} + +/* + * Load supported features of the CPU to see if the SM4 is available. + */ +static int gmi_ccs_available(void) +{ + struct cpuinfo_x86 *c = &cpu_data(0); + u32 eax, edx; + + if (((c->x86 == 6) && (c->x86_model >= 0x0f)) + || ((c->x86 == 6) && (c->x86_model == 0x09)) + || (c->x86 > 6)) { + if (!boot_cpu_has(X86_FEATURE_CCS) || !boot_cpu_has(X86_FEATURE_CCS_EN)) { + + eax = 0xC0000001; + __asm__ __volatile__ ("cpuid":"=d"(edx):"a"(eax) : ); + + if ((edx & 0x0030) != 0x0030) + return -ENODEV; + + pr_notice("GMI SM4 is detected by CPUID\n"); + return 0; + } + pr_notice("GMI SM4 is available\n"); + return 0; + + } + return -ENODEV; +} + + +static void gmi_sm4_exit(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(sm4_simd_algs) && sm4_simd_algs[i]; i++) + simd_skcipher_free(sm4_simd_algs[i]); + + crypto_unregister_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); +} +static int __init gmi_sm4_init(void) +{ + struct simd_skcipher_alg *simd; + const char *basename; + const char *algname; + const char *drvname; + int err; + int i; + + if (gmi_ccs_available() != 0) + return -ENODEV; + + if (gmi_zxc_check()) { + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (!strcmp(sm4_algs[i].base.cra_name, "__ctr(sm4)")) { + + sm4_algs[i].encrypt = ctr_encrypt_zxc; + sm4_algs[i].decrypt = ctr_decrypt_zxc; + } else if (!strcmp(sm4_algs[i].base.cra_name, "__cfb(sm4)")) { + + sm4_algs[i].encrypt = cfb_encrypt_zxc; + sm4_algs[i].decrypt = cfb_decrypt_zxc; + + } else if (!strcmp(sm4_algs[i].base.cra_name, "__ofb(sm4)")) { + + sm4_algs[i].encrypt = ofb_encrypt_zxc; + sm4_algs[i].decrypt = ofb_decrypt_zxc; + } + } + } + + err = crypto_register_skciphers(sm4_algs, ARRAY_SIZE(sm4_algs)); + if (err) + return err; + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + algname = sm4_algs[i].base.cra_name + 2; + drvname = sm4_algs[i].base.cra_driver_name + 2; + basename = sm4_algs[i].base.cra_driver_name; + simd = simd_skcipher_create_compat(algname, drvname, basename); + err = PTR_ERR(simd); + if (IS_ERR(simd)) + goto unregister_simds; + + sm4_simd_algs[i] = simd; + } + + return 0; + +unregister_simds: + 
gmi_sm4_exit(); + return err; +} + +late_initcall(gmi_sm4_init); +module_exit(gmi_sm4_exit); + +MODULE_DESCRIPTION("SM4-ECB/CBC/CTR/CFB/OFB using Zhaoxin GMI"); +MODULE_AUTHOR("GRX"); +MODULE_LICENSE("GPL"); diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index e24976593a298a5600dda460c9e81275928343e0..4ee6390b45c903037f81c69e9c9d2ca0b0d22976 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -940,7 +940,7 @@ static int amd_pmu_v2_handle_irq(struct pt_regs *regs) continue; if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 83f15fe411b3f4834b20ea588146dfe913a850d0..5100469fef323206881b62c9963c8f7f17ec31ee 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -196,10 +196,21 @@ static void amd_uncore_del(struct perf_event *event, int flags) */ static u64 l3_thread_slice_mask(u64 config) { - if (boot_cpu_data.x86 <= 0x18) + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 <= 0x18) return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + if (boot_cpu_data.x86_model == 0x6) + return ((config & HYGON_L3_SLICE_MASK) ? : HYGON_L3_SLICE_MASK) | + ((config & HYGON_L3_THREAD_MASK) ? : HYGON_L3_THREAD_MASK); + else + return ((config & AMD64_L3_SLICE_MASK) ? : AMD64_L3_SLICE_MASK) | + ((config & AMD64_L3_THREAD_MASK) ? : AMD64_L3_THREAD_MASK); + } + /* * If the user doesn't specify a threadmask, they're not trying to * count core 0, so we enable all cores & threads. @@ -268,6 +279,13 @@ amd_f17h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) attr->mode : 0; } +static umode_t +hygon_f18h_m6h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + return boot_cpu_data.x86 == 0x18 && boot_cpu_data.x86_model == 0x6 ? 
+ attr->mode : 0; +} + static umode_t amd_f19h_uncore_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -325,6 +343,8 @@ DEFINE_UNCORE_FORMAT_ATTR(threadmask2, threadmask, "config:56-57"); /* F19h L DEFINE_UNCORE_FORMAT_ATTR(enallslices, enallslices, "config:46"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(enallcores, enallcores, "config:47"); /* F19h L3 */ DEFINE_UNCORE_FORMAT_ATTR(sliceid, sliceid, "config:48-50"); /* F19h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(slicemask4, slicemask, "config:28-31"); /* F18h L3 */ +DEFINE_UNCORE_FORMAT_ATTR(threadmask32, threadmask, "config:32-63"); /* F18h L3 */ /* Common DF and NB attributes */ static struct attribute *amd_uncore_df_format_attr[] = { @@ -347,6 +367,12 @@ static struct attribute *amd_f17h_uncore_l3_format_attr[] = { NULL, }; +/* F18h M06h unique L3 attributes */ +static struct attribute *hygon_f18h_m6h_uncore_l3_format_attr[] = { + &format_attr_slicemask4.attr, /* slicemask */ + NULL, +}; + /* F19h unique L3 attributes */ static struct attribute *amd_f19h_uncore_l3_format_attr[] = { &format_attr_coreid.attr, /* coreid */ @@ -372,6 +398,12 @@ static struct attribute_group amd_f17h_uncore_l3_format_group = { .is_visible = amd_f17h_uncore_is_visible, }; +static struct attribute_group hygon_f18h_m6h_uncore_l3_format_group = { + .name = "format", + .attrs = hygon_f18h_m6h_uncore_l3_format_attr, + .is_visible = hygon_f18h_m6h_uncore_is_visible, +}; + static struct attribute_group amd_f19h_uncore_l3_format_group = { .name = "format", .attrs = amd_f19h_uncore_l3_format_attr, @@ -396,6 +428,11 @@ static const struct attribute_group *amd_uncore_l3_attr_update[] = { NULL, }; +static const struct attribute_group *hygon_uncore_l3_attr_update[] = { + &hygon_f18h_m6h_uncore_l3_format_group, + NULL, +}; + static struct pmu amd_nb_pmu = { .task_ctx_nr = perf_invalid_context, .attr_groups = amd_uncore_df_attr_groups, @@ -709,10 +746,21 @@ static int __init amd_uncore_init(void) *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask2.attr; - } else if (boot_cpu_data.x86 >= 0x17) { + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && + boot_cpu_data.x86 >= 0x17) { *l3_attr++ = &format_attr_event8.attr; *l3_attr++ = &format_attr_umask8.attr; *l3_attr++ = &format_attr_threadmask8.attr; + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + *l3_attr++ = &format_attr_event8.attr; + *l3_attr++ = &format_attr_umask8.attr; + if (boot_cpu_data.x86_model == 0x6) { + *l3_attr++ = &format_attr_threadmask32.attr; + amd_llc_pmu.attr_update = hygon_uncore_l3_attr_update; + } else { + *l3_attr++ = &format_attr_threadmask8.attr; + } } amd_uncore_llc = alloc_percpu(struct amd_uncore *); diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 185f902e5f2859e2315db9bd2366a79117888b9d..b56e9c3a0cc8a64de3c18918355d966f6fe94243 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -601,7 +601,7 @@ int x86_pmu_hw_config(struct perf_event *event) } } - if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK) + if (branch_sample_call_stack(event)) event->attach_state |= PERF_ATTACH_TASK_DATA; /* @@ -1702,7 +1702,7 @@ int x86_pmu_handle_irq(struct pt_regs *regs) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(&data, event, &cpuc->lbr_stack, NULL); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 
0); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index fa355d3658a6522df3db78ee6d692d1a586b2d9b..5f506d02caf75497f150020b1ad7e8d450241501 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2518,9 +2518,14 @@ static void intel_pmu_assign_event(struct perf_event *event, int idx) perf_report_aux_output_id(event, idx); } +static __always_inline bool intel_pmu_needs_branch_stack(struct perf_event *event) +{ + return event->hw.flags & PERF_X86_EVENT_NEEDS_BRANCH_STACK; +} + static void intel_pmu_del_event(struct perf_event *event) { - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_del(event); if (event->attr.precise_ip) intel_pmu_pebs_del(event); @@ -2798,6 +2803,7 @@ static void intel_pmu_enable_fixed(struct perf_event *event) static void intel_pmu_enable_event(struct perf_event *event) { + u64 enable_mask = ARCH_PERFMON_EVENTSEL_ENABLE; struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; @@ -2806,8 +2812,10 @@ static void intel_pmu_enable_event(struct perf_event *event) switch (idx) { case 0 ... INTEL_PMC_IDX_FIXED - 1: + if (branch_sample_counters(event)) + enable_mask |= ARCH_PERFMON_EVENTSEL_BR_CNTR; intel_set_masks(event, idx); - __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); + __x86_pmu_enable_event(hwc, enable_mask); break; case INTEL_PMC_IDX_FIXED ... INTEL_PMC_IDX_FIXED_BTS - 1: case INTEL_PMC_IDX_METRIC_BASE ... INTEL_PMC_IDX_METRIC_END: @@ -2831,7 +2839,7 @@ static void intel_pmu_add_event(struct perf_event *event) { if (event->attr.precise_ip) intel_pmu_pebs_add(event); - if (needs_branch_stack(event)) + if (intel_pmu_needs_branch_stack(event)) intel_pmu_lbr_add(event); } @@ -3058,7 +3066,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) perf_sample_data_init(&data, 0, event->hw.last_period); if (has_branch_stack(event)) - perf_sample_save_brstack(&data, event, &cpuc->lbr_stack); + intel_pmu_lbr_save_brstack(&data, cpuc, event); if (perf_event_overflow(event, &data, regs)) x86_pmu_stop(event, 0); @@ -3623,6 +3631,13 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx, if (cpuc->excl_cntrs) return intel_get_excl_constraints(cpuc, event, idx, c2); + /* Not all counters support the branch counter feature. */ + if (branch_sample_counters(event)) { + c2 = dyn_constraint(cpuc, c2, idx); + c2->idxmsk64 &= x86_pmu.lbr_counters; + c2->weight = hweight64(c2->idxmsk64); + } + return c2; } @@ -3908,7 +3923,62 @@ static int intel_pmu_hw_config(struct perf_event *event) x86_pmu.pebs_aliases(event); } - if (needs_branch_stack(event)) { + if (needs_branch_stack(event) && is_sampling_event(event)) + event->hw.flags |= PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + if (branch_sample_counters(event)) { + struct perf_event *leader, *sibling; + int num = 0; + + if (!(x86_pmu.flags & PMU_FL_BR_CNTR) || + (event->attr.config & ~INTEL_ARCH_EVENT_MASK)) + return -EINVAL; + + /* + * The branch counter logging is not supported in the call stack + * mode yet, since we cannot simply flush the LBR during e.g., + * multiplexing. Also, there is no obvious usage with the call + * stack mode. Simply forbids it for now. + * + * If any events in the group enable the branch counter logging + * feature, the group is treated as a branch counter logging + * group, which requires the extra space to store the counters. 
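+	 *
+	 * (Illustrative usage, an assumption about the tooling side: if
+	 * the perf tool maps PERF_SAMPLE_BRANCH_COUNTERS to its "counter"
+	 * branch filter, a branch counter logging group could be opened
+	 * with something like
+	 *
+	 *   perf record -e '{branch-instructions,branch-misses}:S' \
+	 *               -j any,counter -- <workload>
+	 *
+	 * where all members share the leader's LBR configuration.)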
+ */ + leader = event->group_leader; + if (branch_sample_call_stack(leader)) + return -EINVAL; + if (branch_sample_counters(leader)) + num++; + leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS; + + for_each_sibling_event(sibling, leader) { + if (branch_sample_call_stack(sibling)) + return -EINVAL; + if (branch_sample_counters(sibling)) + num++; + } + + if (num > fls(x86_pmu.lbr_counters)) + return -EINVAL; + /* + * Only applying the PERF_SAMPLE_BRANCH_COUNTERS doesn't + * require any branch stack setup. + * Clear the bit to avoid unnecessary branch stack setup. + */ + if (0 == (event->attr.branch_sample_type & + ~(PERF_SAMPLE_BRANCH_PLM_ALL | + PERF_SAMPLE_BRANCH_COUNTERS))) + event->hw.flags &= ~PERF_X86_EVENT_NEEDS_BRANCH_STACK; + + /* + * Force the leader to be a LBR event. So LBRs can be reset + * with the leader event. See intel_pmu_lbr_del() for details. + */ + if (!intel_pmu_needs_branch_stack(leader)) + return -EINVAL; + } + + if (intel_pmu_needs_branch_stack(event)) { ret = intel_pmu_setup_lbr_filter(event); if (ret) return ret; @@ -4386,8 +4456,13 @@ cmt_get_event_constraints(struct cpu_hw_events *cpuc, int idx, */ if (event->attr.precise_ip == 3) { /* Force instruction:ppp on PMC0, 1 and Fixed counter 0 */ - if (constraint_match(&fixed0_constraint, event->hw.config)) - return &fixed0_counter0_1_constraint; + if (constraint_match(&fixed0_constraint, event->hw.config)) { + /* The fixed counter 0 doesn't support LBR event logging. */ + if (branch_sample_counters(event)) + return &counter0_1_constraint; + else + return &fixed0_counter0_1_constraint; + } switch (c->idxmsk64 & 0x3ull) { case 0x1: @@ -4566,7 +4641,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu) goto err; } - if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) { + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA | PMU_FL_BR_CNTR)) { size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint); cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu)); @@ -5463,11 +5538,41 @@ static ssize_t branches_show(struct device *cdev, static DEVICE_ATTR_RO(branches); +static ssize_t branch_counter_nr_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", fls(x86_pmu.lbr_counters)); +} + +static DEVICE_ATTR_RO(branch_counter_nr); + +static ssize_t branch_counter_width_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", LBR_INFO_BR_CNTR_BITS); +} + +static DEVICE_ATTR_RO(branch_counter_width); + static struct attribute *lbr_attrs[] = { &dev_attr_branches.attr, + &dev_attr_branch_counter_nr.attr, + &dev_attr_branch_counter_width.attr, NULL }; +static umode_t +lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + /* branches */ + if (i == 0) + return x86_pmu.lbr_nr ? attr->mode : 0; + + return (x86_pmu.flags & PMU_FL_BR_CNTR) ? attr->mode : 0; +} + static char pmu_name_str[30]; static ssize_t pmu_name_show(struct device *cdev, @@ -5494,6 +5599,15 @@ static struct attribute *intel_pmu_attrs[] = { NULL, }; +static umode_t +default_is_visible(struct kobject *kobj, struct attribute *attr, int i) +{ + if (attr == &dev_attr_allow_tsx_force_abort.attr) + return x86_pmu.flags & PMU_FL_TFA ? 
attr->mode : 0; + + return attr->mode; +} + static umode_t tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i) { @@ -5515,27 +5629,12 @@ mem_is_visible(struct kobject *kobj, struct attribute *attr, int i) return pebs_is_visible(kobj, attr, i); } -static umode_t -lbr_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - return x86_pmu.lbr_nr ? attr->mode : 0; -} - static umode_t exra_is_visible(struct kobject *kobj, struct attribute *attr, int i) { return x86_pmu.version >= 2 ? attr->mode : 0; } -static umode_t -default_is_visible(struct kobject *kobj, struct attribute *attr, int i) -{ - if (attr == &dev_attr_allow_tsx_force_abort.attr) - return x86_pmu.flags & PMU_FL_TFA ? attr->mode : 0; - - return attr->mode; -} - static struct attribute_group group_events_td = { .name = "events", }; diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 96fffb2d521d2a992f02eed7ce072daba22764c9..6b65ee785da99328a3ebd0ced5266ae0d1c1a894 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -41,7 +41,7 @@ * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 * Available model: SLM,AMT,GLM,CNL,ICX,TNT,ADL,RPL - * MTL + * MTL,SRF,GRR * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 @@ -52,7 +52,8 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF, + * GRR * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 @@ -75,7 +76,7 @@ * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, * SKL,KNL,GLM,CNL,KBL,CML,ICL,ICX, - * TGL,TNT,RKL,ADL,RPL,SPR,MTL + * TGL,TNT,RKL,ADL,RPL,SPR,MTL,SRF * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 @@ -97,6 +98,10 @@ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL, * TNT,RKL,ADL,RPL,MTL * Scope: Package (physical package) + * MSR_MODULE_C6_RES_MS: Module C6 Residency Counter. + * perf code: 0x00 + * Available model: SRF,GRR + * Scope: A cluster of cores shared L2 cache * */ @@ -130,6 +135,7 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, struct cstate_model { unsigned long core_events; unsigned long pkg_events; + unsigned long module_events; unsigned long quirks; }; @@ -189,20 +195,20 @@ static struct attribute *attrs_empty[] = { * "events" group (with empty attrs) before updating * it with detected events. 
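 * The renamed cstate_* groups below are shared by the core, pkg and the
 * new module PMUs, which now differ only in their attr_update lists.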
*/ -static struct attribute_group core_events_attr_group = { +static struct attribute_group cstate_events_attr_group = { .name = "events", .attrs = attrs_empty, }; -DEFINE_CSTATE_FORMAT_ATTR(core_event, event, "config:0-63"); -static struct attribute *core_format_attrs[] = { - &format_attr_core_event.attr, +DEFINE_CSTATE_FORMAT_ATTR(cstate_event, event, "config:0-63"); +static struct attribute *cstate_format_attrs[] = { + &format_attr_cstate_event.attr, NULL, }; -static struct attribute_group core_format_attr_group = { +static struct attribute_group cstate_format_attr_group = { .name = "format", - .attrs = core_format_attrs, + .attrs = cstate_format_attrs, }; static cpumask_t cstate_core_cpu_mask; @@ -217,9 +223,9 @@ static struct attribute_group cpumask_attr_group = { .attrs = cstate_cpumask_attrs, }; -static const struct attribute_group *core_attr_groups[] = { - &core_events_attr_group, - &core_format_attr_group, +static const struct attribute_group *cstate_attr_groups[] = { + &cstate_events_attr_group, + &cstate_format_attr_group, &cpumask_attr_group, NULL, }; @@ -268,30 +274,30 @@ static struct perf_msr pkg_msr[] = { [PERF_CSTATE_PKG_C10_RES] = { MSR_PKG_C10_RESIDENCY, &group_cstate_pkg_c10, test_msr }, }; -static struct attribute_group pkg_events_attr_group = { - .name = "events", - .attrs = attrs_empty, -}; +static cpumask_t cstate_pkg_cpu_mask; -DEFINE_CSTATE_FORMAT_ATTR(pkg_event, event, "config:0-63"); -static struct attribute *pkg_format_attrs[] = { - &format_attr_pkg_event.attr, - NULL, -}; -static struct attribute_group pkg_format_attr_group = { - .name = "format", - .attrs = pkg_format_attrs, +/* cstate_module PMU */ +static struct pmu cstate_module_pmu; +static bool has_cstate_module; + +enum perf_cstate_module_events { + PERF_CSTATE_MODULE_C6_RES = 0, + + PERF_CSTATE_MODULE_EVENT_MAX, }; -static cpumask_t cstate_pkg_cpu_mask; +PMU_EVENT_ATTR_STRING(c6-residency, attr_cstate_module_c6, "event=0x00"); -static const struct attribute_group *pkg_attr_groups[] = { - &pkg_events_attr_group, - &pkg_format_attr_group, - &cpumask_attr_group, - NULL, +static unsigned long module_msr_mask; + +PMU_EVENT_GROUP(events, cstate_module_c6); + +static struct perf_msr module_msr[] = { + [PERF_CSTATE_MODULE_C6_RES] = { MSR_MODULE_C6_RES_MS, &group_cstate_module_c6, test_msr }, }; +static cpumask_t cstate_module_cpu_mask; + static ssize_t cstate_get_attr_cpumask(struct device *dev, struct device_attribute *attr, char *buf) @@ -302,6 +308,8 @@ static ssize_t cstate_get_attr_cpumask(struct device *dev, return cpumap_print_to_pagebuf(true, buf, &cstate_core_cpu_mask); else if (pmu == &cstate_pkg_pmu) return cpumap_print_to_pagebuf(true, buf, &cstate_pkg_cpu_mask); + else if (pmu == &cstate_module_pmu) + return cpumap_print_to_pagebuf(true, buf, &cstate_module_cpu_mask); else return 0; } @@ -339,6 +347,15 @@ static int cstate_pmu_event_init(struct perf_event *event) event->hw.event_base = pkg_msr[cfg].msr; cpu = cpumask_any_and(&cstate_pkg_cpu_mask, topology_die_cpumask(event->cpu)); + } else if (event->pmu == &cstate_module_pmu) { + if (cfg >= PERF_CSTATE_MODULE_EVENT_MAX) + return -EINVAL; + cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_MODULE_EVENT_MAX); + if (!(module_msr_mask & (1 << cfg))) + return -EINVAL; + event->hw.event_base = module_msr[cfg].msr; + cpu = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(event->cpu)); } else { return -ENOENT; } @@ -426,6 +443,17 @@ static int cstate_cpu_exit(unsigned int cpu) perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, 
target); } } + + if (has_cstate_module && + cpumask_test_and_clear_cpu(cpu, &cstate_module_cpu_mask)) { + + target = cpumask_any_but(topology_cluster_cpumask(cpu), cpu); + /* Migrate events if there is a valid target */ + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &cstate_module_cpu_mask); + perf_pmu_migrate_context(&cstate_module_pmu, cpu, target); + } + } return 0; } @@ -452,6 +480,15 @@ static int cstate_cpu_init(unsigned int cpu) if (has_cstate_pkg && target >= nr_cpu_ids) cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); + /* + * If this is the first online thread of that cluster, set it + * in the cluster cpu mask as the designated reader. + */ + target = cpumask_any_and(&cstate_module_cpu_mask, + topology_cluster_cpumask(cpu)); + if (has_cstate_module && target >= nr_cpu_ids) + cpumask_set_cpu(cpu, &cstate_module_cpu_mask); + return 0; } @@ -474,8 +511,13 @@ static const struct attribute_group *pkg_attr_update[] = { NULL, }; +static const struct attribute_group *module_attr_update[] = { + &group_cstate_module_c6, + NULL +}; + static struct pmu cstate_core_pmu = { - .attr_groups = core_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = core_attr_update, .name = "cstate_core", .task_ctx_nr = perf_invalid_context, @@ -490,7 +532,7 @@ static struct pmu cstate_core_pmu = { }; static struct pmu cstate_pkg_pmu = { - .attr_groups = pkg_attr_groups, + .attr_groups = cstate_attr_groups, .attr_update = pkg_attr_update, .name = "cstate_pkg", .task_ctx_nr = perf_invalid_context, @@ -504,6 +546,21 @@ static struct pmu cstate_pkg_pmu = { .module = THIS_MODULE, }; +static struct pmu cstate_module_pmu = { + .attr_groups = cstate_attr_groups, + .attr_update = module_attr_update, + .name = "cstate_module", + .task_ctx_nr = perf_invalid_context, + .event_init = cstate_pmu_event_init, + .add = cstate_pmu_event_add, + .del = cstate_pmu_event_del, + .start = cstate_pmu_event_start, + .stop = cstate_pmu_event_stop, + .read = cstate_pmu_event_update, + .capabilities = PERF_PMU_CAP_NO_INTERRUPT | PERF_PMU_CAP_NO_EXCLUDE, + .module = THIS_MODULE, +}; + static const struct cstate_model nhm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C3_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -618,6 +675,22 @@ static const struct cstate_model glm_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model grr_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + +static const struct cstate_model srf_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C6_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C6_RES), + + .module_events = BIT(PERF_CSTATE_MODULE_C6_RES), +}; + static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &nhm_cstates), @@ -670,6 +743,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_L, &glm_cstates), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &srf_cstates), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &grr_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, &icl_cstates), X86_MATCH_INTEL_FAM6_MODEL(ICELAKE, &icl_cstates), @@ -711,10 +786,14 @@ static int __init cstate_probe(const struct cstate_model *cm) pkg_msr_mask = perf_msr_probe(pkg_msr, PERF_CSTATE_PKG_EVENT_MAX, true, 
(void *) &cm->pkg_events); + module_msr_mask = perf_msr_probe(module_msr, PERF_CSTATE_MODULE_EVENT_MAX, + true, (void *) &cm->module_events); + has_cstate_core = !!core_msr_mask; has_cstate_pkg = !!pkg_msr_mask; + has_cstate_module = !!module_msr_mask; - return (has_cstate_core || has_cstate_pkg) ? 0 : -ENODEV; + return (has_cstate_core || has_cstate_pkg || has_cstate_module) ? 0 : -ENODEV; } static inline void cstate_cleanup(void) @@ -727,6 +806,9 @@ static inline void cstate_cleanup(void) if (has_cstate_pkg) perf_pmu_unregister(&cstate_pkg_pmu); + + if (has_cstate_module) + perf_pmu_unregister(&cstate_module_pmu); } static int __init cstate_init(void) @@ -763,6 +845,16 @@ static int __init cstate_init(void) return err; } } + + if (has_cstate_module) { + err = perf_pmu_register(&cstate_module_pmu, cstate_module_pmu.name, -1); + if (err) { + has_cstate_module = false; + pr_info("Failed to register cstate cluster pmu\n"); + cstate_cleanup(); + return err; + } + } return 0; } diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index eb8dd8b8a1e860cd53e24436121a9e031a4f9ae9..5ff81dbf8aa3c71b92bf2948aa0fbe1990565048 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1755,7 +1755,7 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event, setup_pebs_time(event, data, pebs->tsc); if (has_branch_stack(event)) - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); } static void adaptive_pebs_save_regs(struct pt_regs *regs, @@ -1912,7 +1912,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event, if (has_branch_stack(event)) { intel_pmu_store_pebs_lbrs(lbr); - perf_sample_save_brstack(data, event, &cpuc->lbr_stack); + intel_pmu_lbr_save_brstack(data, cpuc, event); } } diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index c3b0d15a9841b5edec20e10240850facae803772..78cd5084104e9c205a6e949f6ee1ce6b93060fb0 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -676,6 +676,25 @@ void intel_pmu_lbr_del(struct perf_event *event) WARN_ON_ONCE(cpuc->lbr_users < 0); WARN_ON_ONCE(cpuc->lbr_pebs_users < 0); perf_sched_cb_dec(event->pmu); + + /* + * The logged occurrences information is only valid for the + * current LBR group. If another LBR group is scheduled in + * later, the information from the stale LBRs will be wrongly + * interpreted. Reset the LBRs here. + * + * Only clear once per branch counter group, using the leader + * event, because: + * - The LBRs cannot simply be reset when !cpuc->lbr_users, since + * it's possible that the last LBR user is not in a + * branch counter group, e.g., a branch_counters group + + * several normal LBR events. + * - The LBR reset can be done with any one of the events in a + * branch counter group, since they are always scheduled together. + * It's easy to force the leader event to be an LBR event.
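+ * Hence the check below performs the reset exactly once per + * branch counter group, when its leader event is deleted.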
+ */ + if (is_branch_counters_group(event) && event == event->group_leader) + intel_pmu_lbr_reset(); } static inline bool vlbr_exclude_host(void) @@ -866,6 +885,8 @@ static __always_inline u16 get_lbr_cycles(u64 info) return cycles; } +static_assert((64 - PERF_BRANCH_ENTRY_INFO_BITS_MAX) > LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS); + static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, struct lbr_entry *entries) { @@ -898,11 +919,67 @@ static void intel_pmu_store_lbr(struct cpu_hw_events *cpuc, e->abort = !!(info & LBR_INFO_ABORT); e->cycles = get_lbr_cycles(info); e->type = get_lbr_br_type(info); + + /* + * Leverage the reserved field of cpuc->lbr_entries[i] to + * temporarily store the branch counters information. + * The later code will decide what content can be disclosed + * to the perf tool. Please see intel_pmu_lbr_counters_reorder(). + */ + e->reserved = (info >> LBR_INFO_BR_CNTR_OFFSET) & LBR_INFO_BR_CNTR_FULL_MASK; } cpuc->lbr_stack.nr = i; } +/* + * The enabled order may be different from the counter order. + * Update the lbr_counters with the enabled order. + */ +static void intel_pmu_lbr_counters_reorder(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i, j, pos = 0, order[X86_PMC_IDX_MAX]; + struct perf_event *leader, *sibling; + u64 src, dst, cnt; + + leader = event->group_leader; + if (branch_sample_counters(leader)) + order[pos++] = leader->hw.idx; + + for_each_sibling_event(sibling, leader) { + if (!branch_sample_counters(sibling)) + continue; + order[pos++] = sibling->hw.idx; + } + + WARN_ON_ONCE(!pos); + + for (i = 0; i < cpuc->lbr_stack.nr; i++) { + src = cpuc->lbr_entries[i].reserved; + dst = 0; + for (j = 0; j < pos; j++) { + cnt = (src >> (order[j] * LBR_INFO_BR_CNTR_BITS)) & LBR_INFO_BR_CNTR_MASK; + dst |= cnt << j * LBR_INFO_BR_CNTR_BITS; + } + cpuc->lbr_counters[i] = dst; + cpuc->lbr_entries[i].reserved = 0; + } +} + +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + if (is_branch_counters_group(event)) { + intel_pmu_lbr_counters_reorder(cpuc, event); + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, cpuc->lbr_counters); + return; + } + + perf_sample_save_brstack(data, event, &cpuc->lbr_stack, NULL); +} + static void intel_pmu_arch_lbr_read(struct cpu_hw_events *cpuc) { intel_pmu_store_lbr(cpuc, NULL); @@ -1173,8 +1250,10 @@ intel_pmu_lbr_filter(struct cpu_hw_events *cpuc) for (i = 0; i < cpuc->lbr_stack.nr; ) { if (!cpuc->lbr_entries[i].from) { j = i; - while (++j < cpuc->lbr_stack.nr) + while (++j < cpuc->lbr_stack.nr) { cpuc->lbr_entries[j-1] = cpuc->lbr_entries[j]; + cpuc->lbr_counters[j-1] = cpuc->lbr_counters[j]; + } cpuc->lbr_stack.nr--; if (!cpuc->lbr_entries[i].from) continue; @@ -1525,8 +1604,12 @@ void __init intel_pmu_arch_lbr_init(void) x86_pmu.lbr_mispred = ecx.split.lbr_mispred; x86_pmu.lbr_timed_lbr = ecx.split.lbr_timed_lbr; x86_pmu.lbr_br_type = ecx.split.lbr_br_type; + x86_pmu.lbr_counters = ecx.split.lbr_counters; x86_pmu.lbr_nr = lbr_nr; + if (x86_pmu.lbr_counters) + x86_pmu.flags |= PMU_FL_BR_CNTR; + if (x86_pmu.lbr_mispred) static_branch_enable(&x86_lbr_mispred); if (x86_pmu.lbr_timed_lbr) diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 69043e02e8a7d828ec2f096ba940648c5c3f39ea..4e26a28536dea7c66af55a2a44415fd14f42771f 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1814,6 +1814,14 @@ static const struct intel_uncore_init_fun spr_uncore_init __initconst = {
.uncore_units_ignore = spr_uncore_units_ignore, }; +static const struct intel_uncore_init_fun gnr_uncore_init __initconst = { + .cpu_init = gnr_uncore_cpu_init, + .pci_init = gnr_uncore_pci_init, + .mmio_init = gnr_uncore_mmio_init, + .use_discovery = true, + .uncore_units_ignore = gnr_uncore_units_ignore, +}; + static const struct intel_uncore_init_fun generic_uncore_init __initconst = { .cpu_init = intel_uncore_generic_uncore_cpu_init, .pci_init = intel_uncore_generic_uncore_pci_init, @@ -1865,8 +1873,12 @@ static const struct x86_cpu_id intel_uncore_match[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(METEORLAKE_L, &mtl_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &spr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &spr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(GRANITERAPIDS_D, &gnr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &snr_uncore_init), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GRACEMONT, &adl_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &gnr_uncore_init), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT, &gnr_uncore_init), {}, }; MODULE_DEVICE_TABLE(x86cpu, intel_uncore_match); diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index c30fb5bb1222b39ed54e4eabc28e1fd826b03455..4838502d89aed34dbedf07211322eb3abb3ff59c 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -72,9 +72,9 @@ struct intel_uncore_type { unsigned single_fixed:1; unsigned pair_ctr_ctl:1; union { - unsigned *msr_offsets; - unsigned *pci_offsets; - unsigned *mmio_offsets; + u64 *msr_offsets; + u64 *pci_offsets; + u64 *mmio_offsets; }; unsigned *box_ids; struct event_constraint unconstrainted; @@ -593,6 +593,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; extern int spr_uncore_units_ignore[]; +extern int gnr_uncore_units_ignore[]; /* uncore_snb.c */ int snb_uncore_pci_init(void); @@ -634,6 +635,9 @@ void icx_uncore_mmio_init(void); int spr_uncore_pci_init(void); void spr_uncore_cpu_init(void); void spr_uncore_mmio_init(void); +int gnr_uncore_pci_init(void); +void gnr_uncore_cpu_init(void); +void gnr_uncore_mmio_init(void); /* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); diff --git a/arch/x86/events/intel/uncore_discovery.c b/arch/x86/events/intel/uncore_discovery.c index cb488e41807c76cbf7c200c88be9ceff97ff04d2..9a698a92962a10a3c10e70c1ca393603b2f551db 100644 --- a/arch/x86/events/intel/uncore_discovery.c +++ b/arch/x86/events/intel/uncore_discovery.c @@ -125,7 +125,8 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, int die, bool parsed) { struct intel_uncore_discovery_type *type; - unsigned int *box_offset, *ids; + unsigned int *ids; + u64 *box_offset; int i; if (!unit->ctl || !unit->ctl_offset || !unit->ctr_offset) { @@ -153,7 +154,7 @@ uncore_insert_box_info(struct uncore_unit_discovery *unit, if (!type) return; - box_offset = kcalloc(type->num_boxes + 1, sizeof(unsigned int), GFP_KERNEL); + box_offset = kcalloc(type->num_boxes + 1, sizeof(u64), GFP_KERNEL); if (!box_offset) return; diff --git a/arch/x86/events/intel/uncore_discovery.h b/arch/x86/events/intel/uncore_discovery.h index 6ee80ad3423e4e71d68d6da9fd7214cf3a0f33e0..22e769a81103935d52e9e92c086d022a96864007 100644 --- a/arch/x86/events/intel/uncore_discovery.h +++ b/arch/x86/events/intel/uncore_discovery.h @@ -125,7 +125,7 @@ struct intel_uncore_discovery_type { u8 
ctr_offset; /* Counter 0 offset */ u16 num_boxes; /* number of boxes for the uncore block */ unsigned int *ids; /* Box IDs */ - unsigned int *box_offset; /* Box offset */ + u64 *box_offset; /* Box offset */ }; bool intel_uncore_has_discovery_tables(int *ignore); diff --git a/arch/x86/events/intel/uncore_nhmex.c b/arch/x86/events/intel/uncore_nhmex.c index 173e2674be6ef24293c5113b43d738af3d3f1981..56eea2c66cfb8cc7d9cc395e58c24a7319afc944 100644 --- a/arch/x86/events/intel/uncore_nhmex.c +++ b/arch/x86/events/intel/uncore_nhmex.c @@ -306,7 +306,7 @@ static const struct attribute_group nhmex_uncore_cbox_format_group = { }; /* msr offset for each instance of cbox */ -static unsigned nhmex_cbox_msr_offsets[] = { +static u64 nhmex_cbox_msr_offsets[] = { 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, }; diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 8250f0f59c2bbe4865703e5b6c3d009bcc03fdab..aeaa8efe3c62476c9d4b297fc5eac8266012d0fe 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -5278,7 +5278,7 @@ void snr_uncore_mmio_init(void) /* ICX uncore support */ -static unsigned icx_cha_msr_offsets[] = { +static u64 icx_cha_msr_offsets[] = { 0x2a0, 0x2ae, 0x2bc, 0x2ca, 0x2d8, 0x2e6, 0x2f4, 0x302, 0x310, 0x31e, 0x32c, 0x33a, 0x348, 0x356, 0x364, 0x372, 0x380, 0x38e, 0x3aa, 0x3b8, 0x3c6, 0x3d4, 0x3e2, 0x3f0, 0x3fe, 0x40c, 0x41a, @@ -5326,7 +5326,7 @@ static struct intel_uncore_type icx_uncore_chabox = { .format_group = &snr_uncore_chabox_format_group, }; -static unsigned icx_msr_offsets[] = { +static u64 icx_msr_offsets[] = { 0x0, 0x20, 0x40, 0x90, 0xb0, 0xd0, }; @@ -6079,13 +6079,16 @@ static struct uncore_event_desc spr_uncore_imc_events[] = { { /* end: all zeroes */ }, }; +#define SPR_UNCORE_MMIO_COMMON_FORMAT() \ + SPR_UNCORE_COMMON_FORMAT(), \ + .ops = &spr_uncore_mmio_ops + static struct intel_uncore_type spr_uncore_imc = { - SPR_UNCORE_COMMON_FORMAT(), + SPR_UNCORE_MMIO_COMMON_FORMAT(), .name = "imc", .fixed_ctr_bits = 48, .fixed_ctr = SNR_IMC_MMIO_PMON_FIXED_CTR, .fixed_ctl = SNR_IMC_MMIO_PMON_FIXED_CTL, - .ops = &spr_uncore_mmio_ops, .event_descs = spr_uncore_imc_events, }; @@ -6181,7 +6184,7 @@ static struct intel_uncore_type *spr_uncores[UNCORE_SPR_NUM_UNCORE_TYPES] = { */ #define SPR_UNCORE_UPI_NUM_BOXES 4 -static unsigned int spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { +static u64 spr_upi_pci_offsets[SPR_UNCORE_UPI_NUM_BOXES] = { 0, 0x8000, 0x10000, 0x18000 }; @@ -6412,7 +6415,8 @@ static void uncore_type_customized_copy(struct intel_uncore_type *to_type, static struct intel_uncore_type ** uncore_get_uncores(enum uncore_access_type type_id, int num_extra, - struct intel_uncore_type **extra) + struct intel_uncore_type **extra, int max_num_types, + struct intel_uncore_type **uncores) { struct intel_uncore_type **types, **start_types; int i; @@ -6421,9 +6425,9 @@ uncore_get_uncores(enum uncore_access_type type_id, int num_extra, /* Only copy the customized features */ for (; *types; types++) { - if ((*types)->type_id >= UNCORE_SPR_NUM_UNCORE_TYPES) + if ((*types)->type_id >= max_num_types) continue; - uncore_type_customized_copy(*types, spr_uncores[(*types)->type_id]); + uncore_type_customized_copy(*types, uncores[(*types)->type_id]); } for (i = 0; i < num_extra; i++, types++) @@ -6470,7 +6474,9 @@ void spr_uncore_cpu_init(void) uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, UNCORE_SPR_MSR_EXTRA_UNCORES, - spr_msr_uncores); + spr_msr_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, 
+ spr_uncores); type = uncore_find_type_by_id(uncore_msr_uncores, UNCORE_SPR_CHA); if (type) { @@ -6552,7 +6558,9 @@ int spr_uncore_pci_init(void) spr_update_device_location(UNCORE_SPR_M3UPI); uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, UNCORE_SPR_PCI_EXTRA_UNCORES, - spr_pci_uncores); + spr_pci_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); return 0; } @@ -6560,15 +6568,116 @@ void spr_uncore_mmio_init(void) { int ret = snbep_pci2phy_map_init(0x3250, SKX_CPUNODEID, SKX_GIDNIDMAP, true); - if (ret) - uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL); - else { + if (ret) { + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); + } else { uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, UNCORE_SPR_MMIO_EXTRA_UNCORES, - spr_mmio_uncores); + spr_mmio_uncores, + UNCORE_SPR_NUM_UNCORE_TYPES, + spr_uncores); spr_uncore_imc_free_running.num_boxes = uncore_type_max_boxes(uncore_mmio_uncores, UNCORE_SPR_IMC) / 2; } } /* end of SPR uncore support */ + +/* GNR uncore support */ + +#define UNCORE_GNR_NUM_UNCORE_TYPES 23 +#define UNCORE_GNR_TYPE_15 15 +#define UNCORE_GNR_B2UPI 18 +#define UNCORE_GNR_TYPE_21 21 +#define UNCORE_GNR_TYPE_22 22 + +int gnr_uncore_units_ignore[] = { + UNCORE_SPR_UPI, + UNCORE_GNR_TYPE_15, + UNCORE_GNR_B2UPI, + UNCORE_GNR_TYPE_21, + UNCORE_GNR_TYPE_22, + UNCORE_IGNORE_END +}; + +static struct intel_uncore_type gnr_uncore_ubox = { + .name = "ubox", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type gnr_uncore_b2cmi = { + SPR_UNCORE_PCI_COMMON_FORMAT(), + .name = "b2cmi", +}; + +static struct intel_uncore_type gnr_uncore_b2cxl = { + SPR_UNCORE_MMIO_COMMON_FORMAT(), + .name = "b2cxl", +}; + +static struct intel_uncore_type gnr_uncore_mdf_sbo = { + .name = "mdf_sbo", + .attr_update = uncore_alias_groups, +}; + +static struct intel_uncore_type *gnr_uncores[UNCORE_GNR_NUM_UNCORE_TYPES] = { + &spr_uncore_chabox, + &spr_uncore_iio, + &spr_uncore_irp, + NULL, + &spr_uncore_pcu, + &gnr_uncore_ubox, + &spr_uncore_imc, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + NULL, + &gnr_uncore_b2cmi, + &gnr_uncore_b2cxl, + NULL, + NULL, + &gnr_uncore_mdf_sbo, + NULL, + NULL, +}; + +static struct freerunning_counters gnr_iio_freerunning[] = { + [SPR_IIO_MSR_IOCLK] = { 0x290e, 0x01, 0x10, 1, 48 }, + [SPR_IIO_MSR_BW_IN] = { 0x360e, 0x10, 0x80, 8, 48 }, + [SPR_IIO_MSR_BW_OUT] = { 0x2e0e, 0x10, 0x80, 8, 48 }, +}; + +void gnr_uncore_cpu_init(void) +{ + uncore_msr_uncores = uncore_get_uncores(UNCORE_ACCESS_MSR, + UNCORE_SPR_MSR_EXTRA_UNCORES, + spr_msr_uncores, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + spr_uncore_iio_free_running.num_boxes = uncore_type_max_boxes(uncore_msr_uncores, UNCORE_SPR_IIO); + spr_uncore_iio_free_running.freerunning = gnr_iio_freerunning; +} + +int gnr_uncore_pci_init(void) +{ + uncore_pci_uncores = uncore_get_uncores(UNCORE_ACCESS_PCI, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); + return 0; +} + +void gnr_uncore_mmio_init(void) +{ + uncore_mmio_uncores = uncore_get_uncores(UNCORE_ACCESS_MMIO, 0, NULL, + UNCORE_GNR_NUM_UNCORE_TYPES, + gnr_uncores); +} + +/* end of GNR uncore support */ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index c8ba2be7585d4490af31bad6f31891f224900f2f..b8a2d3ba4ccd52f00652bd470000d56e8e1d3253 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -110,6 +110,11 @@ static inline bool is_topdown_event(struct perf_event 
*event) return is_metric_event(event) || is_slots_event(event); } +static inline bool is_branch_counters_group(struct perf_event *event) +{ + return event->group_leader->hw.flags & PERF_X86_EVENT_BRANCH_COUNTERS; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ @@ -283,6 +288,7 @@ struct cpu_hw_events { int lbr_pebs_users; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; + u64 lbr_counters[MAX_LBR_ENTRIES]; /* branch stack extra */ union { struct er_account *lbr_sel; struct er_account *lbr_ctl; @@ -881,6 +887,7 @@ struct x86_pmu { unsigned int lbr_mispred:1; unsigned int lbr_timed_lbr:1; unsigned int lbr_br_type:1; + unsigned int lbr_counters:4; void (*lbr_reset)(void); void (*lbr_read)(struct cpu_hw_events *cpuc); @@ -1005,6 +1012,7 @@ do { \ #define PMU_FL_INSTR_LATENCY 0x80 /* Support Instruction Latency in PEBS Memory Info Record */ #define PMU_FL_MEM_LOADS_AUX 0x100 /* Require an auxiliary event for the complete memory info */ #define PMU_FL_RETIRE_LATENCY 0x200 /* Support Retire Latency in PEBS */ +#define PMU_FL_BR_CNTR 0x400 /* Support branch counter logging */ #define EVENT_VAR(_id) event_attr_##_id #define EVENT_PTR(_id) &event_attr_##_id.attr.attr @@ -1545,6 +1553,10 @@ void intel_pmu_store_pebs_lbrs(struct lbr_entry *lbr); void intel_ds_init(void); +void intel_pmu_lbr_save_brstack(struct perf_sample_data *data, + struct cpu_hw_events *cpuc, + struct perf_event *event); + void intel_pmu_lbr_swap_task_ctx(struct perf_event_pmu_context *prev_epc, struct perf_event_pmu_context *next_epc); diff --git a/arch/x86/events/perf_event_flags.h b/arch/x86/events/perf_event_flags.h index 1dc19b9b4426abde8e1b005a7561e8f97b5a3995..6c977c19f2cd7b0a1c1947611a2b28f72376202e 100644 --- a/arch/x86/events/perf_event_flags.h +++ b/arch/x86/events/perf_event_flags.h @@ -20,3 +20,5 @@ PERF_ARCH(TOPDOWN, 0x04000) /* Count Topdown slots/metrics events */ PERF_ARCH(PEBS_STLAT, 0x08000) /* st+stlat data address sampling */ PERF_ARCH(AMD_BRS, 0x10000) /* AMD Branch Sampling */ PERF_ARCH(PEBS_LAT_HYBRID, 0x20000) /* ld and st lat for hybrid */ +PERF_ARCH(NEEDS_BRANCH_STACK, 0x40000) /* require branch stack setup */ +PERF_ARCH(BRANCH_COUNTERS, 0x80000) /* logs the counters in the extra space of each branch */ diff --git a/arch/x86/events/zhaoxin/Makefile b/arch/x86/events/zhaoxin/Makefile index 642c1174d662650192e31c213ffb48e29a19951c..767d6212bac1636b648ba0c0cc32006d32097f99 100644 --- a/arch/x86/events/zhaoxin/Makefile +++ b/arch/x86/events/zhaoxin/Makefile @@ -1,2 +1,3 @@ # SPDX-License-Identifier: GPL-2.0 obj-y += core.o +obj-y += uncore.o diff --git a/arch/x86/events/zhaoxin/core.c b/arch/x86/events/zhaoxin/core.c index 3e9acdaeed1ec8ed3b68d0d35ccc7300fb001dd2..2957b416a6db5182703b748ba54e472d5427c301 100644 --- a/arch/x86/events/zhaoxin/core.c +++ b/arch/x86/events/zhaoxin/core.c @@ -19,15 +19,15 @@ #include "../perf_event.h" /* - * Zhaoxin PerfMon, used on zxc and later. + * Zhaoxin PerfMon, used on Lujiazui and later. 
*/ static u64 zx_pmon_event_map[PERF_COUNT_HW_MAX] __read_mostly = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0082, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515, - [PERF_COUNT_HW_CACHE_MISSES] = 0x051a, [PERF_COUNT_HW_BUS_CYCLES] = 0x0083, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028, + [PERF_COUNT_HW_BRANCH_MISSES] = 0x0029, }; static struct event_constraint zxc_event_constraints[] __read_mostly = { @@ -36,7 +36,7 @@ static struct event_constraint zxc_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static struct event_constraint zxd_event_constraints[] __read_mostly = { +static struct event_constraint wudaokou_event_constraints[] __read_mostly = { FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* retired instructions */ FIXED_EVENT_CONSTRAINT(0x0082, 1), /* unhalted core clock cycles */ @@ -44,7 +44,7 @@ static struct event_constraint zxd_event_constraints[] __read_mostly = { EVENT_CONSTRAINT_END }; -static __initconst const u64 zxd_hw_cache_event_ids +static __initconst const u64 wudaokou_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -148,7 +148,7 @@ static __initconst const u64 zxd_hw_cache_event_ids }, }; -static __initconst const u64 zxe_hw_cache_event_ids +static __initconst const u64 lujiazui_hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] [PERF_COUNT_HW_CACHE_OP_MAX] [PERF_COUNT_HW_CACHE_RESULT_MAX] = { @@ -471,7 +471,7 @@ static const struct x86_pmu zhaoxin_pmu __initconst = { .max_events = ARRAY_SIZE(zx_pmon_event_map), .apic = 1, /* - * For zxd/zxe, read/write operation for PMCx MSR is 48 bits. + * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits. */ .max_period = (1ULL << 47) - 1, .get_event_constraints = zhaoxin_get_event_constraints, @@ -559,6 +559,8 @@ __init int zhaoxin_pmu_init(void) zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0; zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; zx_pmon_event_map[PERF_COUNT_HW_BUS_CYCLES] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0; + zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0; pr_cont("ZXC events, "); break; @@ -574,26 +576,47 @@ __init int zhaoxin_pmu_init(void) switch (boot_cpu_data.x86_model) { case 0x1b: - memcpy(hw_cache_event_ids, zxd_hw_cache_event_ids, + memcpy(hw_cache_event_ids, wudaokou_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0700; zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0709; - pr_cont("ZXD events, "); + pr_cont("Wudaokou events, "); break; case 0x3b: - memcpy(hw_cache_event_ids, zxe_hw_cache_event_ids, + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, sizeof(hw_cache_event_ids)); - x86_pmu.event_constraints = zxd_event_constraints; + x86_pmu.event_constraints = wudaokou_event_constraints; + + zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0515; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0x051a; + + pr_cont("Lujiazui events, "); + break; + case 0x5b: + case 0x6b: + zx_pmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event = 0x02, .umask = 0x01, .inv = 0x01, + .cmask = 0x01); + + memcpy(hw_cache_event_ids, lujiazui_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + + x86_pmu.event_constraints = wudaokou_event_constraints; + + 
zx_pmon_event_map[PERF_COUNT_HW_CACHE_REFERENCES] = 0x051a; + zx_pmon_event_map[PERF_COUNT_HW_CACHE_MISSES] = 0; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x0028; - zx_pmon_event_map[PERF_COUNT_HW_BRANCH_MISSES] = 0x0029; + if (boot_cpu_data.x86_model == 0x5b) + pr_cont("Yongfeng events, "); - pr_cont("ZXE events, "); break; default: return -ENODEV; diff --git a/arch/x86/events/zhaoxin/uncore.c b/arch/x86/events/zhaoxin/uncore.c new file mode 100644 index 0000000000000000000000000000000000000000..8d898a10d953404d8eed3280ba52e47bc1132a7c --- /dev/null +++ b/arch/x86/events/zhaoxin/uncore.c @@ -0,0 +1,2900 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include "uncore.h" + +static struct zhaoxin_uncore_type *empty_uncore[] = { NULL, }; +static struct zhaoxin_uncore_type **uncore_msr_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_pci_uncores = empty_uncore; +static struct zhaoxin_uncore_type **uncore_mmio_uncores = empty_uncore; + + +static bool pcidrv_registered; +static struct pci_driver *uncore_pci_driver; + +/* mask of cpus that collect uncore events */ +static cpumask_t uncore_cpu_mask; +static cpumask_t uncore_cpu_subnode_mask; +static cpumask_t uncore_cpu_cluster_mask; + +/* constraint for the fixed counter */ +static struct event_constraint uncore_constraint_fixed = + EVENT_CONSTRAINT(~0ULL, 1 << UNCORE_PMC_IDX_FIXED, ~0ULL); + +static int max_packages, max_subnodes, max_clusters; +static int clusters_per_subnode; +static int subnodes_per_die; +static int dies_per_socket; + +#define KH40000_MAX_SUBNODE_NUMBER 8 +static int kh40000_pcibus_limit[KH40000_MAX_SUBNODE_NUMBER]; + +/* get CPU topology register */ +#define BJ_GLOBAL_STATUS_MSR 0x1610 +#define BJ_HDW_CONFIG_MSR 0x1628 + +/* KX5000/KX6000 event control */ +#define KX5000_UNC_CTL_EV_SEL_MASK 0x000000ff +#define KX5000_UNC_CTL_UMASK_MASK 0x0000ff00 +#define KX5000_UNC_CTL_EDGE_DET (1 << 18) +#define KX5000_UNC_CTL_EN (1 << 22) +#define KX5000_UNC_CTL_INVERT (1 << 23) +#define KX5000_UNC_CTL_CMASK_MASK 0x7000000 +#define KX5000_UNC_FIXED_CTR_CTL_EN (1 << 0) + +#define KX5000_UNC_RAW_EVENT_MASK (KX5000_UNC_CTL_EV_SEL_MASK | \ + KX5000_UNC_CTL_UMASK_MASK | \ + KX5000_UNC_CTL_EDGE_DET | \ + KX5000_UNC_CTL_INVERT | \ + KX5000_UNC_CTL_CMASK_MASK) + +/* KX5000/KX6000 uncore global register */ +#define KX5000_UNC_PERF_GLOBAL_CTL 0x391 +#define KX5000_UNC_FIXED_CTR 0x394 +#define KX5000_UNC_FIXED_CTR_CTRL 0x395 + +/* KX5000/KX6000 uncore global control */ +#define KX5000_UNC_GLOBAL_CTL_EN_PC_ALL ((1ULL << 4) - 1) +#define KX5000_UNC_GLOBAL_CTL_EN_FC (1ULL << 32) + +/* KX5000/KX6000 uncore register */ +#define KX5000_UNC_PERFEVTSEL0 0x3c0 +#define KX5000_UNC_UNCORE_PMC0 0x3b0 + +/* KH40000 event control */ +#define KH40000_PMON_CTL_EV_SEL_MASK 0x000000ff +#define KH40000_PMON_CTL_UMASK_MASK 0x0000ff00 +#define KH40000_PMON_CTL_RST (1 << 17) +#define KH40000_PMON_CTL_EDGE_DET (1 << 18) +#define KH40000_PMON_CTL_EV_SEL_EXT (1 << 21) +#define KH40000_PMON_CTL_EN (1 << 22) +#define KH40000_PMON_CTL_INVERT (1 << 23) +#define KH40000_PMON_CTL_TRESH_MASK 0xff000000 +#define KH40000_PMON_RAW_EVENT_MASK (KH40000_PMON_CTL_EV_SEL_MASK | \ + KH40000_PMON_CTL_UMASK_MASK | \ + KH40000_PMON_CTL_EDGE_DET | \ + KH40000_PMON_CTL_INVERT | \ + KH40000_PMON_CTL_TRESH_MASK) + +/* KH40000 LLC register*/ +#define KH40000_LLC_MSR_PMON_CTL0 0x1660 +#define KH40000_LLC_MSR_PMON_CTR0 0x165c +#define KH40000_LLC_MSR_PMON_BLK_CTL 0x1665 + +/* KH40000 HIF register*/ +#define KH40000_HIF_MSR_PMON_CTL0 0x1656 +#define
KH40000_HIF_MSR_PMON_CTR0 0x1651 +#define KH40000_HIF_MSR_PMON_FIXED_CTL 0x1655 +#define KH40000_HIF_MSR_PMON_FIXED_CTR 0x1650 +#define KH40000_HIF_MSR_PMON_BLK_CTL 0x165b + +/* KH40000 ZZI(ZPI+ZOI+INI) register*/ +#define KH40000_ZZI_MSR_PMON_CTL0 0x166A +#define KH40000_ZZI_MSR_PMON_CTR0 0x1666 +#define KH40000_ZZI_MSR_PMON_BLK_CTL 0x166f + +/* KH40000 MC register*/ +#define KH40000_MC0_CHy_PMON_FIXED_CTL 0xf40 +#define KH40000_MC0_CHy_PMON_FIXED_CTR 0xf20 +#define KH40000_MC0_CHy_PMON_CTR0 0xf00 +#define KH40000_MC0_CHy_PMON_CTL0 0xf28 +#define KH40000_MC0_CHy_PMON_BLK_CTL 0xf44 + +#define KH40000_MC1_CHy_PMON_FIXED_CTL 0xf90 +#define KH40000_MC1_CHy_PMON_FIXED_CTR 0xf70 +#define KH40000_MC1_CHy_PMON_CTR0 0xf50 +#define KH40000_MC1_CHy_PMON_CTL0 0xf78 +#define KH40000_MC1_CHy_PMON_BLK_CTL 0xf94 + +/* KH40000 PCI register*/ +#define KH40000_PCI_PMON_CTR0 0xf00 +#define KH40000_PCI_PMON_CTL0 0xf28 +#define KH40000_PCI_PMON_BLK_CTL 0xf44 + +/* KH40000 ZPI_DLL register*/ +#define KH40000_ZPI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZPI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZPI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZPI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZPI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 ZDI_DLL register*/ +#define KH40000_ZDI_DLL_PMON_FIXED_CTL 0xf40 +#define KH40000_ZDI_DLL_PMON_FIXED_CTR 0xf20 +#define KH40000_ZDI_DLL_PMON_CTR0 0xf00 +#define KH40000_ZDI_DLL_PMON_CTL0 0xf28 +#define KH40000_ZDI_DLL_PMON_BLK_CTL 0xf44 + +/* KH40000 PXPTRF register*/ +#define KH40000_PXPTRF_PMON_CTR0 0xf00 +#define KH40000_PXPTRF_PMON_CTL0 0xf28 +#define KH40000_PXPTRF_PMON_BLK_CTL 0xf44 + +/* KH40000 Box level control */ +#define KH40000_PMON_BOX_CTL_RST_CTRL (1 << 0) +#define KH40000_PMON_BOX_CTL_RST_CTRS (1 << 1) +#define KH40000_PMON_BOX_CTL_FRZ (1 << 8) +#define KH40000_PMON_PCI_BOX_PMON_EN (1 << 31) + +#define KH40000_PMON_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS) + +#define KH40000_PMON_PCI_BOX_CTL_INT (KH40000_PMON_BOX_CTL_RST_CTRL | \ + KH40000_PMON_BOX_CTL_RST_CTRS | \ + KH40000_PMON_PCI_BOX_PMON_EN) + +/* KX8000 LLC register*/ +#define KX8000_LLC_MSR_PMON_CTL0 0x1979 +#define KX8000_LLC_MSR_PMON_CTR0 0x1975 +#define KX8000_LLC_MSR_PMON_BLK_CTL 0x197e + +/* KX8000 MESH register*/ +#define KX8000_MESH_MSR_PMON_CTL0 0x1983 +#define KX8000_MESH_MSR_PMON_CTR0 0x197f +#define KX8000_MESH_MSR_PMON_BLK_CTL 0x1987 + +/* KX8000 HOMESTOP register*/ +#define KX8000_HOMESTOP_MSR_PMON_CTL0 0x196a +#define KX8000_HOMESTOP_MSR_PMON_CTR0 0x1966 +#define KX8000_HOMESTOP_MSR_PMON_BLK_CTL 0x196e +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTR 0x1970 +#define KX8000_HOMESTOP_MSR_PMON_FIXED_CTL 0x1971 + +/* KX8000 CCDie ZDI_PL register*/ +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTL0 0x1960 +#define KX8000_CCD_ZDI_PL_MSR_PMON_CTR0 0x195c +#define KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL 0x1964 + +/* KX8000 cIODie ZDI_PL register*/ +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTL0 0x1894 +#define KX8000_IOD_ZDI_PL_MSR_PMON_CTR0 0x1890 +#define KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL 0x1898 +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR 0x189A +#define KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL 0x189B + +/* KX8000 MC register*/ +#define KX8000_MC_A0_CHy_PMON_FIXED_CTL 0xe30 +#define KX8000_MC_A0_CHy_PMON_FIXED_CTR 0xe08 +#define KX8000_MC_A0_CHy_PMON_CTR0 0xe00 +#define KX8000_MC_A0_CHy_PMON_CTL0 0xe20 +#define KX8000_MC_A0_CHy_PMON_BLK_CTL 0xe34 + +#define KX8000_MC_A1_CHy_PMON_FIXED_CTL 0xe70 +#define KX8000_MC_A1_CHy_PMON_FIXED_CTR 0xe48 +#define KX8000_MC_A1_CHy_PMON_CTR0 0xe40 +#define 
KX8000_MC_A1_CHy_PMON_CTL0 0xe60 +#define KX8000_MC_A1_CHy_PMON_BLK_CTL 0xe74 + +#define KX8000_MC_B0_CHy_PMON_FIXED_CTL 0xeb0 +#define KX8000_MC_B0_CHy_PMON_FIXED_CTR 0xe88 +#define KX8000_MC_B0_CHy_PMON_CTR0 0xe80 +#define KX8000_MC_B0_CHy_PMON_CTL0 0xea0 +#define KX8000_MC_B0_CHy_PMON_BLK_CTL 0xeb4 + +#define KX8000_MC_B1_CHy_PMON_FIXED_CTL 0xef0 +#define KX8000_MC_B1_CHy_PMON_FIXED_CTR 0xec8 +#define KX8000_MC_B1_CHy_PMON_CTR0 0xec0 +#define KX8000_MC_B1_CHy_PMON_CTL0 0xee0 +#define KX8000_MC_B1_CHy_PMON_BLK_CTL 0xef4 + +#define KX8000_ZDI_DL_MMIO_PMON_CTR0 0xf00 +#define KX8000_ZDI_DL_MMIO_PMON_CTL0 0xf28 +#define KX8000_ZDI_DL_MMIO_PMON_BLK_CTL 0xf44 +#define KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET 0x168 +#define KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET 0x170 +#define KX8000_ZDI_DL_MMIO_BASE_MASK 0x3fff +#define KX8000_ZDI_DL_MMIO_MEM0_MASK 0xfffff000 +#define KX8000_ZDI_DL_MMIO_SIZE 0x1000 + +DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7"); +DEFINE_UNCORE_FORMAT_ATTR(umask, umask, "config:8-15"); +DEFINE_UNCORE_FORMAT_ATTR(edge, edge, "config:18"); +DEFINE_UNCORE_FORMAT_ATTR(inv, inv, "config:23"); +DEFINE_UNCORE_FORMAT_ATTR(cmask3, cmask, "config:24-26"); +DEFINE_UNCORE_FORMAT_ATTR(thresh8, thresh, "config:24-31"); + +static void get_hdw_config_msr(void *config) +{ + u64 *data = (u64 *)config; + + rdmsrl(BJ_HDW_CONFIG_MSR, *data); +} + +static void get_global_status_msr(void *status) +{ + u64 *data = (u64 *)status; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, *data); +} + +/* topology numbers: get max packages/subnodes/clusters */ +static void get_topology_number(void) +{ + int clusters; + int subnodes; + int dies; + int packages; + u64 data; + + rdmsrl(BJ_GLOBAL_STATUS_MSR, data); + + /* check packages number */ + packages = data & 0x1; + if (packages) + max_packages = 2; + else + max_packages = 1; + + /* only Yongfeng needs die/subnode/cluster info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + return; + + /* check dies_per_socket */ + dies = (data >> 12) & 0x1; + if (dies) + dies_per_socket = 2; + else + dies_per_socket = 1; + + /* check subnodes_per_die */ + subnodes = (data >> 32) & 0x3; + if (subnodes == 0x3) + subnodes_per_die = 2; + else + subnodes_per_die = 1; + + /* check clusters_per_subnode */ + clusters = (data >> 6) & 0x3; + if (clusters == 0x3) + clusters_per_subnode = 2; + else + clusters_per_subnode = 1; + + max_subnodes = max_packages * dies_per_socket * subnodes_per_die; + max_clusters = clusters_per_subnode * max_subnodes; +} + +static int get_pcibus_limit(void) +{ + struct pci_dev *dev; + u32 val; + int i = 0; + + dev = pci_get_device(0x1D17, 0x31B1, NULL); + if (dev == NULL) + return -ENODEV; + + pci_read_config_dword(dev, 0x94, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + + if (max_packages == 2) { + pci_read_config_dword(dev, 0x9c, &val); + kh40000_pcibus_limit[i++] = (val & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 8 & 0x1f) << 3 | 0x7; + if (dies_per_socket == 2) { + kh40000_pcibus_limit[i++] = (val >> 16 & 0x1f) << 3 | 0x7; + kh40000_pcibus_limit[i++] = (val >> 24 & 0x1f) << 3 | 0x7; + } + } + + return 0; +} + +static int uncore_pcibus_to_subnodeid(struct pci_bus *bus) +{ + int i; + + for (i = 0; i < KH40000_MAX_SUBNODE_NUMBER; i++) { + if (bus->number <
kh40000_pcibus_limit[i]) + break; + } + + return i; +} + +DEFINE_PER_CPU(int, zx_package_id); +DEFINE_PER_CPU(int, zx_subnode_id); +DEFINE_PER_CPU(int, zx_cluster_id); + +static void get_topology_info(void) +{ + int cpu; + int cluster_id; + int socket_id; + int die_id; + int subnode_id; + + int die_info; + int subnode_info; + int cluster_info; + + u64 config; + + for_each_present_cpu(cpu) { + smp_call_function_single(cpu, get_global_status_msr, &config, 1); + socket_id = (int)((config >> 3) & 0x1); + per_cpu(zx_package_id, cpu) = socket_id; + + /* only kh40000 needs cluster and subnode info */ + if (boot_cpu_data.x86_model != ZHAOXIN_FAM7_KH40000) + continue; + + smp_call_function_single(cpu, get_hdw_config_msr, &config, 1); + + die_info = (int)((config >> 21) & 0x3); + die_id = socket_id * dies_per_socket + die_info; + + subnode_info = (int)((config >> 20) & 0x1); + subnode_id = die_id * subnodes_per_die + subnode_info; + per_cpu(zx_subnode_id, cpu) = subnode_id; + + cluster_info = (int)((config >> 18) & 0x3); + cluster_id = subnode_id * clusters_per_subnode + cluster_info; + per_cpu(zx_cluster_id, cpu) = cluster_id; + } +} + +static int zx_topology_cluster_id(int cpu) +{ + return per_cpu(zx_cluster_id, cpu); +} + +static int zx_topology_subnode_id(int cpu) +{ + return per_cpu(zx_subnode_id, cpu); +} + +static int zx_topology_package_id(int cpu) +{ + return per_cpu(zx_package_id, cpu); +} + +DEFINE_PER_CPU(cpumask_t, zx_cluster_core_bits); +DEFINE_PER_CPU(cpumask_t, zx_subnode_core_bits); + +static void zx_gen_core_map(void) +{ + int i, nr, cpu; + int cluster_id, subnode_id; + + for_each_present_cpu(cpu) { + cluster_id = zx_topology_cluster_id(cpu); + + for (i = 0; i < 4; i++) { + nr = (cluster_id << 2) + i; + cpumask_set_cpu(nr, &per_cpu(zx_cluster_core_bits, cpu)); + } + } + + for_each_present_cpu(cpu) { + subnode_id = zx_topology_subnode_id(cpu); + + for (i = 0; i < 8; i++) { + nr = (subnode_id << 3) + i; + cpumask_set_cpu(nr, &per_cpu(zx_subnode_core_bits, cpu)); + } + } +} + +static struct cpumask *topology_cluster_core_cpumask(int cpu) +{ + return &per_cpu(zx_cluster_core_bits, cpu); +} + +static struct cpumask *topology_subnode_core_cpumask(int cpu) +{ + return &per_cpu(zx_subnode_core_bits, cpu); +} + +static void uncore_free_pcibus_map(void) +{ + +} + +static int kh40000_pci2node_map_init(void) +{ + return 0; +} + +ssize_t zx_uncore_event_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uncore_event_desc *event = + container_of(attr, struct uncore_event_desc, attr); + return sprintf(buf, "%s", event->config); +} + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu) +{ + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + return pmu->boxes[zx_topology_cluster_id(cpu)]; + else + return pmu->boxes[zx_topology_subnode_id(cpu)]; + } else { + return pmu->boxes[zx_topology_package_id(cpu)]; + } +} + +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 count; + + WARN_ON_ONCE(box->cpu != smp_processor_id()); + rdmsrl(event->hw.event_base, count); + return count; +} + +static void uncore_assign_hw_event(struct zhaoxin_uncore_box *box, + struct perf_event *event, int idx) +{ + struct hw_perf_event *hwc = &event->hw; + + hwc->idx = idx; + hwc->last_tag = ++box->tags[idx]; + + if (uncore_pmc_fixed(hwc->idx)) { + hwc->event_base = uncore_fixed_ctr(box); + hwc->config_base = uncore_fixed_ctl(box); + return; + } + + 
hwc->config_base = uncore_event_ctl(box, hwc->idx); + hwc->event_base = uncore_perf_ctr(box, hwc->idx); +} + +void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + u64 prev_count, new_count, delta; + int shift; + + if (uncore_pmc_fixed(event->hw.idx)) + shift = 64 - uncore_fixed_ctr_bits(box); + else + shift = 64 - uncore_perf_ctr_bits(box); + + /* the hrtimer might modify the previous event value */ +again: + prev_count = local64_read(&event->hw.prev_count); + new_count = uncore_read_counter(box, event); + if (local64_xchg(&event->hw.prev_count, new_count) != prev_count) + goto again; + + delta = (new_count << shift) - (prev_count << shift); + delta >>= shift; + + local64_add(delta, &event->count); +} + +/*KX5000/KX6000 uncore ops start*/ +static void kx5000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + wrmsrl(event->hw.config_base, 0); +} + +static void kx5000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, 0); +} + +static void kx5000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + wrmsrl(KX5000_UNC_PERF_GLOBAL_CTL, + KX5000_UNC_GLOBAL_CTL_EN_PC_ALL | KX5000_UNC_GLOBAL_CTL_EN_FC); +} + +static void kx5000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (hwc->idx < UNCORE_PMC_IDX_FIXED) + wrmsrl(hwc->config_base, hwc->config | KX5000_UNC_CTL_EN); + else + wrmsrl(hwc->config_base, KX5000_UNC_FIXED_CTR_CTL_EN); +} + +static struct attribute *kx5000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_cmask3.attr, + NULL, +}; + +static struct attribute_group kx5000_uncore_format_group = { + .name = "format", + .attrs = kx5000_uncore_formats_attr, +}; + +static struct uncore_event_desc kx5000_uncore_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kx5000_uncore_msr_ops = { + .disable_box = kx5000_uncore_msr_disable_box, + .enable_box = kx5000_uncore_msr_enable_box, + .disable_event = kx5000_uncore_msr_disable_event, + .enable_event = kx5000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kx5000_uncore_box = { + .name = "", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX5000_UNC_PERFEVTSEL0, + .perf_ctr = KX5000_UNC_UNCORE_PMC0, + .fixed_ctr = KX5000_UNC_FIXED_CTR, + .fixed_ctl = KX5000_UNC_FIXED_CTR_CTRL, + .event_mask = KX5000_UNC_RAW_EVENT_MASK, + .event_descs = kx5000_uncore_events, + .ops = &kx5000_uncore_msr_ops, + .format_group = &kx5000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx5000_msr_uncores[] = { + &kx5000_uncore_box, + NULL, +}; +/*KX5000/KX6000 uncore ops end*/ + +/*KH40000 msr ops start*/ +static void kh40000_uncore_msr_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config); +} + +static void kh40000_uncore_msr_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + wrmsrl(hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_msr_disable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config |= 
KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_enable_box(struct zhaoxin_uncore_box *box) +{ + u64 config; + unsigned int msr; + + msr = uncore_msr_box_ctl(box); + if (msr) { + rdmsrl(msr, config); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + wrmsrl(msr, config); + } +} + +static void kh40000_uncore_msr_init_box(struct zhaoxin_uncore_box *box) +{ + unsigned int msr = uncore_msr_box_ctl(box); + + if (msr) { + wrmsrl(msr, KH40000_PMON_BOX_CTL_INT); + wrmsrl(msr, 0); + } +} + +static struct attribute *kh40000_uncore_formats_attr[] = { + &format_attr_event.attr, + &format_attr_umask.attr, + &format_attr_edge.attr, + &format_attr_inv.attr, + &format_attr_thresh8.attr, + NULL, +}; + +static struct attribute_group kh40000_uncore_format_group = { + .name = "format", + .attrs = kh40000_uncore_formats_attr, +}; + +static struct uncore_event_desc kh40000_uncore_llc_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_hif_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zzi_box_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_msr_ops = { + .init_box = kh40000_uncore_msr_init_box, + .disable_box = kh40000_uncore_msr_disable_box, + .enable_box = kh40000_uncore_msr_enable_box, + .disable_event = kh40000_uncore_msr_disable_event, + .enable_event = kh40000_uncore_msr_enable_event, + .read_counter = uncore_msr_read_counter, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_LLC_MSR_PMON_CTL0, + .perf_ctr = KH40000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_LLC_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_llc_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_hif_box = { + .name = "hif", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KH40000_HIF_MSR_PMON_CTL0, + .perf_ctr = KH40000_HIF_MSR_PMON_CTR0, + .fixed_ctr = KH40000_HIF_MSR_PMON_FIXED_CTR, + .fixed_ctl = KH40000_HIF_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_HIF_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_hif_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zzi_box = { + .name = "zzi", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_ctl = KH40000_ZZI_MSR_PMON_CTL0, + .perf_ctr = KH40000_ZZI_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZZI_MSR_PMON_BLK_CTL, + .event_descs = kh40000_uncore_zzi_box_events, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kh40000_msr_uncores[] = { + &kh40000_uncore_llc_box, + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; +/*KH40000 msr ops end*/ + +/*KH40000 pci ops start*/ +static void kh40000_uncore_pci_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config); +} + +static void kh40000_uncore_pci_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) 
+{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + + pci_write_config_dword(pdev, hwc->config_base, hwc->config | KH40000_PMON_CTL_EN); +} + +static void kh40000_uncore_pci_disable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config |= KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static void kh40000_uncore_pci_enable_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + u32 config = 0; + + if (!pci_read_config_dword(pdev, box_ctl, &config)) { + config &= ~KH40000_PMON_BOX_CTL_FRZ; + pci_write_config_dword(pdev, box_ctl, config); + } +} + +static u64 kh40000_uncore_pci_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count + 1); + pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count); + + return count; +} + +static void kh40000_uncore_pci_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = box->pci_dev; + int box_ctl = uncore_pci_box_ctl(box); + + pci_write_config_dword(pdev, box_ctl, KH40000_PMON_PCI_BOX_CTL_INT); +} + +static struct uncore_event_desc kh40000_uncore_imc_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pci_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zpi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_zdi_dll_events[] = { + { /* end: all zeroes */ }, +}; + +static struct uncore_event_desc kh40000_uncore_pxptrf_events[] = { + { /* end: all zeroes */ }, +}; + +static struct zhaoxin_uncore_ops kh40000_uncore_pci_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kh40000_uncore_pci_read_counter +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc0 = { + .name = "mc0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC0_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC0_CHy_PMON_CTR0, + .event_ctl = KH40000_MC0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC0_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_mc1 = { + .name = "mc1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KH40000_MC1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KH40000_MC1_CHy_PMON_FIXED_CTL, + .event_descs = kh40000_uncore_imc_events, + .perf_ctr = KH40000_MC1_CHy_PMON_CTR0, + .event_ctl = KH40000_MC1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_MC1_CHy_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 10, + .perf_ctr_bits = 
48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zpi_dll = { + .name = "zpi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zpi_dll_events, + .perf_ctr = KH40000_ZPI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZPI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZPI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_zdi_dll = { + .name = "zdi_dll", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_zdi_dll_events, + .perf_ctr = KH40000_ZDI_DLL_PMON_CTR0, + .event_ctl = KH40000_ZDI_DLL_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_ZDI_DLL_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kh40000_uncore_pxptrf = { + .name = "pxptrf", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pxptrf_events, + .perf_ctr = KH40000_PXPTRF_PMON_CTR0, + .event_ctl = KH40000_PXPTRF_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PXPTRF_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + +enum { + KH40000_PCI_UNCORE_MC0, + KH40000_PCI_UNCORE_MC1, + KH40000_PCI_UNCORE_PCI, + KH40000_PCI_UNCORE_ZPI_DLL, + KH40000_PCI_UNCORE_ZDI_DLL, + KH40000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kh40000_pci_uncores[] = { + [KH40000_PCI_UNCORE_MC0] = &kh40000_uncore_mc0, + [KH40000_PCI_UNCORE_MC1] = &kh40000_uncore_mc1, + [KH40000_PCI_UNCORE_PCI] = &kh40000_uncore_pci, + [KH40000_PCI_UNCORE_ZPI_DLL] = &kh40000_uncore_zpi_dll, + [KH40000_PCI_UNCORE_ZDI_DLL] = &kh40000_uncore_zdi_dll, + [KH40000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kh40000_uncore_pci_ids[] = { + { /* MC Channe0/1 */ + PCI_DEVICE(0x1D17, 0x31b2), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_MC0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x071A), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x0731), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 
0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PCI, 9), + }, + + { /* ZPI_DLL */ + PCI_DEVICE(0x1D17, 0x91c1), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZPI_DLL, 0), + }, + + { /* ZDI_DLL */ + PCI_DEVICE(0x1D17, 0x3b03), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_ZDI_DLL, 0), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KH40000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + +static struct pci_driver kh40000_uncore_pci_driver = { + .name = "kh40000_uncore", + .id_table = kh40000_uncore_pci_ids, +}; +/*KH40000 pci ops end*/ + + +/*KX8000 msr ops start*/ +static unsigned int kx8000_uncore_msr_offsets[] = { + 0x0, 0x13, 0x27, 0x3b, 0x4f, 0x63, 0x77, 0x8b +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mesh_box = { + .name = "mesh", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_MESH_MSR_PMON_CTL0, + .perf_ctr = KX8000_MESH_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MESH_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_llc_box = { + .name = "llc", + .num_counters = 4, + .num_boxes = 8, + .perf_ctr_bits = 48, + .event_ctl = KX8000_LLC_MSR_PMON_CTL0, + .perf_ctr = KX8000_LLC_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_LLC_MSR_PMON_BLK_CTL, + .msr_offsets = kx8000_uncore_msr_offsets, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_homestop = { + .name = "homestop", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_HOMESTOP_MSR_PMON_CTL0, + .perf_ctr = KX8000_HOMESTOP_MSR_PMON_CTR0, + .fixed_ctr = KX8000_HOMESTOP_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_HOMESTOP_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_HOMESTOP_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_pl = { + .name = "ccd_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_CCD_ZDI_PL_MSR_PMON_CTR0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_CCD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_pl = { + .name = "iod_zdi_pl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .event_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_CTL0, + .perf_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_CTR0, + .fixed_ctr = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTR, + .fixed_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_FIXED_CTL, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_IOD_ZDI_PL_MSR_PMON_BLK_CTL, + .ops = &kh40000_uncore_msr_ops, + .format_group = &kh40000_uncore_format_group, +}; + + +static struct zhaoxin_uncore_type *kx8000_msr_uncores[] = { + &kx8000_uncore_llc_box, + &kx8000_uncore_mesh_box, + &kh40000_uncore_hif_box, + &kx8000_uncore_homestop, + &kx8000_uncore_ccd_zdi_pl, + &kx8000_uncore_iod_zdi_pl, + NULL, +}; +/*KX8000 msr ops end*/ + +/*KX8000 pci ops start*/ +static unsigned int kx8000_mc_ctr_lh_offsets[] = { + 
0xc, 0xe, 0x10, 0x12, 0x14 +}; + +static u64 kx8000_uncore_pci_mc_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct pci_dev *pdev = box->pci_dev; + struct hw_perf_event *hwc = &event->hw; + u64 count = 0; + + pci_read_config_word(pdev, hwc->event_base, (u16 *)&count + 3); + pci_read_config_dword(pdev, hwc->event_base + kx8000_mc_ctr_lh_offsets[hwc->idx], + (u32 *)&count); + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_pci_mc_ops = { + .init_box = kh40000_uncore_pci_init_box, + .disable_box = kh40000_uncore_pci_disable_box, + .enable_box = kh40000_uncore_pci_enable_box, + .disable_event = kh40000_uncore_pci_disable_event, + .enable_event = kh40000_uncore_pci_enable_event, + .read_counter = kx8000_uncore_pci_mc_read_counter +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a0 = { + .name = "mc_a0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_A0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_a1 = { + .name = "mc_a1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_A1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_A1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_A1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_A1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_A1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b0 = { + .name = "mc_b0", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B0_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B0_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B0_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B0_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B0_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_mc_b1 = { + .name = "mc_b1", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .fixed_ctr = KX8000_MC_B1_CHy_PMON_FIXED_CTR, + .fixed_ctl = KX8000_MC_B1_CHy_PMON_FIXED_CTL, + .perf_ctr = KX8000_MC_B1_CHy_PMON_CTR0, + .event_ctl = KX8000_MC_B1_CHy_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_MC_B1_CHy_PMON_BLK_CTL, + .ops = &kx8000_uncore_pci_mc_ops, + .format_group = &kh40000_uncore_format_group +}; + +static struct zhaoxin_uncore_type kx8000_uncore_pci = { + .name = "pci", + .num_counters = 4, + .num_boxes = 17, + .perf_ctr_bits = 48, + .event_descs = kh40000_uncore_pci_events, + .perf_ctr = KH40000_PCI_PMON_CTR0, + .event_ctl = KH40000_PCI_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KH40000_PCI_PMON_BLK_CTL, + .ops = &kh40000_uncore_pci_ops, + .format_group = &kh40000_uncore_format_group +}; + + +enum { + KX8000_PCI_UNCORE_MC_A0, + KX8000_PCI_UNCORE_MC_A1, + KX8000_PCI_UNCORE_MC_B0, + KX8000_PCI_UNCORE_MC_B1, + KX8000_PCI_UNCORE_PCI, + KX8000_PCI_UNCORE_PXPTRF, +}; + +static struct zhaoxin_uncore_type *kx8000_pci_uncores[] = { 
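+	/*
+	 * Slots are keyed by the KX8000_PCI_UNCORE_* enum above; the
+	 * trailing NULL terminates the list for uncore_types_init().
+	 */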
+ [KX8000_PCI_UNCORE_MC_A0] = &kx8000_uncore_mc_a0, + [KX8000_PCI_UNCORE_MC_A1] = &kx8000_uncore_mc_a1, + [KX8000_PCI_UNCORE_MC_B0] = &kx8000_uncore_mc_b0, + [KX8000_PCI_UNCORE_MC_B1] = &kx8000_uncore_mc_b1, + [KX8000_PCI_UNCORE_PCI] = &kx8000_uncore_pci, + [KX8000_PCI_UNCORE_PXPTRF] = &kh40000_uncore_pxptrf, + NULL, +}; + +static const struct pci_device_id kx8000_uncore_pci_ids[] = { + { /* MC Channe A0/A1/B0/B1 */ + PCI_DEVICE(0x1D17, 0x31B2), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_MC_A0, 0), + }, + + { /* PCIE D2F0 */ + PCI_DEVICE(0x1D17, 0x0717), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 0), + }, + + { /* PCIE D2F1 */ + PCI_DEVICE(0x1D17, 0x0718), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 1), + }, + + { /* PCIE D2F2 */ + PCI_DEVICE(0x1D17, 0x0733), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 2), + }, + + { /* PCIE D2F3 */ + PCI_DEVICE(0x1D17, 0x0734), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 3), + }, + + { /* PCIE D3F0 */ + PCI_DEVICE(0x1D17, 0x0719), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 4), + }, + + { /* PCIE D3F1 */ + PCI_DEVICE(0x1D17, 0x0735), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 5), + }, + + { /* PCIE D3F2 */ + PCI_DEVICE(0x1D17, 0x0739), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 6), + }, + + { /* PCIE D3F3 */ + PCI_DEVICE(0x1D17, 0x073A), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 7), + }, + + { /* PCIE D4F0 */ + PCI_DEVICE(0x1D17, 0x071B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 8), + }, + + { /* PCIE D4F1 */ + PCI_DEVICE(0x1D17, 0x071C), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 9), + }, + + { /* PCIE D4F2 */ + PCI_DEVICE(0x1D17, 0x0736), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 10), + }, + + { /* PCIE D4F3 */ + PCI_DEVICE(0x1D17, 0x0737), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 11), + }, + + { /* PCIE D4F4 */ + PCI_DEVICE(0x1D17, 0x0738), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 12), + }, + + { /* PCIE D5F0 */ + PCI_DEVICE(0x1D17, 0x071D), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 13), + }, + + { /* PCIE D5F1 */ + PCI_DEVICE(0x1D17, 0x071E), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 14), + }, + + { /* PCIE D5F2 */ + PCI_DEVICE(0x1D17, 0x0732), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 15), + }, + + { /* PCIE D5F3 */ + PCI_DEVICE(0x1D17, 0x073B), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PCI, 16), + }, + + { /* PXPTRF */ + PCI_DEVICE(0x1D17, 0x31B4), + .driver_data = UNCORE_PCI_DEV_DATA(KX8000_PCI_UNCORE_PXPTRF, 0), + }, + + { /* end: all zeroes */ } +}; + + +static struct pci_driver kx8000_uncore_pci_driver = { + .name = "kx8000_uncore", + .id_table = kx8000_uncore_pci_ids, +}; +/*KX8000 pci ops end*/ + +/*KX8000 mmio ops start*/ +static void kx8000_uncore_mmio_init_box(struct zhaoxin_uncore_box *box) +{ + struct pci_dev *pdev = NULL; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + resource_size_t addr; + u32 pci_dword; + int mmio_base_offset; + + pdev = pci_get_device(0x1d17, 0x31b1, pdev); + if (!pdev) + return; + + if (!strcmp(box->pmu->name, "iod_zdi_dl")) + mmio_base_offset = KX8000_IOD_ZDI_DL_MMIO_BASE_OFFSET; + else + mmio_base_offset = KX8000_CCD_ZDI_DL_MMIO_BASE_OFFSET; + + pci_read_config_dword(pdev, mmio_base_offset, &pci_dword); + addr = (u64)(pci_dword & KX8000_ZDI_DL_MMIO_BASE_MASK) << 32; + + pci_read_config_dword(pdev, 
mmio_base_offset + 4, &pci_dword); + addr |= pci_dword & KX8000_ZDI_DL_MMIO_MEM0_MASK; + + box->io_addr = ioremap(addr, KX8000_ZDI_DL_MMIO_SIZE); + if (!box->io_addr) + return; + + writel(KH40000_PMON_PCI_BOX_CTL_INT, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_disable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config |= KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_box(struct zhaoxin_uncore_box *box) +{ + u32 config; + unsigned int box_ctl = uncore_mmio_box_ctl(box); + + if (!box->io_addr) + return; + + config = readl(box->io_addr + box_ctl); + config &= ~KH40000_PMON_BOX_CTL_FRZ; + writel(config, box->io_addr + box_ctl); +} + +static void kx8000_uncore_mmio_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config | KH40000_PMON_CTL_EN, box->io_addr + hwc->config_base); +} + +static void kx8000_uncore_mmio_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!box->io_addr) + return; + + writel(hwc->config, box->io_addr + hwc->config_base); +} + +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box) +{ + if (box->io_addr) + iounmap(box->io_addr); +} + +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + u64 count = 0; + u64 count_low = 0; + u64 count_high = 0; + + if (!box->io_addr) + return 0; + + count_high = readl(box->io_addr + event->hw.event_base) & 0xffff; + count_low = readl(box->io_addr + event->hw.event_base + 4); + count = (count_high << 32) + count_low; + + return count; +} + +static struct zhaoxin_uncore_ops kx8000_uncore_mmio_ops = { + .init_box = kx8000_uncore_mmio_init_box, + .exit_box = uncore_mmio_exit_box, + .disable_box = kx8000_uncore_mmio_disable_box, + .enable_box = kx8000_uncore_mmio_enable_box, + .disable_event = kx8000_uncore_mmio_disable_event, + .enable_event = kx8000_uncore_mmio_enable_event, + .read_counter = uncore_mmio_read_counter, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_iod_zdi_dl = { + .name = "iod_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type kx8000_uncore_ccd_zdi_dl = { + .name = "ccd_zdi_dl", + .num_counters = 4, + .num_boxes = 1, + .perf_ctr_bits = 48, + .fixed_ctr_bits = 48, + .perf_ctr = KX8000_ZDI_DL_MMIO_PMON_CTR0, + .event_ctl = KX8000_ZDI_DL_MMIO_PMON_CTL0, + .event_mask = KH40000_PMON_RAW_EVENT_MASK, + .box_ctl = KX8000_ZDI_DL_MMIO_PMON_BLK_CTL, + .ops = &kx8000_uncore_mmio_ops, + .format_group = &kh40000_uncore_format_group, +}; + +static struct zhaoxin_uncore_type *kx8000_mmio_uncores[] = { + &kx8000_uncore_iod_zdi_dl, + &kx8000_uncore_ccd_zdi_dl, + NULL, +}; + +/*KX8000 mmio ops end*/ + + + +static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer) +{ + struct zhaoxin_uncore_box *box; + struct perf_event *event; + unsigned long flags; + int bit; + + box = container_of(hrtimer, struct 
zhaoxin_uncore_box, hrtimer); + if (!box->n_active || box->cpu != smp_processor_id()) + return HRTIMER_NORESTART; + /* + * disable local interrupt to prevent uncore_pmu_event_start/stop + * to interrupt the update process + */ + local_irq_save(flags); + + /* + * handle boxes with an active event list as opposed to active + * counters + */ + list_for_each_entry(event, &box->active_list, active_entry) { + uncore_perf_event_update(box, event); + } + + for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX) + uncore_perf_event_update(box, box->events[bit]); + + local_irq_restore(flags); + + hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration)); + return HRTIMER_RESTART; +} + +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration), + HRTIMER_MODE_REL_PINNED); +} + +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_cancel(&box->hrtimer); +} + +static void uncore_pmu_init_hrtimer(struct zhaoxin_uncore_box *box) +{ + hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + box->hrtimer.function = uncore_pmu_hrtimer; +} + +static struct zhaoxin_uncore_box *uncore_alloc_box(struct zhaoxin_uncore_type *type, + int node) +{ + int i, size, numshared = type->num_shared_regs; + struct zhaoxin_uncore_box *box; + + size = sizeof(*box) + numshared * sizeof(struct zhaoxin_uncore_extra_reg); + + box = kzalloc_node(size, GFP_KERNEL, node); + if (!box) + return NULL; + + for (i = 0; i < numshared; i++) + raw_spin_lock_init(&box->shared_regs[i].lock); + + uncore_pmu_init_hrtimer(box); + box->cpu = -1; + box->package_id = -1; + box->cluster_id = -1; + box->subnode_id = -1; + + /* set default hrtimer timeout */ + box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL; + + INIT_LIST_HEAD(&box->active_list); + + return box; +} + +static bool is_box_event(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + return &box->pmu->pmu == event->pmu; +} + +static int +uncore_collect_events(struct zhaoxin_uncore_box *box, struct perf_event *leader, + bool dogrp) +{ + struct perf_event *event; + int n, max_count; + + max_count = box->pmu->type->num_counters; + if (box->pmu->type->fixed_ctl) + max_count++; + + if (box->n_events >= max_count) + return -EINVAL; + + n = box->n_events; + + if (is_box_event(box, leader)) { + box->event_list[n] = leader; + n++; + } + + if (!dogrp) + return n; + + for_each_sibling_event(event, leader) { + if (!is_box_event(box, event) || + event->state <= PERF_EVENT_STATE_OFF) + continue; + + if (n >= max_count) + return -EINVAL; + + box->event_list[n] = event; + n++; + } + return n; +} + +static struct event_constraint * +uncore_get_event_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event) +{ + struct zhaoxin_uncore_type *type = box->pmu->type; + struct event_constraint *c; + + if (type->ops->get_constraint) { + c = type->ops->get_constraint(box, event); + if (c) + return c; + } + + if (event->attr.config == UNCORE_FIXED_EVENT) + return &uncore_constraint_fixed; + + if (type->constraints) { + for_each_event_constraint(c, type->constraints) { + if ((event->hw.config & c->cmask) == c->code) + return c; + } + } + + return &type->unconstrainted; +} + +static void uncore_put_event_constraint(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + if (box->pmu->type->ops->put_constraint) + box->pmu->type->ops->put_constraint(box, event); +} + +static int uncore_assign_events(struct zhaoxin_uncore_box *box, int assign[], int n) +{ + 
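+	/*
+	 * Two passes: the fastpath below keeps every event on the counter
+	 * it used last time, as long as its constraint still allows that
+	 * counter and no other event has claimed it; if any event falls
+	 * out, perf_assign_events() re-solves the whole assignment,
+	 * ordered by constraint weight (wmin..wmax).
+	 */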
unsigned long used_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+	struct event_constraint *c;
+	int i, wmin, wmax, ret = 0;
+	struct hw_perf_event *hwc;
+
+	bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
+
+	for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
+		c = uncore_get_event_constraint(box, box->event_list[i]);
+		box->event_constraint[i] = c;
+		wmin = min(wmin, c->weight);
+		wmax = max(wmax, c->weight);
+	}
+
+	/* fastpath, try to reuse previous register */
+	for (i = 0; i < n; i++) {
+		hwc = &box->event_list[i]->hw;
+		c = box->event_constraint[i];
+
+		/* never assigned */
+		if (hwc->idx == -1)
+			break;
+
+		/* constraint still honored */
+		if (!test_bit(hwc->idx, c->idxmsk))
+			break;
+
+		/* not already used */
+		if (test_bit(hwc->idx, used_mask))
+			break;
+
+		__set_bit(hwc->idx, used_mask);
+		if (assign)
+			assign[i] = hwc->idx;
+	}
+	/* slow path */
+	if (i != n)
+		ret = perf_assign_events(box->event_constraint, n,
+					 wmin, wmax, n, assign);
+
+	if (!assign || ret) {
+		for (i = 0; i < n; i++)
+			uncore_put_event_constraint(box, box->event_list[i]);
+	}
+	return ret ? -EINVAL : 0;
+}
+
+static void uncore_pmu_event_start(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	int idx = event->hw.idx;
+
+	if (WARN_ON_ONCE(idx == -1 || idx >= UNCORE_PMC_IDX_MAX))
+		return;
+
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
+	event->hw.state = 0;
+	box->events[idx] = event;
+	box->n_active++;
+	__set_bit(idx, box->active_mask);
+
+	local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
+	uncore_enable_event(box, event);
+
+	if (box->n_active == 1)
+		uncore_pmu_start_hrtimer(box);
+}
+
+static void uncore_pmu_event_stop(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
+		uncore_disable_event(box, event);
+		box->n_active--;
+		box->events[hwc->idx] = NULL;
+		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+		hwc->state |= PERF_HES_STOPPED;
+
+		if (box->n_active == 0)
+			uncore_pmu_cancel_hrtimer(box);
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+		/*
+		 * Drain the remaining delta count out of an event
+		 * that we are disabling:
+		 */
+		uncore_perf_event_update(box, event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+}
+
+static int uncore_pmu_event_add(struct perf_event *event, int flags)
+{
+	struct zhaoxin_uncore_box *box = uncore_event_to_box(event);
+	struct hw_perf_event *hwc = &event->hw;
+	int assign[UNCORE_PMC_IDX_MAX];
+	int i, n, ret;
+
+	if (!box)
+		return -ENODEV;
+
+	ret = n = uncore_collect_events(box, event, false);
+	if (ret < 0)
+		return ret;
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+	if (!(flags & PERF_EF_START))
+		hwc->state |= PERF_HES_ARCH;
+
+	ret = uncore_assign_events(box, assign, n);
+	if (ret)
+		return ret;
+
+	/* save events moving to new counters */
+	for (i = 0; i < box->n_events; i++) {
+		event = box->event_list[i];
+		hwc = &event->hw;
+
+		if (hwc->idx == assign[i] &&
+		    hwc->last_tag == box->tags[assign[i]])
+			continue;
+		/*
+		 * Ensure we don't accidentally enable a stopped
+		 * counter simply because we rescheduled.
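+		 * Marking such an event with PERF_HES_ARCH makes the
+		 * reprogramming loop below skip its restart.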
+ */ + if (hwc->state & PERF_HES_STOPPED) + hwc->state |= PERF_HES_ARCH; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + } + + /* reprogram moved events into new counters */ + for (i = 0; i < n; i++) { + event = box->event_list[i]; + hwc = &event->hw; + + if (hwc->idx != assign[i] || + hwc->last_tag != box->tags[assign[i]]) + uncore_assign_hw_event(box, event, assign[i]); + else if (i < box->n_events) + continue; + + if (hwc->state & PERF_HES_ARCH) + continue; + + uncore_pmu_event_start(event, 0); + } + box->n_events = n; + + return 0; +} + +static void uncore_pmu_event_del(struct perf_event *event, int flags) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + int i; + + uncore_pmu_event_stop(event, PERF_EF_UPDATE); + + for (i = 0; i < box->n_events; i++) { + if (event == box->event_list[i]) { + uncore_put_event_constraint(box, event); + + for (++i; i < box->n_events; i++) + box->event_list[i - 1] = box->event_list[i]; + + --box->n_events; + break; + } + } + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; +} + +static void uncore_pmu_event_read(struct perf_event *event) +{ + struct zhaoxin_uncore_box *box = uncore_event_to_box(event); + + uncore_perf_event_update(box, event); +} + +static int uncore_validate_group(struct zhaoxin_uncore_pmu *pmu, + struct perf_event *event) +{ + struct perf_event *leader = event->group_leader; + struct zhaoxin_uncore_box *fake_box; + int ret = -EINVAL, n; + + fake_box = uncore_alloc_box(pmu->type, NUMA_NO_NODE); + if (!fake_box) + return -ENOMEM; + + fake_box->pmu = pmu; + /* + * the event is not yet connected with its + * siblings therefore we must first collect + * existing siblings, then add the new event + * before we can simulate the scheduling + */ + n = uncore_collect_events(fake_box, leader, true); + if (n < 0) + goto out; + + fake_box->n_events = n; + n = uncore_collect_events(fake_box, event, false); + if (n < 0) + goto out; + + fake_box->n_events = n; + + ret = uncore_assign_events(fake_box, NULL, n); +out: + kfree(fake_box); + return ret; +} + +static int uncore_pmu_event_init(struct perf_event *event) +{ + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct hw_perf_event *hwc = &event->hw; + int ret; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + pmu = uncore_event_to_pmu(event); + /* no device found for this pmu */ + if (pmu->func_id < 0) + return -ENOENT; + + /* Sampling not supported yet */ + if (hwc->sample_period) + return -EINVAL; + + /* + * Place all uncore events for a particular physical package + * onto a single cpu + */ + if (event->cpu < 0) + return -EINVAL; + box = uncore_pmu_to_box(pmu, event->cpu); + if (!box || box->cpu < 0) + return -EINVAL; + event->cpu = box->cpu; + event->pmu_private = box; + + //event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; + + event->hw.idx = -1; + event->hw.last_tag = ~0ULL; + event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; + + if (event->attr.config == UNCORE_FIXED_EVENT) { + /* no fixed counter */ + if (!pmu->type->fixed_ctl) + return -EINVAL; + /* + * if there is only one fixed counter, only the first pmu + * can access the fixed counter + */ + if (pmu->type->single_fixed && pmu->pmu_idx > 0) + return -EINVAL; + + /* fixed counters have event field hardcoded to zero */ + hwc->config = 0ULL; + } else { + hwc->config = event->attr.config & + (pmu->type->event_mask | ((u64)pmu->type->event_mask_ext << 32)); + if (pmu->type->ops->hw_config) { + ret = pmu->type->ops->hw_config(box, event); + if 
(ret) + return ret; + } + } + + if (event->group_leader != event) + ret = uncore_validate_group(pmu, event); + else + ret = 0; + + return ret; +} + +static void uncore_pmu_enable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->enable_box) + uncore_pmu->type->ops->enable_box(box); +} + +static void uncore_pmu_disable(struct pmu *pmu) +{ + struct zhaoxin_uncore_pmu *uncore_pmu; + struct zhaoxin_uncore_box *box; + + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + if (!uncore_pmu) + return; + + box = uncore_pmu_to_box(uncore_pmu, smp_processor_id()); + if (!box) + return; + + if (uncore_pmu->type->ops->disable_box) + uncore_pmu->type->ops->disable_box(box); +} + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + cpumask_t *active_mask; + struct pmu *pmu; + struct zhaoxin_uncore_pmu *uncore_pmu; + + pmu = dev_get_drvdata(dev); + uncore_pmu = container_of(pmu, struct zhaoxin_uncore_pmu, pmu); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(uncore_pmu->type->name, "llc")) + active_mask = &uncore_cpu_cluster_mask; + else + active_mask = &uncore_cpu_subnode_mask; + } else { + active_mask = &uncore_cpu_mask; + } + return cpumap_print_to_pagebuf(true, buf, active_mask); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + +static int uncore_pmu_register(struct zhaoxin_uncore_pmu *pmu) +{ + int ret; + + if (!pmu->type->pmu) { + pmu->pmu = (struct pmu) { + .attr_groups = pmu->type->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = uncore_pmu_enable, + .pmu_disable = uncore_pmu_disable, + .event_init = uncore_pmu_event_init, + .add = uncore_pmu_event_add, + .del = uncore_pmu_event_del, + .start = uncore_pmu_event_start, + .stop = uncore_pmu_event_stop, + .read = uncore_pmu_event_read, + .module = THIS_MODULE, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + }; + } else { + pmu->pmu = *pmu->type->pmu; + pmu->pmu.attr_groups = pmu->type->attr_groups; + } + + if (pmu->type->num_boxes == 1) { + if (strlen(pmu->type->name) > 0) + sprintf(pmu->name, "uncore_%s", pmu->type->name); + else + sprintf(pmu->name, "uncore"); + } else { + sprintf(pmu->name, "uncore_%s_%d", pmu->type->name, + pmu->pmu_idx); + } + + ret = perf_pmu_register(&pmu->pmu, pmu->name, -1); + if (!ret) + pmu->registered = true; + return ret; +} + +static void uncore_pmu_unregister(struct zhaoxin_uncore_pmu *pmu) +{ + if (!pmu->registered) + return; + perf_pmu_unregister(&pmu->pmu); + pmu->registered = false; +} + +static void uncore_free_boxes(struct zhaoxin_uncore_pmu *pmu) +{ + int i, max; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(pmu->type->name, "llc")) + max = max_clusters; + else + max = max_subnodes; + } else { + max = max_packages; + } + + for (i = 0; i < max; i++) + kfree(pmu->boxes[i]); + kfree(pmu->boxes); +} + +static void uncore_type_exit(struct zhaoxin_uncore_type *type) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + int i; + + if (pmu) { + for (i = 0; i < type->num_boxes; i++, pmu++) { + uncore_pmu_unregister(pmu); + uncore_free_boxes(pmu); + } + kfree(type->pmus); + 
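+		/* forget the freed array; a repeated exit on this type is then a no-op */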
type->pmus = NULL; + } + kfree(type->events_group); + type->events_group = NULL; +} + +static void uncore_types_exit(struct zhaoxin_uncore_type **types) +{ + for (; *types; types++) + uncore_type_exit(*types); +} + +static int __init uncore_type_init(struct zhaoxin_uncore_type *type, bool setid) +{ + struct zhaoxin_uncore_pmu *pmus; + size_t size; + int i, j; + + pmus = kcalloc(type->num_boxes, sizeof(*pmus), GFP_KERNEL); + if (!pmus) + return -ENOMEM; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) + size = max_clusters * sizeof(struct zhaoxin_uncore_box *); + else + size = max_subnodes * sizeof(struct zhaoxin_uncore_box *); + } else { + size = max_packages * sizeof(struct zhaoxin_uncore_box *); + } + + for (i = 0; i < type->num_boxes; i++) { + pmus[i].func_id = setid ? i : -1; + pmus[i].pmu_idx = i; + pmus[i].type = type; + pmus[i].boxes = kzalloc(size, GFP_KERNEL); + if (!pmus[i].boxes) + goto err; + } + + type->pmus = pmus; + type->unconstrainted = (struct event_constraint) + __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, + 0, type->num_counters, 0, 0); + + if (type->event_descs) { + struct { + struct attribute_group group; + struct attribute *attrs[]; + } *attr_group; + for (i = 0; type->event_descs[i].attr.attr.name; i++) + ; + + attr_group = kzalloc(struct_size(attr_group, attrs, i + 1), GFP_KERNEL); + if (!attr_group) + goto err; + + attr_group->group.name = "events"; + attr_group->group.attrs = attr_group->attrs; + + for (j = 0; j < i; j++) + attr_group->attrs[j] = &type->event_descs[j].attr.attr; + + type->events_group = &attr_group->group; + } + + type->pmu_group = &uncore_pmu_attr_group; + + return 0; + +err: + for (i = 0; i < type->num_boxes; i++) + kfree(pmus[i].boxes); + kfree(pmus); + + return -ENOMEM; +} + +static int __init +uncore_types_init(struct zhaoxin_uncore_type **types, bool setid) +{ + int ret; + + for (; *types; types++) { + ret = uncore_type_init(*types, setid); + if (ret) + return ret; + } + return 0; +} + +/* + * add a pci uncore device + */ +static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_box **boxes; + char mc_dev[10]; + int loop = 1; + int i, j = 0; + int subnode_id = 0; + int ret = 0; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) + subnode_id = uncore_pcibus_to_subnodeid(pdev->bus); + + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + strscpy(mc_dev, "mc0", sizeof("mc0")); + if (!strcmp(type->name, mc_dev)) + loop = 2; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + strscpy(mc_dev, "mc_a0", sizeof("mc_a0")); + if (!strcmp(type->name, mc_dev)) + loop = 4; + } + + boxes = kcalloc(loop, sizeof(struct zhaoxin_uncore_box *), GFP_KERNEL); + if (!boxes) + return -ENOMEM; + + for (i = 0; i < loop; i++) { + type = uncore_pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data) + j]; + + if (!type) + continue; + /* + * for performance monitoring unit with multiple boxes, + * each box has a different function id. 
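+	 * It is latched from pdev->devfn on the first probe; any later
+	 * probe for the same pmu must present the same function id (see
+	 * the WARN_ON_ONCE() below).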
+ */ + pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; + + if (WARN_ON_ONCE(pmu->boxes[subnode_id] != NULL)) + return -EINVAL; + + box = uncore_alloc_box(type, NUMA_NO_NODE); + if (!box) + return -ENOMEM; + + if (pmu->func_id < 0) + pmu->func_id = pdev->devfn; + else + WARN_ON_ONCE(pmu->func_id != pdev->devfn); + + atomic_inc(&box->refcnt); + box->subnode_id = subnode_id; + box->pci_dev = pdev; + box->pmu = pmu; + uncore_box_init(box); + boxes[i] = box; + + pci_set_drvdata(pdev, boxes); + pmu->boxes[subnode_id] = box; + if (atomic_inc_return(&pmu->activeboxes) > 1) { + if (!strcmp(type->name, mc_dev)) + goto next_loop; + else + return 0; + } + /* First active box registers the pmu */ + ret = uncore_pmu_register(pmu); + if (ret) { + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + uncore_box_exit(box); + kfree(box); + } +next_loop: + j++; + } + + return ret; +} + +static void uncore_pci_remove(struct pci_dev *pdev) +{ + struct zhaoxin_uncore_box **boxes; + struct zhaoxin_uncore_box *box; + struct zhaoxin_uncore_pmu *pmu; + int subnode_id = 0; + int i = 0; + int loop = 1; + + boxes = pci_get_drvdata(pdev); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc0")) + loop = 2; + else + loop = 1; + } else if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KX8000) { + if (!strcmp(boxes[0]->pmu->type->name, "mc_a0")) + loop = 4; + else + loop = 1; + } + + + for (i = 0; i < loop; i++) { + box = boxes[i]; + pmu = box->pmu; + if (WARN_ON_ONCE(subnode_id != box->subnode_id)) + return; + + pci_set_drvdata(pdev, NULL); + pmu->boxes[subnode_id] = NULL; + if (atomic_dec_return(&pmu->activeboxes) == 0) + uncore_pmu_unregister(pmu); + + uncore_box_exit(box); + kfree(box); + } + + kfree(boxes); +} + +static int __init uncore_pci_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_pci_uncores, false); + if (ret) + goto errtype; + + uncore_pci_driver->probe = uncore_pci_probe; + uncore_pci_driver->remove = uncore_pci_remove; + + ret = pci_register_driver(uncore_pci_driver); + if (ret) + goto errtype; + + pcidrv_registered = true; + return 0; + +errtype: + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + uncore_pci_uncores = empty_uncore; + return ret; +} + +static void uncore_pci_exit(void) +{ + if (pcidrv_registered) { + pcidrv_registered = false; + pci_unregister_driver(uncore_pci_driver); + uncore_types_exit(uncore_pci_uncores); + uncore_free_pcibus_map(); + } +} + +static void uncore_change_type_ctx(struct zhaoxin_uncore_type *type, int old_cpu, + int new_cpu) +{ + struct zhaoxin_uncore_pmu *pmu = type->pmus; + struct zhaoxin_uncore_box *box; + int i, package_id, cluster_id = 0, subnode_id = 0; + + package_id = zx_topology_package_id(old_cpu < 0 ? new_cpu : old_cpu); + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + cluster_id = zx_topology_cluster_id(old_cpu < 0 ? new_cpu : old_cpu); + subnode_id = zx_topology_subnode_id(old_cpu < 0 ? 
new_cpu : old_cpu); + } + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + if (!strcmp(type->name, "llc")) { + box = pmu->boxes[cluster_id]; + if (!box) + continue; + } else { + box = pmu->boxes[subnode_id]; + if (!box) + continue; + } + } else { + box = pmu->boxes[package_id]; + if (!box) + continue; + } + + if (old_cpu < 0) { + + WARN_ON_ONCE(box->cpu != -1); + box->cpu = new_cpu; + continue; + } + WARN_ON_ONCE(box->cpu != old_cpu); + box->cpu = -1; + if (new_cpu < 0) + continue; + + uncore_pmu_cancel_hrtimer(box); + perf_pmu_migrate_context(&pmu->pmu, old_cpu, new_cpu); + box->cpu = new_cpu; + } +} + +static void uncore_change_context(struct zhaoxin_uncore_type **uncores, + int old_cpu, int new_cpu) +{ + for (; *uncores; uncores++) + uncore_change_type_ctx(*uncores, old_cpu, new_cpu); +} + +static void uncore_box_unref(struct zhaoxin_uncore_type **types, int id) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_dec_return(&box->refcnt) == 0) + uncore_box_exit(box); + } + } +} + +struct zhaoxin_uncore_type *uncore_msr_cluster_uncores[] = { + &kh40000_uncore_llc_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_msr_subnode_uncores[] = { + &kh40000_uncore_hif_box, + &kh40000_uncore_zzi_box, + NULL, +}; + +struct zhaoxin_uncore_type *uncore_pci_subnode_uncores[] = { + &kh40000_uncore_mc0, + &kh40000_uncore_mc1, + &kh40000_uncore_pci, + &kh40000_uncore_zpi_dll, + &kh40000_uncore_zdi_dll, + &kh40000_uncore_pxptrf, + NULL, +}; + +static void kx5000_event_cpu_offline(int cpu) +{ + int package, target; + + /* Check if exiting cpu is used for collecting uncore events */ + + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + goto unref_cpu_mask; + + /* Find a new cpu to collect uncore events */ + target = cpumask_any_but(topology_core_cpumask(cpu), cpu); + + /* Migrate uncore events to the new target */ + if (target < nr_cpu_ids) + cpumask_set_cpu(target, &uncore_cpu_mask); + else + target = -1; + + uncore_change_context(uncore_msr_uncores, cpu, target); + uncore_change_context(uncore_mmio_uncores, cpu, target); + uncore_change_context(uncore_pci_uncores, cpu, target); + +unref_cpu_mask: + /*clear the references*/ + package = zx_topology_package_id(cpu); + uncore_box_unref(uncore_msr_uncores, package); + uncore_box_unref(uncore_mmio_uncores, package); +} + +static void kh40000_event_cpu_offline(int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + /* Check if exiting cpu is used for collecting uncore events */ + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_cluster_mask)) { + cluster_target = cpumask_any_but(topology_cluster_core_cpumask(cpu), cpu); + if (cluster_target < nr_cpu_ids) + cpumask_set_cpu(cluster_target, &uncore_cpu_cluster_mask); + else + cluster_target = -1; + uncore_change_context(uncore_msr_cluster_uncores, cpu, cluster_target); + } else { + uncore_box_unref(uncore_msr_cluster_uncores, cluster_id); + } + + if (cpumask_test_and_clear_cpu(cpu, &uncore_cpu_subnode_mask)) { + subnode_target = cpumask_any_but(topology_subnode_core_cpumask(cpu), cpu); + if (subnode_target < nr_cpu_ids) + cpumask_set_cpu(subnode_target, &uncore_cpu_subnode_mask); + else + subnode_target = 
-1; + uncore_change_context(uncore_msr_subnode_uncores, cpu, subnode_target); + uncore_change_context(uncore_pci_subnode_uncores, cpu, subnode_target); + } else { + uncore_box_unref(uncore_msr_subnode_uncores, subnode_id); + } + +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + unsigned int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_event_cpu_offline(cpu); + else + kx5000_event_cpu_offline(cpu); + + return 0; +} + +static int kx5000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + box->package_id = id; + list_add(&box->active_list, &allocated); + } + } + + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int kh40000_allocate_boxes(struct zhaoxin_uncore_type **types, + unsigned int id, unsigned int cpu) +{ + struct zhaoxin_uncore_box *box, *tmp; + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + LIST_HEAD(allocated); + int i; + + /* Try to allocate all required boxes */ + for (; *types; types++) { + type = *types; + pmu = type->pmus; + + for (i = 0; i < type->num_boxes; i++, pmu++) { + if (pmu->boxes[id]) + continue; + box = uncore_alloc_box(type, cpu_to_node(cpu)); + if (!box) + goto cleanup; + box->pmu = pmu; + if (!strcmp(type->name, "llc")) + box->cluster_id = id; + else + box->subnode_id = id; + list_add(&box->active_list, &allocated); + } + } + /* Install them in the pmus */ + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + box->pmu->boxes[id] = box; + } + return 0; + +cleanup: + list_for_each_entry_safe(box, tmp, &allocated, active_list) { + list_del_init(&box->active_list); + kfree(box); + } + return -ENOMEM; +} + +static int uncore_box_ref(struct zhaoxin_uncore_type **types, + int id, unsigned int cpu) +{ + struct zhaoxin_uncore_type *type; + struct zhaoxin_uncore_pmu *pmu; + struct zhaoxin_uncore_box *box; + int i, ret = 0; + + int x86_model; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + ret = kh40000_allocate_boxes(types, id, cpu); + else + ret = kx5000_allocate_boxes(types, id, cpu); + + if (ret) + return ret; + + for (; *types; types++) { + type = *types; + pmu = type->pmus; + for (i = 0; i < type->num_boxes; i++, pmu++) { + box = pmu->boxes[id]; + if (box && atomic_inc_return(&box->refcnt) == 1) + uncore_box_init(box); + } + } + return 0; +} + +static int kx5000_event_cpu_online(unsigned int cpu) +{ + int package, target, msr_ret, mmio_ret; + + package = zx_topology_package_id(cpu); + msr_ret = uncore_box_ref(uncore_msr_uncores, package, cpu); + mmio_ret = uncore_box_ref(uncore_mmio_uncores, package, cpu); + if (msr_ret && mmio_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the package + * which collects uncore events 
already. + */ + target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + cpumask_set_cpu(cpu, &uncore_cpu_mask); + + if (!msr_ret) + uncore_change_context(uncore_msr_uncores, -1, cpu); + if (!mmio_ret) + uncore_change_context(uncore_mmio_uncores, -1, cpu); + uncore_change_context(uncore_pci_uncores, -1, cpu); + + return 0; +} + +static int kh40000_event_cpu_online(unsigned int cpu) +{ + int cluster_target, subnode_target; + int cluster_id, subnode_id; + int cluster_ret, subnode_ret; + + cluster_id = zx_topology_cluster_id(cpu); + subnode_id = zx_topology_subnode_id(cpu); + + cluster_ret = uncore_box_ref(uncore_msr_cluster_uncores, cluster_id, cpu); + subnode_ret = uncore_box_ref(uncore_msr_subnode_uncores, subnode_id, cpu); + + if (cluster_ret && subnode_ret) + return -ENOMEM; + + /* + * Check if there is an online cpu in the cluster or subnode + * which collects uncore events already. + */ + + cluster_target = + cpumask_any_and(&uncore_cpu_cluster_mask, topology_cluster_core_cpumask(cpu)); + subnode_target = + cpumask_any_and(&uncore_cpu_subnode_mask, topology_subnode_core_cpumask(cpu)); + + if (cluster_target < nr_cpu_ids && subnode_target < nr_cpu_ids) + return 0; + + if (!cluster_ret && cluster_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_cluster_mask); + uncore_change_context(uncore_msr_cluster_uncores, -1, cpu); + } + + if (!subnode_ret && subnode_target >= nr_cpu_ids) { + cpumask_set_cpu(cpu, &uncore_cpu_subnode_mask); + uncore_change_context(uncore_msr_subnode_uncores, -1, cpu); + uncore_change_context(uncore_pci_subnode_uncores, -1, cpu); + } + + return 0; +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int x86_model; + int kx5000_ret = 0, kh40000_ret = 0; + + x86_model = boot_cpu_data.x86_model; + + if (x86_model == ZHAOXIN_FAM7_KH40000) + kh40000_ret = kh40000_event_cpu_online(cpu); + else + kx5000_ret = kx5000_event_cpu_online(cpu); + + if (kx5000_ret || kh40000_ret) + return -ENOMEM; + + return 0; +} + +static int __init type_pmu_register(struct zhaoxin_uncore_type *type) +{ + int i, ret; + + for (i = 0; i < type->num_boxes; i++) { + ret = uncore_pmu_register(&type->pmus[i]); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_msr_pmus_register(void) +{ + struct zhaoxin_uncore_type **types = uncore_msr_uncores; + int ret; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + return ret; + } + return 0; +} + +static int __init uncore_cpu_init(void) +{ + int ret; + + ret = uncore_types_init(uncore_msr_uncores, true); + if (ret) + goto err; + + ret = uncore_msr_pmus_register(); + if (ret) + goto err; + return 0; +err: + uncore_types_exit(uncore_msr_uncores); + uncore_msr_uncores = empty_uncore; + return ret; +} + +static int __init uncore_mmio_init(void) +{ + struct zhaoxin_uncore_type **types = uncore_mmio_uncores; + int ret; + + ret = uncore_types_init(types, true); + if (ret) + goto err; + + for (; *types; types++) { + ret = type_pmu_register(*types); + if (ret) + goto err; + } + return 0; +err: + uncore_types_exit(uncore_mmio_uncores); + uncore_mmio_uncores = empty_uncore; + return ret; +} + +struct zhaoxin_uncore_init_fun { + void (*cpu_init)(void); + int (*pci_init)(void); + void (*mmio_init)(void); +}; + +void kx5000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx5000_msr_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx5000_uncore_init __initconst = { + .cpu_init = kx5000_uncore_cpu_init, +}; + +void 
kh40000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kh40000_msr_uncores; +} + +int kh40000_uncore_pci_init(void) +{ + int ret = kh40000_pci2node_map_init();/*pci_bus to package mapping, do nothing*/ + + if (ret) + return ret; + uncore_pci_uncores = kh40000_pci_uncores; + uncore_pci_driver = &kh40000_uncore_pci_driver; + return 0; +} + +static const struct zhaoxin_uncore_init_fun kh40000_uncore_init __initconst = { + .cpu_init = kh40000_uncore_cpu_init, + .pci_init = kh40000_uncore_pci_init, +}; + +void kx8000_uncore_cpu_init(void) +{ + uncore_msr_uncores = kx8000_msr_uncores; +} + +int kx8000_uncore_pci_init(void) +{ + uncore_pci_uncores = kx8000_pci_uncores; + uncore_pci_driver = &kx8000_uncore_pci_driver; + + return 0; +} + +void kx8000_uncore_mmio_init(void) +{ + uncore_mmio_uncores = kx8000_mmio_uncores; +} + +static const struct zhaoxin_uncore_init_fun kx8000_uncore_init __initconst = { + .cpu_init = kx8000_uncore_cpu_init, + .pci_init = kx8000_uncore_pci_init, + .mmio_init = kx8000_uncore_mmio_init, +}; + +static const struct x86_cpu_id zhaoxin_uncore_match[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX5000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX6000, &kx5000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KH40000, &kh40000_uncore_init), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, ZHAOXIN_FAM7_KX8000, &kx8000_uncore_init), + {}, +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_uncore_match); + +static int __init zhaoxin_uncore_init(void) +{ + const struct x86_cpu_id *id = NULL; + struct zhaoxin_uncore_init_fun *uncore_init; + int pret = 0, cret = 0, mret = 0, ret; + + id = x86_match_cpu(zhaoxin_uncore_match); + if (!id) + return -ENODEV; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) + return -ENODEV; + + pr_info("welcome to uncore.\n"); + + get_topology_number(); + get_topology_info(); + + if (boot_cpu_data.x86_model == ZHAOXIN_FAM7_KH40000) { + zx_gen_core_map(); + get_pcibus_limit(); + } + + uncore_init = (struct zhaoxin_uncore_init_fun *)id->driver_data; + + if (uncore_init->pci_init) { + pret = uncore_init->pci_init(); + if (!pret) + pret = uncore_pci_init(); + } + + if (uncore_init->cpu_init) { + uncore_init->cpu_init(); + cret = uncore_cpu_init(); + } + + if (uncore_init->mmio_init) { + uncore_init->mmio_init(); + mret = uncore_mmio_init(); + } + + if (cret && pret && mret) + return -ENODEV; + + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "perf/x86/zhaoxin/uncore:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret) + goto err; + pr_info("uncore init success!\n"); + + return 0; + +err: + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); + pr_info("uncore init fail!\n"); + + return ret; +} +module_init(zhaoxin_uncore_init); + +static void __exit zhaoxin_uncore_exit(void) +{ + cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE); + uncore_types_exit(uncore_msr_uncores); + uncore_types_exit(uncore_mmio_uncores); + uncore_pci_exit(); +} +module_exit(zhaoxin_uncore_exit); diff --git a/arch/x86/events/zhaoxin/uncore.h b/arch/x86/events/zhaoxin/uncore.h new file mode 100644 index 
0000000000000000000000000000000000000000..5d09696f8bc793fe3b0245bf7ff3f4a9d1544f66
--- /dev/null
+++ b/arch/x86/events/zhaoxin/uncore.h
@@ -0,0 +1,371 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <asm/apicdef.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
+#include <linux/perf_event.h>
+#include "../perf_event.h"
+
+#define ZHAOXIN_FAM7_KX5000		0x1b
+#define ZHAOXIN_FAM7_KX6000		0x3b
+#define ZHAOXIN_FAM7_KH40000		0x5b
+#define ZHAOXIN_FAM7_KX8000		0x6b
+
+
+
+#define UNCORE_PMU_NAME_LEN		32
+#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
+
+#define UNCORE_FIXED_EVENT		0xff
+#define UNCORE_PMC_IDX_MAX_GENERIC	4
+#define UNCORE_PMC_IDX_MAX_FIXED	1
+#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
+
+#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FIXED + 1)
+
+#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
+#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
+#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
+
+struct zhaoxin_uncore_ops;
+struct zhaoxin_uncore_pmu;
+struct zhaoxin_uncore_box;
+struct uncore_event_desc;
+
+struct zhaoxin_uncore_type {
+	const char *name;
+	int num_counters;
+	int num_boxes;
+	int perf_ctr_bits;
+	int fixed_ctr_bits;
+	unsigned int perf_ctr;
+	unsigned int event_ctl;
+	unsigned int event_mask;
+	unsigned int event_mask_ext;
+	unsigned int fixed_ctr;
+	unsigned int fixed_ctl;
+	unsigned int box_ctl;
+	union {
+		unsigned int msr_offset;
+		unsigned int mmio_offset;
+	};
+	unsigned int num_shared_regs:8;
+	unsigned int single_fixed:1;
+	unsigned int pair_ctr_ctl:1;
+	unsigned int *msr_offsets;
+	struct event_constraint unconstrainted;
+	struct event_constraint *constraints;
+	struct zhaoxin_uncore_pmu *pmus;
+	struct zhaoxin_uncore_ops *ops;
+	struct uncore_event_desc *event_descs;
+	const struct attribute_group *attr_groups[4];
+	struct pmu *pmu; /* for custom pmu ops */
+};
+
+#define pmu_group attr_groups[0]
+#define format_group attr_groups[1]
+#define events_group attr_groups[2]
+
+struct zhaoxin_uncore_ops {
+	void (*init_box)(struct zhaoxin_uncore_box *box);
+	void (*exit_box)(struct zhaoxin_uncore_box *box);
+	void (*disable_box)(struct zhaoxin_uncore_box *box);
+	void (*enable_box)(struct zhaoxin_uncore_box *box);
+	void (*disable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	void (*enable_event)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	u64 (*read_counter)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	int (*hw_config)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+	struct event_constraint *(*get_constraint)(struct zhaoxin_uncore_box *box,
+						   struct perf_event *event);
+	void (*put_constraint)(struct zhaoxin_uncore_box *box, struct perf_event *event);
+};
+
+struct zhaoxin_uncore_pmu {
+	struct pmu pmu;
+	char name[UNCORE_PMU_NAME_LEN];
+	int pmu_idx;
+	int func_id;
+	bool registered;
+	atomic_t activeboxes;
+	struct zhaoxin_uncore_type *type;
+	struct zhaoxin_uncore_box **boxes;
+};
+
+struct zhaoxin_uncore_extra_reg {
+	raw_spinlock_t lock;
+	u64 config, config1, config2;
+	atomic_t ref;
+};
+
+struct zhaoxin_uncore_box {
+	int pci_phys_id;
+	int package_id;	/* Package ID */
+	int cluster_id;
+	int subnode_id;
+	int n_active;	/* number of active events */
+	int n_events;
+	int cpu;	/* cpu to collect events */
+	unsigned long flags;
+	atomic_t refcnt;
+	struct perf_event *events[UNCORE_PMC_IDX_MAX];
+	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
+	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
+	u64
tags[UNCORE_PMC_IDX_MAX]; + struct pci_dev *pci_dev; + struct zhaoxin_uncore_pmu *pmu; + u64 hrtimer_duration; /* hrtimer timeout for this box */ + struct hrtimer hrtimer; + struct list_head list; + struct list_head active_list; + void __iomem *io_addr; + struct zhaoxin_uncore_extra_reg shared_regs[]; +}; + +#define UNCORE_BOX_FLAG_INITIATED 0 + +struct uncore_event_desc { + struct device_attribute attr; + const char *config; +}; + +struct hw_info { + u64 config_info; + u64 active_state; +}; + +ssize_t zx_uncore_event_show(struct device *dev, + struct device_attribute *attr, char *buf); + +#define ZHAOXIN_UNCORE_EVENT_DESC(_name, _config) \ +{ \ + .attr = __ATTR(_name, 0444, zx_uncore_event_show, NULL), \ + .config = _config, \ +} + +#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format) \ +static ssize_t __uncore_##_var##_show(struct device *dev, \ + struct device_attribute *attr, \ + char *page) \ +{ \ + BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE); \ + return sprintf(page, _format "\n"); \ +} \ +static struct device_attribute format_attr_##_var = \ + __ATTR(_name, 0444, __uncore_##_var##_show, NULL) + +static inline bool uncore_pmc_fixed(int idx) +{ + return idx == UNCORE_PMC_IDX_FIXED; +} + +static inline +unsigned int uncore_mmio_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl + + box->pmu->type->mmio_offset * box->pmu->pmu_idx; +} + +static inline unsigned int uncore_pci_box_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->box_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctl; +} + +static inline unsigned int uncore_pci_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr; +} + +static inline +unsigned int uncore_pci_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return idx * 4 + box->pmu->type->event_ctl; +} + +static inline +unsigned int uncore_pci_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + if (!strncmp(box->pmu->type->name, "mc_", 3)) + return idx * 2 + box->pmu->type->perf_ctr; + else + return idx * 8 + box->pmu->type->perf_ctr; +} + +static inline unsigned int uncore_msr_box_offset(struct zhaoxin_uncore_box *box) +{ + struct zhaoxin_uncore_pmu *pmu = box->pmu; + + return pmu->type->msr_offsets ? + pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned int uncore_msr_box_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->box_ctl) + return 0; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (!box->pmu->type->fixed_ctl) + return 0; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); +} + +static inline unsigned int uncore_msr_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->event_ctl + + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_msr_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ + return box->pmu->type->perf_ctr + + (box->pmu->type->pair_ctr_ctl ? 
2 * idx : idx) + + uncore_msr_box_offset(box); +} + +static inline +unsigned int uncore_fixed_ctl(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctl(box); + else + return uncore_msr_fixed_ctl(box); +} + +static inline +unsigned int uncore_fixed_ctr(struct zhaoxin_uncore_box *box) +{ + if (box->pci_dev) + return uncore_pci_fixed_ctr(box); + else + return uncore_msr_fixed_ctr(box); +} + +static inline +unsigned int uncore_event_ctl(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_event_ctl(box, idx); + else + return uncore_msr_event_ctl(box, idx); +} + +static inline +unsigned int uncore_perf_ctr(struct zhaoxin_uncore_box *box, int idx) +{ if (box->pci_dev || box->io_addr) + return uncore_pci_perf_ctr(box, idx); + else + return uncore_msr_perf_ctr(box, idx); +} + +static inline int uncore_perf_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->perf_ctr_bits; +} + +static inline int uncore_fixed_ctr_bits(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->fixed_ctr_bits; +} + +static inline int uncore_num_counters(struct zhaoxin_uncore_box *box) +{ + return box->pmu->type->num_counters; +} + +static inline void uncore_disable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->disable_box) + box->pmu->type->ops->disable_box(box); +} + +static inline void uncore_enable_box(struct zhaoxin_uncore_box *box) +{ + if (box->pmu->type->ops->enable_box) + box->pmu->type->ops->enable_box(box); +} + +static inline void uncore_disable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->disable_event(box, event); +} + +static inline void uncore_enable_event(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + box->pmu->type->ops->enable_event(box, event); +} + +static inline u64 uncore_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event) +{ + return box->pmu->type->ops->read_counter(box, event); +} + +static inline void uncore_box_init(struct zhaoxin_uncore_box *box) +{ + if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->init_box) + box->pmu->type->ops->init_box(box); + } +} + +static inline void uncore_box_exit(struct zhaoxin_uncore_box *box) +{ + if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { + if (box->pmu->type->ops->exit_box) + box->pmu->type->ops->exit_box(box); + } +} + +static inline bool uncore_box_is_fake(struct zhaoxin_uncore_box *box) +{ + return (box->package_id < 0); +} + +static inline struct zhaoxin_uncore_pmu *uncore_event_to_pmu(struct perf_event *event) +{ + return container_of(event->pmu, struct zhaoxin_uncore_pmu, pmu); +} + +static inline struct zhaoxin_uncore_box *uncore_event_to_box(struct perf_event *event) +{ + return event->pmu_private; +} + + +static struct zhaoxin_uncore_box *uncore_pmu_to_box(struct zhaoxin_uncore_pmu *pmu, int cpu); +static u64 uncore_msr_read_counter(struct zhaoxin_uncore_box *box, struct perf_event *event); +static void uncore_mmio_exit_box(struct zhaoxin_uncore_box *box); +static u64 uncore_mmio_read_counter(struct zhaoxin_uncore_box *box, + struct perf_event *event); +static void uncore_pmu_start_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_cancel_hrtimer(struct zhaoxin_uncore_box *box); +static void uncore_pmu_event_start(struct perf_event *event, int flags); +static void uncore_pmu_event_stop(struct perf_event *event, int flags); +static int uncore_pmu_event_add(struct perf_event 
*event, int flags); +static void uncore_pmu_event_del(struct perf_event *event, int flags); +static void uncore_pmu_event_read(struct perf_event *event); +static void uncore_perf_event_update(struct zhaoxin_uncore_box *box, struct perf_event *event); +struct event_constraint * +uncore_get_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +void uncore_put_constraint(struct zhaoxin_uncore_box *box, struct perf_event *event); +u64 uncore_shared_reg_config(struct zhaoxin_uncore_box *box, int idx); diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index f896eed4516c7e3c131751ab5865aa70cfa2d645..1f5fe16484fac9eaa9541bcaf8085c271a7d594d 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -102,7 +102,8 @@ static inline bool arch_has_acpi_pdc(void) { struct cpuinfo_x86 *c = &cpu_data(0); return (c->x86_vendor == X86_VENDOR_INTEL || - c->x86_vendor == X86_VENDOR_CENTAUR); + c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN); } static inline void arch_acpi_set_proc_cap_bits(u32 *cap) diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h index ed0eaf65c43721ebfcf4f4ee74f827c6d91ec5d1..4230a80a5a9de2264ba53a96e06aea1ea1709a95 100644 --- a/arch/x86/include/asm/amd_nb.h +++ b/arch/x86/include/asm/amd_nb.h @@ -82,6 +82,10 @@ u16 amd_nb_num(void); bool amd_nb_has_feature(unsigned int feature); struct amd_northbridge *node_to_amd_nb(int node); +bool hygon_f18h_m4h(void); +u16 hygon_nb_num(void); +int get_df_id(struct pci_dev *misc, u8 *id); + static inline u16 amd_pci_dev_to_node_id(struct pci_dev *pdev) { struct pci_dev *misc; @@ -119,6 +123,10 @@ static inline bool amd_gart_present(void) #define node_to_amd_nb(x) NULL #define amd_gart_present(x) false +#define hygon_f18h_m4h false +#define hygon_nb_num(x) 0 +#define get_df_id(x, y) NULL + #endif diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 5af4ec1a0f71cf9e232aa394981e53ede090d088..17f2f28a495ec6f9e7bc54d659fba260ea30d96d 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -276,7 +276,8 @@ struct apic { u32 disable_esr : 1, dest_mode_logical : 1, - x2apic_set_max_apicid : 1; + x2apic_set_max_apicid : 1, + nmi_to_offline_cpu : 1; u32 (*calc_dest_apicid)(unsigned int cpu); @@ -542,6 +543,8 @@ extern bool default_check_apicid_used(physid_mask_t *map, int apicid); extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); extern int default_cpu_present_to_apicid(int mps_cpu); +void apic_send_nmi_to_offline_cpu(unsigned int cpu); + #else /* CONFIG_X86_LOCAL_APIC */ static inline unsigned int read_apic_id(void) { return 0; } diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 25050d953eee02d71cb50f585f2f7dcc7482c011..fecc4fe1d68aff799c7b91b363d69e961cefbab1 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h @@ -71,26 +71,12 @@ static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {} extern __noendbr void cet_disable(void); -struct ucode_cpu_info; +struct cpu_signature; -int intel_cpu_collect_info(struct ucode_cpu_info *uci); - -static inline bool intel_cpu_signatures_match(unsigned int s1, unsigned int p1, - unsigned int s2, unsigned int p2) -{ - if (s1 != s2) - return false; - - /* Processor flags are either both 0 ... */ - if (!p1 && !p2) - return true; - - /* ... or they intersect. 
*/ - return p1 & p2; -} +void intel_collect_cpu_info(struct cpu_signature *sig); extern u64 x86_read_arch_cap_msr(void); -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf); +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig); int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type); extern struct cpumask cpus_stop_mask; diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index a26bebbdff87ed20c45bdb98dcc4a8873f5c30f5..8b0ac83c4d9b98f0c157a7f24b8858b181176d7c 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -33,6 +33,8 @@ enum cpuid_leafs CPUID_7_EDX, CPUID_8000_001F_EAX, CPUID_8000_0021_EAX, + CPUID_8C86_0000_EDX, + CPUID_C000_0006_EAX, }; #define X86_CAP_FMT_NUM "%d:%d" @@ -91,8 +93,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 22, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -116,8 +120,10 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 21, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 22, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 21)) + BUILD_BUG_ON_ZERO(NCAPINTS != 23)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 58cb9495e40f426323cd65569cd53145e7b2f9e7..0a1ffbb95062c453d29922e3a9fb0ef930732c34 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 21 /* N 32-bit words worth of info */ +#define NCAPINTS 23 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* @@ -146,8 +146,12 @@ #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_SM2 (5*32 + 0) /* SM2 ZhaoXin GMI present */ +#define X86_FEATURE_SM2_EN (5*32 + 1) /* SM2 ZhaoXin GMI enabled */ #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ +#define X86_FEATURE_CCS (5*32 + 4) /* "sm3/4" SM3/4 present */ +#define X86_FEATURE_CCS_EN (5*32 + 5) /* "sm3/4" SM3/4 enabled */ #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ @@ -156,6 +160,23 @@ #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ +#define X86_FEATURE_ZX_FMA (5*32+15) /* FMA supported */ +#define X86_FEATURE_PARALLAX (5*32+16) /* Adaptive P-state control present */ +#define X86_FEATURE_PARALLAX_EN (5*32+17) /* Adaptive P-state control enabled */ +#define X86_FEATURE_OVERSTRESS (5*32+18) /* Overstress Feature for auto overclock present */ +#define X86_FEATURE_OVERSTRESS_EN (5*32+19) /* Overstress Feature for auto overclock enabled */ +#define X86_FEATURE_TM3 (5*32+20) /* Thermal Monitor 3 present */ +#define X86_FEATURE_TM3_EN (5*32+21) /* Thermal Monitor 3 enabled */ +#define X86_FEATURE_RNG2 (5*32+22) /* 2nd generation of RNG present */ +#define X86_FEATURE_RNG2_EN (5*32+23) /* 2nd generation of RNG enabled */ +#define X86_FEATURE_SEM (5*32+24) /* SME feature present */ +#define X86_FEATURE_PHE2 (5*32+25) /* SHA384 and SHA512 present */ +#define X86_FEATURE_PHE2_EN (5*32+26) /* SHA384 and SHA512 enabled */ +#define X86_FEATURE_XMODX (5*32+27) /* "rsa" XMODEXP and MONTMUL2 are present */ +#define X86_FEATURE_XMODX_EN (5*32+28) /* "rsa_en" XMODEXP and MONTMUL2 are enabled */ +#define X86_FEATURE_VEX (5*32+29) /* VEX instructions are present */ +#define X86_FEATURE_VEX_EN (5*32+30) /* VEX instructions are enabled */ +#define X86_FEATURE_STK (5*32+31) /* STK is present */ /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */ #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ @@ -312,6 +333,7 @@ #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ #define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ +#define X86_FEATURE_CRC32C_LOW_PERF (11*32+27) /* "" Low performance CRC32C implementation */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ @@ -440,6 +462,8 @@ #define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ #define 
X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */ +/* HYGON 3rd CSV */ +#define X86_FEATURE_CSV3 (19*32 + 30) /* HYGON 3rd CSV */ /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */ #define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */ @@ -452,6 +476,13 @@ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ #define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ +/* HYGON-defined CPU features, CPUID level 0x8c860000:0 (EDX), word 21 */ +#define X86_FEATURE_SM3 (21*32 + 1) /* SM3 instructions */ +#define X86_FEATURE_SM4 (21*32 + 2) /* SM4 instructions */ + +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 22 */ +#define X86_FEATURE_ZXPAUSE (22*32 + 0) /* ZHAOXIN ZXPAUSE */ + /* * BUG word(s) */ diff --git a/arch/x86/include/asm/csv.h b/arch/x86/include/asm/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..30e8a89ce8c0951c5b02ba8ced5837a059506017 --- /dev/null +++ b/arch/x86/include/asm/csv.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. + * + * Author: Jiang Xin + */ + +#ifndef __ASM_X86_CSV_H__ +#define __ASM_X86_CSV_H__ + +#ifndef __ASSEMBLY__ + +struct csv_mem { + uint64_t start; + uint64_t size; +}; + +#ifdef CONFIG_HYGON_CSV + +#define CSV_MR_ALIGN_BITS (28) + +extern struct csv_mem *csv_smr; +extern unsigned int csv_smr_num; + +void __init early_csv_reserve_mem(void); + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align); +void csv_release_to_contiguous(phys_addr_t pa, size_t size); + +uint32_t csv_get_smr_entry_shift(void); + +bool csv3_active(void); + +void __init csv_early_reset_memory(struct boot_params *bp); +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages); +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages); + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc); + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc); + +#else /* !CONFIG_HYGON_CSV */ + +#define csv_smr NULL +#define csv_smr_num 0U + +static inline void __init early_csv_reserve_mem(void) { } + +static inline phys_addr_t +csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) { return 0; } +static inline void csv_release_to_contiguous(phys_addr_t pa, size_t size) { } + +static inline uint32_t csv_get_smr_entry_shift(void) { return 0; } + +static inline bool csv3_active(void) { return false; } + +static inline void __init csv_early_reset_memory(struct boot_params *bp) { } +static inline void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) { } +static inline void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) { } + +static inline void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, + bool enc) { } + +static inline void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) { } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_X86_CSV_H__ */ diff --git a/arch/x86/include/asm/delay.h b/arch/x86/include/asm/delay.h index 630891d2581989e4e2058a18c5790545f412d73b..4dbb3fea67fb510c051de76b86b08d322caf3521 100644 --- a/arch/x86/include/asm/delay.h +++ b/arch/x86/include/asm/delay.h @@ -7,6 +7,7 @@ void __init use_tsc_delay(void); void __init use_tpause_delay(void); +void __init use_zxpause_delay(void); void 
use_mwaitx_delay(void); #endif /* _ASM_X86_DELAY_H */ diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 702d93fdd10e8d44015cc687cb90106ae5bd422c..b108e656fa5b8bb5f489f3f0a1a3a10ecd38f438 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -143,6 +143,8 @@ #define DISABLED_MASK18 (DISABLE_IBT) #define DISABLED_MASK19 0 #define DISABLED_MASK20 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define DISABLED_MASK21 0 +#define DISABLED_MASK22 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h index ff5c7134a37aa11a6b9cfc08994cd58af76dbfbc..ba344b416ac5bb5d48c499b9d3e62ac47d84554c 100644 --- a/arch/x86/include/asm/kfence.h +++ b/arch/x86/include/asm/kfence.h @@ -19,11 +19,12 @@ #include /* Force 4K pages for __kfence_pool. */ -static inline bool arch_kfence_init_pool(void) +static inline bool arch_kfence_init_pool(struct kfence_pool_area *kpa) { + char *__kfence_pool = kpa->addr; unsigned long addr; - for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + for (addr = (unsigned long)__kfence_pool; is_kfence_address_area((void *)addr, kpa); addr += PAGE_SIZE) { unsigned int level; @@ -68,6 +69,51 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) return true; } +/* + * This function restores the kernel mapping of a 1G area back to a + * single 1G page, undoing the 4K split. The caller MUST make sure + * there are no other active kfence pools in this 1G area. + */ +static inline bool arch_kfence_free_pool(unsigned long addr) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud, new_pud, old_pud; + + addr = ALIGN_DOWN(addr, PUD_SIZE); + + pgd = pgd_offset_k(addr); + if (pgd_none(*pgd)) + return false; + + p4d = p4d_offset(pgd, addr); + if (p4d_none(*p4d)) + return false; + + if (p4d_large(*p4d) || !p4d_present(*p4d)) + return false; + + pud = pud_offset(p4d, addr); + if (pud_none(*pud)) + return false; + + if (pud_large(*pud) || !pud_present(*pud)) + return false; + + new_pud = pfn_pud((unsigned long)__phys_to_pfn(__pa(addr)), + __pgprot(__PAGE_KERNEL_LARGE)); + + old_pud = xchg(pud, new_pud); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + if (!pud_free_pmd_page(&old_pud, addr)) { + pr_warn("free old TLB error at 0x%p-0x%p\n", + (void *)addr, (void *)(addr + PUD_SIZE)); + } + + return true; +} + #endif /* !MODULE */ #endif /* _ASM_X86_KFENCE_H */
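(Illustrative aside, not part of the patch: a minimal sketch of a caller honoring the contract above; the pool-tracking helper is hypothetical.)

/* Sketch only: restore the 1G mapping once the last pool inside it is freed. */
static void example_free_pool_area(struct kfence_pool_area *kpa)
{
	unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE);

	/* Assumed helper: true iff no other active pool overlaps this 1G area. */
	if (example_no_other_pools(base, base + PUD_SIZE) &&
	    !arch_kfence_free_pool(base))
		pr_warn("failed to restore 1G mapping at 0x%lx\n", base);
}

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 9b419f0de713cc720ad348598ac3c5a615426c58..0c540ac3872e26e428424e54552fd4ae5f8bf26e 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -135,6 +135,10 @@ KVM_X86_OP(msr_filter_changed) KVM_X86_OP(complete_emulated_msr) KVM_X86_OP(vcpu_deliver_sipi_vector) KVM_X86_OP_OPTIONAL_RET0(vcpu_get_apicv_inhibit_reasons); +KVM_X86_OP_OPTIONAL(get_untagged_addr) +KVM_X86_OP_OPTIONAL(vm_attestation) +KVM_X86_OP_OPTIONAL(control_pre_system_reset) +KVM_X86_OP_OPTIONAL(control_post_system_reset) #undef KVM_X86_OP #undef KVM_X86_OP_OPTIONAL diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index fb9f5fa96cc964d39041e2757773a21754dffc2c..c2442ffe305635fdca78cec0cfa47f4ce9fd9b79 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -125,7 +125,8 @@ | X86_CR4_PGE | X86_CR4_PCE | X86_CR4_OSFXSR | X86_CR4_PCIDE \ | X86_CR4_OSXSAVE | 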
X86_CR4_SMEP | X86_CR4_FSGSBASE \ | X86_CR4_OSXMMEXCPT | X86_CR4_LA57 | X86_CR4_VMXE \ - | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP)) + | X86_CR4_SMAP | X86_CR4_PKE | X86_CR4_UMIP \ + | X86_CR4_LAM_SUP)) #define CR8_RESERVED_BITS (~(unsigned long)X86_CR8_TPR) @@ -1750,6 +1751,12 @@ struct kvm_x86_ops { * Returns vCPU specific APICv inhibit reasons */ unsigned long (*vcpu_get_apicv_inhibit_reasons)(struct kvm_vcpu *vcpu); + + gva_t (*get_untagged_addr)(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + + int (*vm_attestation)(struct kvm *kvm, unsigned long gpa, unsigned long len); + int (*control_pre_system_reset)(struct kvm *kvm); + int (*control_post_system_reset)(struct kvm *kvm); }; struct kvm_x86_nested_ops { @@ -2142,6 +2149,8 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event); int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, unsigned long ipi_bitmap_high, u32 min, unsigned long icr, int op_64_bit); +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, + gpa_t psp_ret_gpa, gpa_t table_gpa); int kvm_add_user_return_msr(u32 msr); int kvm_find_user_return_msr(u32 msr); diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index bbbe9d744977d0276d3c503aa29f9d51faba8fd2..695e569159c1d1fcbf469f8fe8ce1a0f366df996 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -23,6 +23,8 @@ static inline void load_ucode_ap(void) { } static inline void microcode_bsp_resume(void) { } #endif +extern unsigned long initrd_start_early; + #ifdef CONFIG_CPU_SUP_INTEL /* Intel specific microcode defines. Public for IFS */ struct microcode_header_intel { @@ -36,7 +38,8 @@ struct microcode_header_intel { unsigned int datasize; unsigned int totalsize; unsigned int metasize; - unsigned int reserved[2]; + unsigned int min_req_ver; + unsigned int reserved; }; struct microcode_intel { @@ -68,11 +71,19 @@ static inline u32 intel_get_microcode_revision(void) return rev; } +#endif /* !CONFIG_CPU_SUP_INTEL */ -void show_ucode_info_early(void); +bool microcode_nmi_handler(void); +void microcode_offline_nmi_handler(void); -#else /* CONFIG_CPU_SUP_INTEL */ -static inline void show_ucode_info_early(void) { } -#endif /* !CONFIG_CPU_SUP_INTEL */ +#ifdef CONFIG_MICROCODE_LATE_LOADING +DECLARE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static __always_inline bool microcode_nmi_handler_enabled(void) +{ + return static_branch_unlikely(&microcode_nmi_handler_enable); +} +#else +static __always_inline bool microcode_nmi_handler_enabled(void) { return false; } +#endif #endif /* _ASM_X86_MICROCODE_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 389f9594746ef58903c59d31a88c2d1e56bd1e25..97f4dbfce6a867ff4bd4419079e6652430e6f743 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U)
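(Illustrative aside, not part of the patch: composing a ZX_PAUSE control value from the C02_DISABLE flag above and the time mask defined immediately below; the helper name is made up.)

/* Sketch only: the time field lives in bits [31:2], flags in bits [1:0]. */
static inline u32 example_zx_pause_ctrl(u32 time, bool disable_c02)
{
	u32 val = time & MSR_ZX_PAUSE_CONTROL_TIME_MASK;

	if (disable_c02)
		val |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE;
	return val;
}

+/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. 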
+ */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -222,6 +233,7 @@ #define MSR_INTEGRITY_CAPS_ARRAY_BIST BIT(MSR_INTEGRITY_CAPS_ARRAY_BIST_BIT) #define MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT 4 #define MSR_INTEGRITY_CAPS_PERIODIC_BIST BIT(MSR_INTEGRITY_CAPS_PERIODIC_BIST_BIT) +#define MSR_INTEGRITY_CAPS_SAF_GEN_MASK GENMASK_ULL(10, 9) #define MSR_LBR_NHM_FROM 0x00000680 #define MSR_LBR_NHM_TO 0x000006c0 @@ -236,6 +248,11 @@ #define LBR_INFO_CYCLES 0xffff #define LBR_INFO_BR_TYPE_OFFSET 56 #define LBR_INFO_BR_TYPE (0xfull << LBR_INFO_BR_TYPE_OFFSET) +#define LBR_INFO_BR_CNTR_OFFSET 32 +#define LBR_INFO_BR_CNTR_NUM 4 +#define LBR_INFO_BR_CNTR_BITS 2 +#define LBR_INFO_BR_CNTR_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_BITS - 1, 0) +#define LBR_INFO_BR_CNTR_FULL_MASK GENMASK_ULL(LBR_INFO_BR_CNTR_NUM * LBR_INFO_BR_CNTR_BITS - 1, 0) #define MSR_ARCH_LBR_CTL 0x000014ce #define ARCH_LBR_CTL_LBREN BIT(0) @@ -753,6 +770,13 @@ #define MSR_VIA_RNG 0x0000110b #define MSR_VIA_BCR2 0x00001147 +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Transmeta defined MSRs */ #define MSR_TMTA_LONGRUN_CTRL 0x80868010 #define MSR_TMTA_LONGRUN_FLAGS 0x80868011 diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 778df05f8539181fb5531d0a0ea368549f665967..7b370c227b0340ec2fb9359d55efebcd4dfc41bb 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h @@ -26,6 +26,8 @@ #define TPAUSE_C01_STATE 1 #define TPAUSE_C02_STATE 0 +#define ZXPAUSE_C01_STATE 1 + static __always_inline void __monitor(const void *eax, unsigned long ecx, unsigned long edx) { @@ -141,4 +143,17 @@ static inline void __tpause(u32 ecx, u32 edx, u32 eax) #endif } +/* + * Caller can specify whether to enter C0.1 (low latency, less + * power saving) or C0.2 state (saves more power, but longer wakeup + * latency). This may be overridden by the ZX_PAUSE_CONTROL MSR + * which can force requests for C0.2 to be downgraded to C0.1. 
+ */ +static inline void __zxpause(u32 ecx, u32 edx, u32 eax) +{ + /* "zxpause %ecx, %edx, %eax;" */ + asm volatile(".byte 0xf2, 0x0f, 0xa6, 0xd0\t\n" + : + : "c"(ecx), "d"(edx), "a"(eax)); +} #endif /* _ASM_X86_MWAIT_H */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 85a9fd5a3ec33176ff3e2dd96aae915136f47864..9450594f1709e9e9ceee0ca18083c490c8373570 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -31,6 +31,7 @@ #define ARCH_PERFMON_EVENTSEL_ENABLE (1ULL << 22) #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL +#define ARCH_PERFMON_EVENTSEL_BR_CNTR (1ULL << 35) #define INTEL_FIXED_BITS_MASK 0xFULL #define INTEL_FIXED_BITS_STRIDE 4 @@ -60,6 +61,14 @@ #define INTEL_ARCH_EVENT_MASK \ (ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT) +#define HYGON_L3_SLICE_SHIFT 28 +#define HYGON_L3_SLICE_MASK \ + (0xFULL << HYGON_L3_SLICE_SHIFT) + +#define HYGON_L3_THREAD_SHIFT 32 +#define HYGON_L3_THREAD_MASK \ + (0xFFFFFFFFULL << HYGON_L3_THREAD_SHIFT) + #define AMD64_L3_SLICE_SHIFT 48 #define AMD64_L3_SLICE_MASK \ (0xFULL << AMD64_L3_SLICE_SHIFT) @@ -216,6 +225,9 @@ union cpuid28_ecx { unsigned int lbr_timed_lbr:1; /* Branch Type Field Supported */ unsigned int lbr_br_type:1; + unsigned int reserved:13; + /* Branch counters (Event Logging) Supported */ + unsigned int lbr_counters:4; } split; unsigned int full; }; diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index 7ba1726b71c7b8bfc95888dc78508998bba263fe..76953f757f3c32d593f1c079eb69ce13961e7caf 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -99,6 +99,8 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/include/asm/resctrl.h b/arch/x86/include/asm/resctrl.h index 255a78d9d90672afb053875184d89b05bab52a0b..f159bddbec5101a2280fadae38e1d7b301ba48b0 100644 --- a/arch/x86/include/asm/resctrl.h +++ b/arch/x86/include/asm/resctrl.h @@ -4,8 +4,19 @@ #ifdef CONFIG_X86_CPU_RESCTRL -#include #include +#include +#include +#include + +/* + * This value can never be a valid CLOSID, and is used when mapping a + * (closid, rmid) pair to an index and back. On x86 only the RMID is + * needed. The index is a software defined value. 
+ */ +#define X86_RESCTRL_EMPTY_CLOSID ((u32)~0) + +void resctrl_arch_reset_resources(void); /** * struct resctrl_pqr_state - State cache for the PQR MSR @@ -31,10 +42,68 @@ struct resctrl_pqr_state { DECLARE_PER_CPU(struct resctrl_pqr_state, pqr_state); +extern bool rdt_alloc_capable; +extern bool rdt_mon_capable; +extern unsigned int rdt_mon_features; + DECLARE_STATIC_KEY_FALSE(rdt_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); +static inline bool resctrl_arch_alloc_capable(void) +{ + return rdt_alloc_capable; +} + +static inline void resctrl_arch_enable_alloc(void) +{ + static_branch_enable_cpuslocked(&rdt_alloc_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); +} + +static inline void resctrl_arch_disable_alloc(void) +{ + static_branch_disable_cpuslocked(&rdt_alloc_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); +} + +static inline bool resctrl_arch_mon_capable(void) +{ + return rdt_mon_capable; +} + +static inline void resctrl_arch_enable_mon(void) +{ + static_branch_enable_cpuslocked(&rdt_mon_enable_key); + static_branch_inc_cpuslocked(&rdt_enable_key); +} + +static inline void resctrl_arch_disable_mon(void) +{ + static_branch_disable_cpuslocked(&rdt_mon_enable_key); + static_branch_dec_cpuslocked(&rdt_enable_key); +} + +static inline bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_total_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_local_enabled(void) +{ + return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); +} + +static inline bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return false; +} + /* * __resctrl_sched_in() - Writes the task's CLOSid/RMID to IA32_PQR_MSR * @@ -52,8 +121,8 @@ DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); static inline void __resctrl_sched_in(struct task_struct *tsk) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - u32 closid = state->default_closid; - u32 rmid = state->default_rmid; + u32 closid = READ_ONCE(state->default_closid); + u32 rmid = READ_ONCE(state->default_rmid); u32 tmp; /* @@ -88,17 +157,78 @@ static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) return val * scale; } -static inline void resctrl_sched_in(struct task_struct *tsk) +static inline void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, + u32 rmid) +{ + WRITE_ONCE(per_cpu(pqr_state.default_closid, cpu), closid); + WRITE_ONCE(per_cpu(pqr_state.default_rmid, cpu), rmid); +} + +static inline void resctrl_arch_set_closid_rmid(struct task_struct *tsk, + u32 closid, u32 rmid) +{ + WRITE_ONCE(tsk->closid, closid); + WRITE_ONCE(tsk->rmid, rmid); +} + +static inline bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + return READ_ONCE(tsk->closid) == closid; +} + +static inline bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 ignored, + u32 rmid) +{ + return READ_ONCE(tsk->rmid) == rmid; +} + +static inline void resctrl_arch_sched_in(struct task_struct *tsk) { if (static_branch_likely(&rdt_enable_key)) __resctrl_sched_in(tsk); } +static inline u32 resctrl_arch_system_num_rmid_idx(void) +{ + /* RMID are independent numbers for x86. 
num_rmid_idx == num_rmid */ + return boot_cpu_data.x86_cache_max_rmid + 1; +} + +static inline void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + *rmid = idx; + *closid = X86_RESCTRL_EMPTY_CLOSID; +} + +static inline u32 resctrl_arch_rmid_idx_encode(u32 ignored, u32 rmid) +{ + return rmid; +} + +/* x86 can always read an rmid, nothing needs allocating */ +struct rdt_resource; +static inline void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + might_sleep(); + return NULL; +}; + +static inline void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *ctx) { }; + +u64 resctrl_arch_get_prefetch_disable_bits(void); +int resctrl_arch_pseudo_lock_fn(void *_plr); +int resctrl_arch_measure_cycles_lat_fn(void *_plr); +int resctrl_arch_measure_l2_residency(void *_plr); +int resctrl_arch_measure_l3_residency(void *_plr); void resctrl_cpu_detect(struct cpuinfo_x86 *c); +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); + #else -static inline void resctrl_sched_in(struct task_struct *tsk) {} +static inline void resctrl_arch_sched_in(struct task_struct *tsk) {} static inline void resctrl_cpu_detect(struct cpuinfo_x86 *c) {} #endif /* CONFIG_X86_CPU_RESCTRL */ diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h index f3495623ac997222fe62a9b571c656b6fcbc7fbb..5c83729c8e71ff5a287095211829cf197365a728 100644 --- a/arch/x86/include/asm/setup.h +++ b/arch/x86/include/asm/setup.h @@ -31,8 +31,6 @@ #include #include -extern u64 relocated_ramdisk; - /* Interrupt control for vSMPowered x86_64 systems */ #ifdef CONFIG_X86_64 void vsmp_init(void); @@ -126,6 +124,7 @@ void clear_bss(void); #ifdef __i386__ asmlinkage void __init __noreturn i386_start_kernel(void); +void __init mk_early_pgtbl_32(void); #else asmlinkage void __init __noreturn x86_64_start_kernel(char *real_mode); diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index 3ac0ffc4f3e202b18fab10ee6ecacb6b1ea11a91..24b6a7e60f33839619edc453c329a092d4e5c8fb 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -680,4 +680,24 @@ DEFINE_GHCB_ACCESSORS(sw_exit_info_2) DEFINE_GHCB_ACCESSORS(sw_scratch) DEFINE_GHCB_ACCESSORS(xcr0) +/* same as the ring buffer max num */ +#define SVM_RING_BUFFER_MAX 4094 + +struct csv_ringbuf_info_item { + struct page **pages; + uintptr_t hdr_vaddr; + uintptr_t trans_vaddr; + uintptr_t data_vaddr; + uintptr_t trans_uaddr; + uintptr_t hdr_uaddr; + unsigned long trans_len; + unsigned long hdr_len; + unsigned long n; +}; + +struct csv_ringbuf_infos { + struct csv_ringbuf_info_item *item[SVM_RING_BUFFER_MAX]; + int num; +}; + #endif
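(Illustrative aside, not part of the patch: walking the populated entries of a csv_ringbuf_infos; sketch only.)

/* Sketch only: visit every populated ring buffer item. */
static void example_walk_ringbuf(struct csv_ringbuf_infos *infos)
{
	int i;

	for (i = 0; i < infos->num; i++) {
		struct csv_ringbuf_info_item *item = infos->item[i];

		pr_debug("item %d: hdr len %lu, trans len %lu\n",
			 i, item->hdr_len, item->trans_len);
	}
}

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 5c367c1290c355fb3849800a38c88c3553175903..fd56282ee9a8da6172fc48105f0fd3b789674e0a 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -497,6 +497,7 @@ copy_mc_to_kernel(void *to, const void *from, unsigned len); unsigned long __must_check copy_mc_to_user(void __user *to, const void *from, unsigned len); +#define copy_mc_to_user copy_mc_to_user #endif /* diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 0e73616b82f3469f47877e0cc56ca6b79f034173..3a4f60f19de3efe789e0424bb9b4485c45d7d979 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -84,6 +84,11 @@ */ #define TERTIARY_EXEC_IPI_VIRT VMCS_CONTROL_BIT(IPI_VIRT) +/* + * Definitions of Zhaoxin 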
Tertiary Processor-Based VM-Execution Controls. + */ +#define ZX_TERTIARY_EXEC_GUEST_ZXPAUSE VMCS_CONTROL_BIT(GUEST_ZXPAUSE) + #define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING) #define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING) #define PIN_BASED_VIRTUAL_NMIS VMCS_CONTROL_BIT(VIRTUAL_NMIS) @@ -235,6 +240,7 @@ enum vmcs_field { TERTIARY_VM_EXEC_CONTROL_HIGH = 0x00002035, PID_POINTER_TABLE = 0x00002042, PID_POINTER_TABLE_HIGH = 0x00002043, + ZXPAUSE_VMEXIT_TSC = 0x00002200, GUEST_PHYSICAL_ADDRESS = 0x00002400, GUEST_PHYSICAL_ADDRESS_HIGH = 0x00002401, VMCS_LINK_POINTER = 0x00002800, @@ -284,6 +290,7 @@ enum vmcs_field { PLE_GAP = 0x00004020, PLE_WINDOW = 0x00004022, NOTIFY_WINDOW = 0x00004024, + ZX_TERTIARY_VM_EXEC_CONTROL = 0x00004200, VM_INSTRUCTION_ERROR = 0x00004400, VM_EXIT_REASON = 0x00004402, VM_EXIT_INTR_INFO = 0x00004404, diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h index c6a7eed039145be3964db90a6cac559e45d87040..ba209bdf57d9dc7c59b4cc8cb87432d0b674c708 100644 --- a/arch/x86/include/asm/vmxfeatures.h +++ b/arch/x86/include/asm/vmxfeatures.h @@ -5,7 +5,7 @@ /* * Defines VMX CPU feature bits */ -#define NVMXINTS 5 /* N 32-bit words worth of info */ +#define NVMXINTS 6 /* N 32-bit words worth of info */ /* * Note: If the comment begins with a quoted string, that string is used @@ -89,4 +89,8 @@ /* Tertiary Processor-Based VM-Execution Controls, word 3 */ #define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */ + +/* Zhaoxin Tertiary Processor-Based VM-Execution Controls, word 4 */ +#define VMX_FEATURE_GUEST_ZXPAUSE (4*32 + 0) /* zxpause instruction in guest mode */ + #endif /* _ASM_X86_VMXFEATURES_H */ diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 3269a0e23d3ab86752380175352106085bde640a..2b433325ca8fc31517455af10e40a31c12dfb220 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -16,6 +16,7 @@ CFLAGS_REMOVE_kvmclock.o = -pg CFLAGS_REMOVE_ftrace.o = -pg CFLAGS_REMOVE_early_printk.o = -pg CFLAGS_REMOVE_head64.o = -pg +CFLAGS_REMOVE_head32.o = -pg CFLAGS_REMOVE_sev.o = -pg CFLAGS_REMOVE_rethook.o = -pg endif @@ -158,4 +159,7 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o + obj-$(CONFIG_PCI) += zhaoxin_kh40000.o endif + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c index 401808b47af3edd6bf06ab9f83b846c5a939ae2d..90f22148acc798aee67fcd5a0f3c9713ed55186f 100644 --- a/arch/x86/kernel/acpi/cstate.c +++ b/arch/x86/kernel/acpi/cstate.c @@ -221,7 +221,9 @@ static int __init ffh_cstate_init(void) if (c->x86_vendor != X86_VENDOR_INTEL && c->x86_vendor != X86_VENDOR_AMD && - c->x86_vendor != X86_VENDOR_HYGON) + c->x86_vendor != X86_VENDOR_HYGON && + c->x86_vendor != X86_VENDOR_CENTAUR && + c->x86_vendor != X86_VENDOR_ZHAOXIN) return -1; cpu_cstate_entry = alloc_percpu(struct cstate_entry); diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index cab4d8b1535d61371e785a7cd551f299cb663232..9f759c7bf0370b3a86c0ba52acf55f43bdd696f7 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c @@ -44,10 +44,17 @@ #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F4 0x12c4 #define PCI_DEVICE_ID_AMD_MI200_DF_F4 0x14d4 +#define PCI_DEVICE_ID_HYGON_18H_M05H_ROOT 0x14a0 +#define PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1 0x1491 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1 0x14b1 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4 0x14b4 +#define PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5 0x14b5 
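(Illustrative aside, not part of the patch: the new device IDs above are matched against sibling DF functions on the same domain, bus and slot, as get_df_register() does below; standalone sketch.)

/* Sketch only: find the DF function with @device sitting next to @misc. */
static struct pci_dev *example_find_df_func(struct pci_dev *misc, u32 device)
{
	struct pci_dev *df = NULL;

	while ((df = pci_get_device(misc->vendor, device, df)))
		if (pci_domain_nr(df->bus) == pci_domain_nr(misc->bus) &&
		    df->bus->number == misc->bus->number &&
		    PCI_SLOT(df->devfn) == PCI_SLOT(misc->devfn))
			break;

	return df;
}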
+ /* Protect the PCI config register pairs used for SMN. */ static DEFINE_MUTEX(smn_mutex); static u32 *flush_words; +static u16 nb_num; static const struct pci_device_id amd_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) }, @@ -123,16 +130,22 @@ static const struct pci_device_id amd_nb_link_ids[] = { static const struct pci_device_id hygon_root_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_ROOT) }, {} }; static const struct pci_device_id hygon_nb_misc_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; static const struct pci_device_id hygon_nb_link_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) }, + { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F4) }, {} }; @@ -219,6 +232,226 @@ int amd_smn_write(u16 node, u32 address, u32 value) } EXPORT_SYMBOL_GPL(amd_smn_write); +bool hygon_f18h_m4h(void) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(hygon_f18h_m4h); + +u16 hygon_nb_num(void) +{ + return nb_num; +} +EXPORT_SYMBOL_GPL(hygon_nb_num); + +static int get_df_register(struct pci_dev *misc, u8 func, int offset, u32 *value) +{ + struct pci_dev *df_func = NULL; + u32 device; + int err; + + if (func == 1) { + switch (boot_cpu_data.x86_model) { + case 0x4: + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x5: + if (misc->device == PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + else + device = PCI_DEVICE_ID_HYGON_18H_M04H_DF_F1; + break; + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M05H_DF_F1; + break; + default: + return -ENODEV; + } + } else if (func == 5) { + switch (boot_cpu_data.x86_model) { + case 0x6: + device = PCI_DEVICE_ID_HYGON_18H_M06H_DF_F5; + break; + default: + return -ENODEV; + } + } else { + return -ENODEV; + } + + while ((df_func = pci_get_device(misc->vendor, device, df_func))) + if (pci_domain_nr(df_func->bus) == pci_domain_nr(misc->bus) && + df_func->bus->number == misc->bus->number && + PCI_SLOT(df_func->devfn) == PCI_SLOT(misc->devfn)) + break; + + if (!df_func) { + pr_warn("Error getting DF F%d device.\n", func); + return -ENODEV; + } + + err = pci_read_config_dword(df_func, offset, value); + if (err) + pr_warn("Error reading DF F%d register.\n", func); + + return err; +} + +int get_df_id(struct pci_dev *misc, u8 *id) +{ + u32 value; + int ret; + + if (boot_cpu_data.x86_model == 0x6) { + /* F5x180[19:16]: DF ID */ + ret = get_df_register(misc, 5, 0x180, &value); + *id = (value >> 16) & 0xf; + } else { + /* F1x200[23:20]: DF ID */ + ret = get_df_register(misc, 1, 0x200, &value); + *id = (value >> 20) & 0xf; + } + + return ret; +} +EXPORT_SYMBOL_GPL(get_df_id); + +static u8 get_socket_num(struct pci_dev *misc) +{ + u32 value; + int ret; + + /* F1x200[7:0]: Which socket is present. */ + ret = get_df_register(misc, 1, 0x200, &value); + + return ret ? 
0 : hweight8(value & 0xff); +} + +static int northbridge_init_f18h_m4h(const struct pci_device_id *root_ids, + const struct pci_device_id *misc_ids, + const struct pci_device_id *link_ids) +{ + struct pci_dev *root, *misc, *link; + struct pci_dev *root_first = NULL; + struct amd_northbridge *nb; + u16 roots_per_socket = 0; + u16 miscs_per_socket = 0; + u16 socket_num = 0; + u16 root_count = 0; + u16 misc_count = 0; + int err = -ENODEV; + u8 i, j, m, n; + u8 id; + + pr_info("Hygon Fam%xh Model%xh NB driver.\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); + + misc = next_northbridge(NULL, misc_ids); + if (misc != NULL) { + socket_num = get_socket_num(misc); + pr_info("Socket number: %d\n", socket_num); + if (!socket_num) { + err = -ENODEV; + goto ret; + } + } else { + err = -ENODEV; + goto ret; + } + + misc = NULL; + while ((misc = next_northbridge(misc, misc_ids)) != NULL) + misc_count++; + + root = NULL; + while ((root = next_northbridge(root, root_ids)) != NULL) + root_count++; + + if (!root_count || !misc_count) { + err = -ENODEV; + goto ret; + } + + /* + * There should be _exactly_ N roots for each DF/SMN + * interface, and M DF/SMN interfaces in one socket. + */ + roots_per_socket = root_count / socket_num; + miscs_per_socket = misc_count / socket_num; + + if (!roots_per_socket || !miscs_per_socket) { + err = -ENODEV; + goto ret; + } + + nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL); + if (!nb) { + err = -ENOMEM; + goto ret; + } + + amd_northbridges.nb = nb; + amd_northbridges.num = misc_count; + + link = misc = root = NULL; + j = m = n = 0; + for (i = 0; i < amd_northbridges.num; i++) { + misc = next_northbridge(misc, misc_ids); + link = next_northbridge(link, link_ids); + + /* Only save the first PCI root device for each socket. */ + if (!(i % miscs_per_socket)) { + root_first = next_northbridge(root, root_ids); + root = root_first; + j = 1; + } + + if (get_df_id(misc, &id)) { + err = -ENODEV; + goto err; + } + pr_info("DF ID: %d\n", id); + + if (id < 4) { + /* Add the devices with id<4 from the tail. */ + node_to_amd_nb(misc_count - m - 1)->misc = misc; + node_to_amd_nb(misc_count - m - 1)->link = link; + node_to_amd_nb(misc_count - m - 1)->root = root_first; + m++; + } else { + node_to_amd_nb(n)->misc = misc; + node_to_amd_nb(n)->link = link; + node_to_amd_nb(n)->root = root_first; + n++; + } + + /* Skip the redundant PCI root devices per socket. 
*/ + while (j < roots_per_socket) { + root = next_northbridge(root, root_ids); + j++; + } + } + nb_num = n; + + return 0; + +err: + kfree(nb); + amd_northbridges.nb = NULL; + +ret: + pr_err("Hygon Fam%xh Model%xh northbridge init failed(%d)!\n", + boot_cpu_data.x86, boot_cpu_data.x86_model, err); + return err; +} static int amd_cache_northbridges(void) { @@ -239,6 +472,11 @@ static int amd_cache_northbridges(void) root_ids = hygon_root_ids; misc_ids = hygon_nb_misc_ids; link_ids = hygon_nb_link_ids; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) + return northbridge_init_f18h_m4h(root_ids, + misc_ids, link_ids); } misc = NULL; diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c index 032a84e2c3ccc7e345ed0c05193965549989510f..cd16228611ce8fcd356e281c1003391963674049 100644 --- a/arch/x86/kernel/apic/apic_flat_64.c +++ b/arch/x86/kernel/apic/apic_flat_64.c @@ -103,6 +103,7 @@ static struct apic apic_flat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, @@ -175,6 +176,7 @@ static struct apic apic_physflat __ro_after_init = { .send_IPI_allbutself = default_send_IPI_allbutself, .send_IPI_all = default_send_IPI_all, .send_IPI_self = default_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_mem_read, .write = native_apic_mem_write, diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c index a44ba7209ef3a854e36444af9ad53c7314772fbb..edad86f32e38cb5fb36656494f4d2472d8ab7030 100644 --- a/arch/x86/kernel/apic/ipi.c +++ b/arch/x86/kernel/apic/ipi.c @@ -97,6 +97,14 @@ void native_send_call_func_ipi(const struct cpumask *mask) __apic_send_IPI_mask(mask, CALL_FUNCTION_VECTOR); } +void apic_send_nmi_to_offline_cpu(unsigned int cpu) +{ + if (WARN_ON_ONCE(!apic->nmi_to_offline_cpu)) + return; + if (WARN_ON_ONCE(!cpumask_test_cpu(cpu, &cpus_booted_once_mask))) + return; + apic->send_IPI(cpu, NMI_VECTOR); +} #endif /* CONFIG_SMP */ static inline int __prepare_ICR2(unsigned int mask) diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index affbff65e49713d80e88f4097e83b031494c369d..a8306089c91bca12277b904e1b79ad38f1b85122 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -251,6 +251,7 @@ static struct apic apic_x2apic_cluster __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index 788cdb4ee394dae662e4618644e8e240b3666429..c8ac1b12b8ac6c2f8e68cecbc61237e30195babc 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -166,6 +166,7 @@ static struct apic apic_x2apic_phys __ro_after_init = { .send_IPI_allbutself = x2apic_send_IPI_allbutself, .send_IPI_all = x2apic_send_IPI_all, .send_IPI_self = x2apic_send_IPI_self, + .nmi_to_offline_cpu = true, .read = native_apic_msr_read, .write = native_apic_msr_write, diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 4350f6bfc0641ea7a6877cd9a224055a456753bd..dec6b0d9e711ba3e0cfd690411d3bff45edcd67f 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile 
@@ -25,6 +25,7 @@ obj-y += bugs.o obj-y += aperfmperf.o obj-y += cpuid-deps.o obj-y += umwait.o +obj-$(CONFIG_CPU_SUP_ZHAOXIN) += zxpause.o obj-$(CONFIG_PROC_FS) += proc.o obj-y += capflags.o powerflags.o diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c index 8f86eacf69f7c965a92996ddf669c02b2d62b751..7c4ce361c728cc53ae0a8e98eb97ab1ca1cc21db 100644 --- a/arch/x86/kernel/cpu/cacheinfo.c +++ b/arch/x86/kernel/cpu/cacheinfo.c @@ -708,11 +708,30 @@ void cacheinfo_hygon_init_llc_id(struct cpuinfo_x86 *c, int cpu) if (!cpuid_edx(0x80000006)) return; - /* - * LLC is at the core complex level. - * Core complex ID is ApicId[3] for these processors. - */ - per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + if (c->x86_model < 0x5) { + /* + * LLC is at the core complex level. + * Core complex ID is ApicId[3] for these processors. + */ + per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; + } else { + /* + * LLC ID is calculated from the number of threads + * sharing the cache. + */ + u32 eax, ebx, ecx, edx, num_sharing_cache = 0; + u32 llc_index = find_num_cache_leaves(c) - 1; + + cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx); + if (eax) + num_sharing_cache = ((eax >> 14) & 0xfff) + 1; + + if (num_sharing_cache) { + int bits = get_count_order(num_sharing_cache); + + per_cpu(cpu_llc_id, cpu) = c->apicid >> bits; + } + } } void init_amd_cacheinfo(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 345f7d905db677291f7f8eb9b33b692263afe447..b15bcf21ac7b32704e21ffb79fba0b2a2dfe7f43 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -109,6 +109,19 @@ static void early_init_centaur(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); } + + /* + * These CPUs declare support for the SSE4.2 instruction set but + * have a low-performance CRC32C instruction implementation.
+ */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_centaur(struct cpuinfo_x86 *c) @@ -127,11 +140,14 @@ static void init_centaur(struct cpuinfo_x86 *c) clear_cpu_cap(c, 0*32+31); #endif early_init_centaur(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 4e5ffc8b0e469d142c28667359a1f4719f942fd7..bd631f1320ce6de243beae966d6465d6187cc8f0 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -2166,8 +2166,6 @@ static inline void setup_getcpu(int cpu) } #ifdef CONFIG_X86_64 -static inline void ucode_cpu_init(int cpu) { } - static inline void tss_setup_ist(struct tss_struct *tss) { /* Set up the per-CPU TSS IST stacks */ @@ -2178,16 +2176,8 @@ static inline void tss_setup_ist(struct tss_struct *tss) /* Only mapped when SEV-ES is active */ tss->x86_tss.ist[IST_INDEX_VC] = __this_cpu_ist_top_va(VC); } - #else /* CONFIG_X86_64 */ - -static inline void ucode_cpu_init(int cpu) -{ - show_ucode_info_early(); -} - static inline void tss_setup_ist(struct tss_struct *tss) { } - #endif /* !CONFIG_X86_64 */ static inline void tss_setup_io_bitmap(struct tss_struct *tss) @@ -2243,8 +2233,6 @@ void cpu_init(void) struct task_struct *cur = current; int cpu = raw_smp_processor_id(); - ucode_cpu_init(cpu); - #ifdef CONFIG_NUMA if (this_cpu_read(numa_node) == 0 && early_cpu_to_node(cpu) != NUMA_NO_NODE) diff --git a/arch/x86/kernel/cpu/cpuid-deps.c b/arch/x86/kernel/cpu/cpuid-deps.c index e462c1d3800a6cb47c7d486a2a684278fd4cca1e..4f559eb4952577eaedb252fa57136f877b406a23 100644 --- a/arch/x86/kernel/cpu/cpuid-deps.c +++ b/arch/x86/kernel/cpu/cpuid-deps.c @@ -82,6 +82,7 @@ static const struct cpuid_dep cpuid_deps[] = { { X86_FEATURE_XFD, X86_FEATURE_XGETBV1 }, { X86_FEATURE_AMX_TILE, X86_FEATURE_XFD }, { X86_FEATURE_SHSTK, X86_FEATURE_XSAVES }, + { X86_FEATURE_CRC32C_LOW_PERF, X86_FEATURE_XMM4_2 }, {} }; diff --git a/arch/x86/kernel/cpu/feat_ctl.c b/arch/x86/kernel/cpu/feat_ctl.c index 03851240c3e36d4ed5e9ad250eee76410830d6e9..3e0fbf510f1c6db7aee1e4fcacf6c3269174d415 100644 --- a/arch/x86/kernel/cpu/feat_ctl.c +++ b/arch/x86/kernel/cpu/feat_ctl.c @@ -17,6 +17,7 @@ enum vmx_feature_leafs { SECONDARY_CTLS, TERTIARY_CTLS_LOW, TERTIARY_CTLS_HIGH, + ZX_TERTIARY_CTLS, NR_VMX_FEATURE_WORDS, }; @@ -97,6 +98,15 @@ static void init_vmx_capabilities(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_EPT_AD); if (c->vmx_capability[MISC_FEATURES] & VMX_F(VPID)) set_cpu_cap(c, X86_FEATURE_VPID); + /* + * Initialize Zhaoxin Tertiary Exec Control feature flags. 
+ */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &supported, &ign); + if (supported & MSR_ZX_VMCS_EXEC_CTL3) + c->vmx_capability[ZX_TERTIARY_CTLS] |= VMX_F(GUEST_ZXPAUSE); + } } #endif /* CONFIG_X86_VMX_FEATURE_NAMES */ diff --git a/arch/x86/kernel/cpu/hygon.c b/arch/x86/kernel/cpu/hygon.c index a7b3ef4c4de91e500e1aad56d6337e893f73999d..69d14232a1fc81336ed6f9a6d6d7069e498fad72 100644 --- a/arch/x86/kernel/cpu/hygon.c +++ b/arch/x86/kernel/cpu/hygon.c @@ -15,6 +15,7 @@ #include #include #include +#include #include "cpu.h" @@ -80,12 +81,14 @@ static void hygon_get_topology(struct cpuinfo_x86 *c) c->x86_max_cores /= smp_num_siblings; /* - * In case leaf B is available, use it to derive + * From model 0x4, leaf B is available, so use it to derive * topology information. */ err = detect_extended_topology(c); - if (!err) + if (!err) { c->x86_coreid_bits = get_count_order(c->x86_max_cores); + __max_die_per_package = nodes_per_socket; + } /* * Socket ID is ApicId[6] for the processors with model <= 0x3 @@ -240,6 +243,68 @@ static void bsp_init_hygon(struct cpuinfo_x86 *c) x86_amd_ls_cfg_ssbd_mask = 1ULL << 10; } } + resctrl_cpu_detect(c); +} + +static void init_hygon_cap(struct cpuinfo_x86 *c) +{ + /* Test for Extended Feature Flags presence */ + if (cpuid_eax(0x8C860000) >= 0x8C860000) { + /* + * Store Extended Feature Flags into the CPU capability + * bit array + */ + c->x86_capability[CPUID_8C86_0000_EDX] = cpuid_edx(0x8C860000); + } +} +
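(Illustrative aside, not part of the patch: once the vendor leaf is cached in word CPUID_8C86_0000_EDX, the new flags are tested through the usual cpufeature helpers; the function name is made up for the example.)

/* Sketch only: word 21 carries the Hygon 0x8c860000 EDX feature flags. */
static bool example_has_sm3_sm4(void)
{
	return boot_cpu_has(X86_FEATURE_SM3) && boot_cpu_has(X86_FEATURE_SM4);
}

+static void early_detect_mem_encrypt(struct cpuinfo_x86 *c) +{ + u64 msr; + u32 eax; + + eax = cpuid_eax(0x8000001f); + + /* Check whether SME or CSV is supported */ + if (!(eax & (BIT(0) | BIT(1)))) + return; + + /* If BIOS has not enabled SME then don't advertise the SME feature. */ + rdmsrl(MSR_AMD64_SYSCFG, msr); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + goto clear_all; + + /* + * Always adjust physical address bits. Even though this will be a + * value above 32-bits this is still done for CONFIG_X86_32 so that + * accurate values are reported. + */ + c->x86_phys_bits -= (cpuid_ebx(0x8000001f) >> 6) & 0x3f; + + /* Don't advertise SME and CSV features under CONFIG_X86_32. */ + if (IS_ENABLED(CONFIG_X86_32)) + goto clear_all; + + /* Clear the SME feature flag if the kernel is not using it. */ + if (!sme_me_mask) + setup_clear_cpu_cap(X86_FEATURE_SME); + + /* + * If BIOS has not enabled CSV then don't advertise the CSV and CSV2 + * feature. 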
+ */ + rdmsrl(MSR_K7_HWCR, msr); + if (!(msr & MSR_K7_HWCR_SMMLOCK)) + goto clear_csv; + + return; + +clear_all: + setup_clear_cpu_cap(X86_FEATURE_SME); +clear_csv: + setup_clear_cpu_cap(X86_FEATURE_SEV); + setup_clear_cpu_cap(X86_FEATURE_SEV_ES); + setup_clear_cpu_cap(X86_FEATURE_CSV3); } static void early_init_hygon(struct cpuinfo_x86 *c) @@ -290,6 +355,8 @@ static void early_init_hygon(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_VMMCALL); hygon_get_topology_early(c); + + early_detect_mem_encrypt(c); } static void init_hygon(struct cpuinfo_x86 *c) @@ -348,6 +415,8 @@ static void init_hygon(struct cpuinfo_x86 *c) set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS); check_null_seg_clears_base(c); + + init_hygon_cap(c); } static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/mce/core.c b/arch/x86/kernel/cpu/mce/core.c index 6f35f724cc142e44f3a3a2adbfbaa7aefc6c57b4..04ed23fb54e1d001cb7e27c3bdd2f672cdb9830e 100644 --- a/arch/x86/kernel/cpu/mce/core.c +++ b/arch/x86/kernel/cpu/mce/core.c @@ -466,7 +466,8 @@ int mce_usable_address(struct mce *m) /* Checks after this one are Intel/Zhaoxin-specific: */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 1; if (!(m->status & MCI_STATUS_MISCV)) @@ -490,6 +491,7 @@ bool mce_is_memory_error(struct mce *m) return amd_mce_is_memory_error(m); case X86_VENDOR_INTEL: + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: /* * Intel SDM Volume 3B - 15.9.2 Compound Error Codes @@ -1215,7 +1217,8 @@ static noinstr bool mce_check_crashing_cpu(void) mcgstatus = __rdmsr(MSR_IA32_MCG_STATUS); - if (boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { if (mcgstatus & MCG_STATUS_LMCES) return false; } @@ -1489,7 +1492,8 @@ noinstr void do_machine_check(struct pt_regs *regs) * on Intel, Zhaoxin only. */ if (m.cpuvendor == X86_VENDOR_INTEL || - m.cpuvendor == X86_VENDOR_ZHAOXIN) + m.cpuvendor == X86_VENDOR_CENTAUR || + m.cpuvendor == X86_VENDOR_ZHAOXIN) lmce = m.mcgstatus & MCG_STATUS_LMCES; /* @@ -1916,7 +1920,8 @@ static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) mce_flags.skx_repmov_quirk = 1; } - if (c->x86_vendor == X86_VENDOR_ZHAOXIN) { + if (c->x86_vendor == X86_VENDOR_CENTAUR || + c->x86_vendor == X86_VENDOR_ZHAOXIN) { /* * All newer Zhaoxin CPUs support MCE broadcasting. Enable * synchronization with a one second timeout. @@ -1969,21 +1974,6 @@ static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c) } } -static void mce_centaur_feature_init(struct cpuinfo_x86 *c) -{ - struct mca_config *cfg = &mca_cfg; - - /* - * All newer Centaur CPUs support MCE broadcasting. Enable - * synchronization with a one second timeout. 
- */ - if ((c->x86 == 6 && c->x86_model == 0xf && c->x86_stepping >= 0xe) || - c->x86 > 6) { - if (cfg->monarch_timeout < 0) - cfg->monarch_timeout = USEC_PER_SEC; - } -} - static void mce_zhaoxin_feature_init(struct cpuinfo_x86 *c) { struct mce_bank *mce_banks = this_cpu_ptr(mce_banks_array); @@ -2031,9 +2021,6 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) break; case X86_VENDOR_CENTAUR: - mce_centaur_feature_init(c); - break; - case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_init(c); break; @@ -2050,6 +2037,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) mce_intel_feature_clear(c); break; + case X86_VENDOR_CENTAUR: case X86_VENDOR_ZHAOXIN: mce_zhaoxin_feature_clear(c); break; @@ -2333,9 +2321,10 @@ static void vendor_disable_error_reporting(void) * controller (iMC), etc. */ if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL || - boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || - boot_cpu_data.x86_vendor == X86_VENDOR_AMD || - boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON || + boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) return; mce_disable_error_reporting(); diff --git a/arch/x86/kernel/cpu/mce/intel.c b/arch/x86/kernel/cpu/mce/intel.c index f5323551c1a9a9aab0a87de057db19f6e1819b94..e013dd5162fcbd86e48276be6931a54d27095743 100644 --- a/arch/x86/kernel/cpu/mce/intel.c +++ b/arch/x86/kernel/cpu/mce/intel.c @@ -93,7 +93,8 @@ static int cmci_supported(int *banks) * makes sure none of the backdoors are entered otherwise. */ if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && - boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) return 0; if (!boot_cpu_has(X86_FEATURE_APIC) || lapic_get_maxlvt() < 6) diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c index bbd1dc38ea0316c9ce22c042b4d1e235a3760571..2ba4f7dd445a7586d8abc48bdc61b89359ed5216 100644 --- a/arch/x86/kernel/cpu/microcode/amd.c +++ b/arch/x86/kernel/cpu/microcode/amd.c @@ -37,6 +37,16 @@ #include "internal.h" +struct ucode_patch { + struct list_head plist; + void *data; + unsigned int size; + u32 patch_id; + u16 equiv_cpu; +}; + +static LIST_HEAD(microcode_cache); + #define UCODE_MAGIC 0x00414d44 #define UCODE_EQUIV_CPU_TABLE_TYPE 0x00000000 #define UCODE_UCODE_TYPE 0x00000001 @@ -121,24 +131,20 @@ static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig) /* * Check whether there is a valid microcode container file at the beginning - * of @buf of size @buf_size. Set @early to use this function in the early path. + * of @buf of size @buf_size. */ -static bool verify_container(const u8 *buf, size_t buf_size, bool early) +static bool verify_container(const u8 *buf, size_t buf_size) { u32 cont_magic; if (buf_size <= CONTAINER_HDR_SZ) { - if (!early) - pr_debug("Truncated microcode container header.\n"); - + pr_debug("Truncated microcode container header.\n"); return false; } cont_magic = *(const u32 *)buf; if (cont_magic != UCODE_MAGIC) { - if (!early) - pr_debug("Invalid magic value (0x%08x).\n", cont_magic); - + pr_debug("Invalid magic value (0x%08x).\n", cont_magic); return false; } @@ -147,23 +153,20 @@ static bool verify_container(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated CPU equivalence table at the - * beginning of @buf of size @buf_size. 
Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. */ -static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) +static bool verify_equivalence_table(const u8 *buf, size_t buf_size) { const u32 *hdr = (const u32 *)buf; u32 cont_type, equiv_tbl_len; - if (!verify_container(buf, buf_size, early)) + if (!verify_container(buf, buf_size)) return false; cont_type = hdr[1]; if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) { - if (!early) - pr_debug("Wrong microcode container equivalence table type: %u.\n", - cont_type); - + pr_debug("Wrong microcode container equivalence table type: %u.\n", + cont_type); return false; } @@ -172,9 +175,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) equiv_tbl_len = hdr[2]; if (equiv_tbl_len < sizeof(struct equiv_cpu_entry) || buf_size < equiv_tbl_len) { - if (!early) - pr_debug("Truncated equivalence table.\n"); - + pr_debug("Truncated equivalence table.\n"); return false; } @@ -183,22 +184,19 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size, bool early) /* * Check whether there is a valid, non-truncated microcode patch section at the - * beginning of @buf of size @buf_size. Set @early to use this function in the - * early path. + * beginning of @buf of size @buf_size. * * On success, @sh_psize returns the patch size according to the section header, * to the caller. */ static bool -__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early) +__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize) { u32 p_type, p_size; const u32 *hdr; if (buf_size < SECTION_HDR_SIZE) { - if (!early) - pr_debug("Truncated patch section.\n"); - + pr_debug("Truncated patch section.\n"); return false; } @@ -207,17 +205,13 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize, bool early p_size = hdr[1]; if (p_type != UCODE_UCODE_TYPE) { - if (!early) - pr_debug("Invalid type field (0x%x) in container file section header.\n", - p_type); - + pr_debug("Invalid type field (0x%x) in container file section header.\n", + p_type); return false; } if (p_size < sizeof(struct microcode_header_amd)) { - if (!early) - pr_debug("Patch of size %u too short.\n", p_size); - + pr_debug("Patch of size %u too short.\n", p_size); return false; } @@ -269,7 +263,7 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size * 0: success */ static int -verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool early) +verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size) { struct microcode_header_amd *mc_hdr; unsigned int ret; @@ -277,7 +271,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea u16 proc_id; u8 patch_fam; - if (!__verify_patch_section(buf, buf_size, &sh_psize, early)) + if (!__verify_patch_section(buf, buf_size, &sh_psize)) return -1; /* @@ -292,16 +286,13 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea * size sh_psize, as the section claims. 
*/ if (buf_size < sh_psize) { - if (!early) - pr_debug("Patch of size %u truncated.\n", sh_psize); - + pr_debug("Patch of size %u truncated.\n", sh_psize); return -1; } ret = __verify_patch_size(family, sh_psize, buf_size); if (!ret) { - if (!early) - pr_debug("Per-family patch size mismatch.\n"); + pr_debug("Per-family patch size mismatch.\n"); return -1; } @@ -309,8 +300,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size, bool ea mc_hdr = (struct microcode_header_amd *)(buf + SECTION_HDR_SIZE); if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { - if (!early) - pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); + pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n", mc_hdr->patch_id); return -1; } @@ -337,7 +327,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u16 eq_id; u8 *buf; - if (!verify_equivalence_table(ucode, size, true)) + if (!verify_equivalence_table(ucode, size)) return 0; buf = ucode; @@ -364,7 +354,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc) u32 patch_size; int ret; - ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size, true); + ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size); if (ret < 0) { /* * Patch verification failed, skip to the next container, if @@ -456,14 +446,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) { struct cont_desc desc = { 0 }; struct microcode_amd *mc; - u32 rev, dummy, *new_rev; bool ret = false; - -#ifdef CONFIG_X86_32 - new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); -#else - new_rev = &ucode_new_rev; -#endif + u32 rev, dummy; desc.cpuid_1_eax = cpuid_1_eax; @@ -484,8 +468,8 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) return ret; if (!__apply_microcode_amd(mc)) { - *new_rev = mc->hdr.patch_id; - ret = true; + ucode_new_rev = mc->hdr.patch_id; + ret = true; } return ret; @@ -493,15 +477,18 @@ static bool early_apply_microcode(u32 cpuid_1_eax, void *ucode, size_t size) static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct firmware fw; if (IS_ENABLED(CONFIG_X86_32)) return false; - if (family >= 0x15) + if (x86_cpuid_vendor() == X86_VENDOR_AMD && family >= 0x15) snprintf(fw_name, sizeof(fw_name), - "amd-ucode/microcode_amd_fam%.2xh.bin", family); + "amd-ucode/microcode_amd_fam%02hhxh.bin", family); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%02hhxh.bin", family); if (firmware_request_builtin(&fw, fw_name)) { cp->size = fw.size; @@ -512,36 +499,23 @@ static bool get_builtin_microcode(struct cpio_data *cp, unsigned int family) return false; } -static void find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) +static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret) { - struct ucode_cpu_info *uci; struct cpio_data cp; - const char *path; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - uci = (struct ucode_cpu_info *)__pa_nodebug(ucode_cpu_info); - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - uci = ucode_cpu_info; - path = ucode_path; - use_pa = false; - } if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax))) - cp = find_microcode_in_initrd(path, use_pa); - - /* Needed in load_microcode_amd() */ - 
uci->cpu_sig.sig = cpuid_1_eax; + cp = find_microcode_in_initrd(ucode_path); *ret = cp; } -static void apply_ucode_from_containers(unsigned int cpuid_1_eax) +void __init load_ucode_amd_bsp(unsigned int cpuid_1_eax) { struct cpio_data cp = { }; + /* Needed in load_microcode_amd() */ + ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax; + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return; @@ -549,20 +523,21 @@ static void apply_ucode_from_containers(unsigned int cpuid_1_eax) early_apply_microcode(cpuid_1_eax, cp.data, cp.size); } -void load_ucode_amd_early(unsigned int cpuid_1_eax) -{ - return apply_ucode_from_containers(cpuid_1_eax); -} - static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); -int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) +static int __init save_microcode_in_initrd(void) { + unsigned int cpuid_1_eax = native_cpuid_eax(1); + struct cpuinfo_x86 *c = &boot_cpu_data; struct cont_desc desc = { 0 }; enum ucode_state ret; struct cpio_data cp; - cp = find_microcode_in_initrd(ucode_path, false); + if (dis_ucode_ldr || ((c->x86_vendor != X86_VENDOR_AMD || + c->x86 < 0x10) && (c->x86_vendor != X86_VENDOR_HYGON))) + return 0; + + find_blobs_in_containers(cpuid_1_eax, &cp); if (!(cp.data && cp.size)) return -EINVAL; @@ -578,6 +553,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax) return 0; } +early_initcall(save_microcode_in_initrd); /* * a small, trivial cache of per-family ucode patches @@ -631,7 +607,6 @@ static struct ucode_patch *find_patch(unsigned int cpu) struct ucode_cpu_info *uci = ucode_cpu_info + cpu; u16 equiv_id; - equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig); if (!equiv_id) return NULL; @@ -733,12 +708,20 @@ static enum ucode_state apply_microcode_amd(int cpu) return ret; } +void load_ucode_amd_ap(unsigned int cpuid_1_eax) +{ + unsigned int cpu = smp_processor_id(); + + ucode_cpu_info[cpu].cpu_sig.sig = cpuid_1_eax; + apply_microcode_amd(cpu); +} + static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size) { u32 equiv_tbl_len; const u32 *hdr; - if (!verify_equivalence_table(buf, buf_size, false)) + if (!verify_equivalence_table(buf, buf_size)) return 0; hdr = (const u32 *)buf; @@ -784,7 +767,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover, u16 proc_id; int ret; - ret = verify_patch(family, fw, leftover, patch_size, false); + ret = verify_patch(family, fw, leftover, patch_size); if (ret) return ret; @@ -904,13 +887,20 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz */ static enum ucode_state request_microcode_amd(int cpu, struct device *device) { - char fw_name[36] = "amd-ucode/microcode_amd.bin"; + char fw_name[40] = "amd-ucode/microcode_amd.bin"; struct cpuinfo_x86 *c = &cpu_data(cpu); enum ucode_state ret = UCODE_NFOUND; const struct firmware *fw; - if (c->x86 >= 0x15) - snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + if (force_minrev) + return UCODE_NFOUND; + + if (x86_cpuid_vendor() == X86_VENDOR_AMD && c->x86 >= 0x15) + snprintf(fw_name, sizeof(fw_name), + "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86); + else if (x86_cpuid_vendor() == X86_VENDOR_HYGON) + snprintf(fw_name, sizeof(fw_name), + "hygon-ucode/microcode_hygon_fam%.2xh.bin", c->x86); if (request_firmware_direct(&fw, (const char *)fw_name, device)) { pr_debug("failed to load file %s\n", fw_name); @@ -918,7 +908,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device) 
} ret = UCODE_ERROR; - if (!verify_container(fw->data, fw->size, false)) + if (!verify_container(fw->data, fw->size)) goto fw_release; ret = load_microcode_amd(c->x86, fw->data, fw->size); @@ -938,10 +928,11 @@ static void microcode_fini_cpu_amd(int cpu) } static struct microcode_ops microcode_amd_ops = { - .request_microcode_fw = request_microcode_amd, - .collect_cpu_info = collect_cpu_info_amd, - .apply_microcode = apply_microcode_amd, - .microcode_fini_cpu = microcode_fini_cpu_amd, + .request_microcode_fw = request_microcode_amd, + .collect_cpu_info = collect_cpu_info_amd, + .apply_microcode = apply_microcode_amd, + .microcode_fini_cpu = microcode_fini_cpu_amd, + .nmi_safe = true, }; struct microcode_ops * __init init_amd_microcode(void) @@ -960,6 +951,22 @@ struct microcode_ops * __init init_amd_microcode(void) return &microcode_amd_ops; } +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops * __init init_hygon_microcode(void) +{ + struct cpuinfo_x86 *c = &boot_cpu_data; + + if (c->x86_vendor != X86_VENDOR_HYGON) + return NULL; + + if (ucode_new_rev) + pr_info_once("microcode updated early to new patch_level=0x%08x\n", + ucode_new_rev); + + return &microcode_amd_ops; +} +#endif + void __exit exit_amd_microcode(void) { cleanup(); diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 6cc7a2c181da5fa9ad220c0872ac7c5e84d0648a..7196ad323c4b28659f6e3042cf32360e4cc07430 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include +#include #include #include #include @@ -41,12 +43,15 @@ #define DRIVER_VERSION "2.2" +#ifdef CONFIG_CPU_SUP_HYGON +static const struct microcode_ops *microcode_ops; +#else static struct microcode_ops *microcode_ops; -static bool dis_ucode_ldr = true; - -bool initrd_gone; +#endif +bool dis_ucode_ldr = true; -LIST_HEAD(microcode_cache); +bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV); +module_param(force_minrev, bool, S_IRUSR | S_IWUSR); /* * Synchronization. @@ -90,10 +95,7 @@ static bool amd_check_current_patch_level(void) native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy); - if (IS_ENABLED(CONFIG_X86_32)) - levels = (u32 *)__pa_nodebug(&final_levels); - else - levels = final_levels; + levels = final_levels; for (i = 0; levels[i]; i++) { if (lvl == levels[i]) @@ -105,17 +107,8 @@ static bool __init check_loader_disabled_bsp(void) { static const char *__dis_opt_str = "dis_ucode_ldr"; - -#ifdef CONFIG_X86_32 - const char *cmdline = (const char *)__pa_nodebug(boot_command_line); - const char *option = (const char *)__pa_nodebug(__dis_opt_str); - bool *res = (bool *)__pa_nodebug(&dis_ucode_ldr); - -#else /* CONFIG_X86_64 */ const char *cmdline = boot_command_line; const char *option = __dis_opt_str; - bool *res = &dis_ucode_ldr; -#endif /* * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not * completely accurate as xen pv guests don't see that CPUID bit set but * that's good enough as they don't land on the BSP path anyway.
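+ *
+ * Editor's illustration (not part of the original patch): under a
+ * typical hypervisor CPUID(1).ECX[31] reads as 1, i.e.
+ *
+ *      native_cpuid_ecx(1) & BIT(31)   -> nonzero
+ *
+ * so a guest bails out here and never attempts to load microcode.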
*/ if (native_cpuid_ecx(1) & BIT(31)) - return *res; + return true; - if (x86_cpuid_vendor() == X86_VENDOR_AMD) { + if (x86_cpuid_vendor() == X86_VENDOR_AMD || + x86_cpuid_vendor() == X86_VENDOR_HYGON) { if (amd_check_current_patch_level()) - return *res; + return true; } if (cmdline_find_option_bool(cmdline, option) <= 0) - *res = false; + dis_ucode_ldr = false; - return *res; + return dis_ucode_ldr; } void __init load_ucode_bsp(void) @@ -158,6 +152,10 @@ void __init load_ucode_bsp(void) intel = false; break; + case X86_VENDOR_HYGON: + intel = false; + break; + default: return; } @@ -168,23 +166,14 @@ void __init load_ucode_bsp(void) if (intel) load_ucode_intel_bsp(); else - load_ucode_amd_early(cpuid_1_eax); -} - -static bool check_loader_disabled_ap(void) -{ -#ifdef CONFIG_X86_32 - return *((bool *)__pa_nodebug(&dis_ucode_ldr)); -#else - return dis_ucode_ldr; -#endif + load_ucode_amd_bsp(cpuid_1_eax); } void load_ucode_ap(void) { unsigned int cpuid_1_eax; - if (check_loader_disabled_ap()) + if (dis_ucode_ldr) return; cpuid_1_eax = native_cpuid_eax(1); @@ -196,97 +185,47 @@ void load_ucode_ap(void) break; case X86_VENDOR_AMD: if (x86_family(cpuid_1_eax) >= 0x10) - load_ucode_amd_early(cpuid_1_eax); + load_ucode_amd_ap(cpuid_1_eax); break; - default: - break; - } -} - -static int __init save_microcode_in_initrd(void) -{ - struct cpuinfo_x86 *c = &boot_cpu_data; - int ret = -EINVAL; - - switch (c->x86_vendor) { - case X86_VENDOR_INTEL: - if (c->x86 >= 6) - ret = save_microcode_in_initrd_intel(); - break; - case X86_VENDOR_AMD: - if (c->x86 >= 0x10) - ret = save_microcode_in_initrd_amd(cpuid_eax(1)); + case X86_VENDOR_HYGON: + load_ucode_amd_ap(cpuid_1_eax); break; default: break; } - - initrd_gone = true; - - return ret; } -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa) +struct cpio_data __init find_microcode_in_initrd(const char *path) { #ifdef CONFIG_BLK_DEV_INITRD unsigned long start = 0; size_t size; #ifdef CONFIG_X86_32 - struct boot_params *params; - - if (use_pa) - params = (struct boot_params *)__pa_nodebug(&boot_params); - else - params = &boot_params; - - size = params->hdr.ramdisk_size; - - /* - * Set start only if we have an initrd image. We cannot use initrd_start - * because it is not set that early yet. - */ + size = boot_params.hdr.ramdisk_size; + /* Early load on BSP has a temporary mapping. */ if (size) - start = params->hdr.ramdisk_image; + start = initrd_start_early; -# else /* CONFIG_X86_64 */ +#else /* CONFIG_X86_64 */ size = (unsigned long)boot_params.ext_ramdisk_size << 32; size |= boot_params.hdr.ramdisk_size; if (size) { start = (unsigned long)boot_params.ext_ramdisk_image << 32; start |= boot_params.hdr.ramdisk_image; - start += PAGE_OFFSET; } -# endif +#endif /* * Fixup the start address: after reserve_initrd() runs, initrd_start * has the virtual address of the beginning of the initrd. It also * possibly relocates the ramdisk. In either case, initrd_start contains * the updated address so use that instead. - * - * initrd_gone is for the hotplug case where we've thrown out initrd - * already. 
*/ - if (!use_pa) { - if (initrd_gone) - return (struct cpio_data){ NULL, 0, "" }; - if (initrd_start) - start = initrd_start; - } else { - /* - * The picture with physical addresses is a bit different: we - * need to get the *physical* address to which the ramdisk was - * relocated, i.e., relocated_ramdisk (not initrd_start) and - * since we're running from physical addresses, we need to access - * relocated_ramdisk through its *physical* address too. - */ - u64 *rr = (u64 *)__pa_nodebug(&relocated_ramdisk); - if (*rr) - start = *rr; - } + if (initrd_start) + start = initrd_start; return find_cpio_data(path, (void *)start, size, NULL); #else /* !CONFIG_BLK_DEV_INITRD */ @@ -310,6 +249,9 @@ static void reload_early_microcode(unsigned int cpu) if (family >= 0x10) reload_ucode_amd(cpu); break; + case X86_VENDOR_HYGON: + reload_ucode_amd(cpu); + break; default: break; } @@ -330,117 +272,298 @@ static struct platform_device *microcode_pdev; * requirement can be relaxed in the future. Right now, this is conservative * and good. */ -#define SPINUNIT 100 /* 100 nsec */ +enum sibling_ctrl { + /* Spinwait with timeout */ + SCTRL_WAIT, + /* Invoke the microcode_apply() callback */ + SCTRL_APPLY, + /* Proceed without invoking the microcode_apply() callback */ + SCTRL_DONE, +}; + +struct microcode_ctrl { + enum sibling_ctrl ctrl; + enum ucode_state result; + unsigned int ctrl_cpu; + bool nmi_enabled; +}; + +DEFINE_STATIC_KEY_FALSE(microcode_nmi_handler_enable); +static DEFINE_PER_CPU(struct microcode_ctrl, ucode_ctrl); +static atomic_t late_cpus_in, offline_in_nmi; +static unsigned int loops_per_usec; +static cpumask_t cpu_offline_mask; -static int check_online_cpus(void) +static noinstr bool wait_for_cpus(atomic_t *cnt) { - unsigned int cpu; + unsigned int timeout, loops; - /* - * Make sure all CPUs are online. It's fine for SMT to be disabled if - * all the primary threads are still online. 
- */ - for_each_present_cpu(cpu) { - if (topology_is_primary_thread(cpu) && !cpu_online(cpu)) { - pr_err("Not all CPUs online, aborting microcode update.\n"); - return -EINVAL; + WARN_ON_ONCE(raw_atomic_dec_return(cnt) < 0); + + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (!raw_atomic_read(cnt)) + return true; + + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); + + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); } } - - return 0; + /* Prevent the late comers from making progress and let them time out */ + raw_atomic_inc(cnt); + return false; } -static atomic_t late_cpus_in; -static atomic_t late_cpus_out; - -static int __wait_for_cpus(atomic_t *t, long long timeout) +static noinstr bool wait_for_ctrl(void) { - int all_cpus = num_online_cpus(); + unsigned int timeout, loops; - atomic_inc(t); - - while (atomic_read(t) < all_cpus) { - if (timeout < SPINUNIT) { - pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n", - all_cpus - atomic_read(t)); - return 1; - } + for (timeout = 0; timeout < USEC_PER_SEC; timeout++) { + if (raw_cpu_read(ucode_ctrl.ctrl) != SCTRL_WAIT) + return true; - ndelay(SPINUNIT); - timeout -= SPINUNIT; + for (loops = 0; loops < loops_per_usec; loops++) + cpu_relax(); - touch_nmi_watchdog(); + /* If invoked directly, tickle the NMI watchdog */ + if (!microcode_ops->use_nmi && !(timeout % USEC_PER_MSEC)) { + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + } } - return 0; + return false; } /* - * Returns: - * < 0 - on error - * 0 - success (no update done or microcode was updated) + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. */ -static int __reload_late(void *info) +static noinstr bool load_secondary_wait(unsigned int ctrl_cpu) { - int cpu = smp_processor_id(); - enum ucode_state err; - int ret = 0; + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + raw_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + return false; + } /* - * Wait for all CPUs to arrive. A load will not be attempted unless all - * CPUs show up. - * */ - if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) - return -1; + * Wait for primary threads to complete. If one of them hangs due + * to the update, there is no way out. This is non-recoverable + * because the CPU might hold locks or resources and confuse the + * scheduler, watchdogs etc. There is no way to safely evacuate the + * machine. + */ + if (wait_for_ctrl()) + return true; + instrumentation_begin(); + panic("Microcode load: Primary CPU %d timed out\n", ctrl_cpu); + instrumentation_end(); +} + +/* + * Protected against instrumentation up to the point where the primary + * thread completed the update. See microcode_nmi_handler() for details. + */ +static noinstr void load_secondary(unsigned int cpu) +{ + unsigned int ctrl_cpu = raw_cpu_read(ucode_ctrl.ctrl_cpu); + enum ucode_state ret; + + if (!load_secondary_wait(ctrl_cpu)) { + instrumentation_begin(); + pr_err_once("load: %d CPUs timed out\n", + atomic_read(&late_cpus_in) - 1); + instrumentation_end(); + return; + } + + /* Primary thread completed. 
Allow to invoke instrumentable code */ + instrumentation_begin(); /* - * On an SMT system, it suffices to load the microcode on one sibling of - * the core because the microcode engine is shared between the threads. - * Synchronization still needs to take place so that no concurrent - * loading attempts happen on multiple threads of an SMT core. See - * below. + * If the primary succeeded then invoke the apply() callback, + * otherwise copy the state from the primary thread. */ - if (cpumask_first(topology_sibling_cpumask(cpu)) == cpu) - err = microcode_ops->apply_microcode(cpu); + if (this_cpu_read(ucode_ctrl.ctrl) == SCTRL_APPLY) + ret = microcode_ops->apply_microcode(cpu); else - goto wait_for_siblings; + ret = per_cpu(ucode_ctrl.result, ctrl_cpu); - if (err >= UCODE_NFOUND) { - if (err == UCODE_ERROR) { - pr_warn("Error reloading microcode on CPU %d\n", cpu); - ret = -1; - } + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); + instrumentation_end(); +} + +static void __load_primary(unsigned int cpu) +{ + struct cpumask *secondaries = topology_sibling_cpumask(cpu); + enum sibling_ctrl ctrl; + enum ucode_state ret; + unsigned int sibling; + + /* Initial rendezvous to ensure that all CPUs have arrived */ + if (!wait_for_cpus(&late_cpus_in)) { + this_cpu_write(ucode_ctrl.result, UCODE_TIMEOUT); + pr_err_once("load: %d CPUs timed out\n", atomic_read(&late_cpus_in) - 1); + return; } -wait_for_siblings: - if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC)) - panic("Timeout during microcode update!\n"); + ret = microcode_ops->apply_microcode(cpu); + this_cpu_write(ucode_ctrl.result, ret); + this_cpu_write(ucode_ctrl.ctrl, SCTRL_DONE); /* - * At least one thread has completed update on each core. - * For others, simply call the update to make sure the - * per-cpu cpuinfo can be updated with right microcode - * revision. + * If the update was successful, let the siblings run the apply() + * callback. If not, tell them it's done. This also covers the + * case where the CPU has uniform loading at package or system + * scope implemented but does not advertise it. 
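+ *
+ * Editor's sketch of the resulting handoff (illustration only):
+ *
+ *      primary result          sibling ctrl    sibling action
+ *      UCODE_UPDATED/UCODE_OK  SCTRL_APPLY     invoke apply_microcode()
+ *      anything else           SCTRL_DONE      copy the primary's result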
*/ - if (cpumask_first(topology_sibling_cpumask(cpu)) != cpu) - err = microcode_ops->apply_microcode(cpu); + if (ret == UCODE_UPDATED || ret == UCODE_OK) + ctrl = SCTRL_APPLY; + else + ctrl = SCTRL_DONE; + + for_each_cpu(sibling, secondaries) { + if (sibling != cpu) + per_cpu(ucode_ctrl.ctrl, sibling) = ctrl; + } +} + +static bool kick_offline_cpus(unsigned int nr_offl) +{ + unsigned int cpu, timeout; - return ret; + for_each_cpu(cpu, &cpu_offline_mask) { + /* Enable the rendezvous handler and send NMI */ + per_cpu(ucode_ctrl.nmi_enabled, cpu) = true; + apic_send_nmi_to_offline_cpu(cpu); + } + + /* Wait for them to arrive */ + for (timeout = 0; timeout < (USEC_PER_SEC / 2); timeout++) { + if (atomic_read(&offline_in_nmi) == nr_offl) + return true; + udelay(1); + } + /* Let the others time out */ + return false; +} + +static void release_offline_cpus(void) +{ + unsigned int cpu; + + for_each_cpu(cpu, &cpu_offline_mask) + per_cpu(ucode_ctrl.ctrl, cpu) = SCTRL_DONE; +} + +static void load_primary(unsigned int cpu) +{ + unsigned int nr_offl = cpumask_weight(&cpu_offline_mask); + bool proceed = true; + + /* Kick soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + proceed = kick_offline_cpus(nr_offl); + + /* If the soft-offlined CPUs did not respond, abort */ + if (proceed) + __load_primary(cpu); + + /* Unconditionally release soft-offlined SMT siblings if required */ + if (!cpu && nr_offl) + release_offline_cpus(); } /* - * Reload microcode late on all CPUs. Wait for a sec until they - * all gather together. + * Minimal stub rendezvous handler for soft-offlined CPUs which participate + * in the NMI rendezvous to protect against a concurrent NMI on affected + * CPUs. */ -static int microcode_reload_late(void) +void noinstr microcode_offline_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return; + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + raw_cpu_write(ucode_ctrl.result, UCODE_OFFLINE); + raw_atomic_inc(&offline_in_nmi); + wait_for_ctrl(); +} + +static noinstr bool microcode_update_handler(void) { - int old = boot_cpu_data.microcode, ret; + unsigned int cpu = raw_smp_processor_id(); + + if (raw_cpu_read(ucode_ctrl.ctrl_cpu) == cpu) { + instrumentation_begin(); + load_primary(cpu); + instrumentation_end(); + } else { + load_secondary(cpu); + } + + instrumentation_begin(); + touch_nmi_watchdog(); + instrumentation_end(); + + return true; +} + +/* + * Protection against instrumentation is required for CPUs which are not + * safe against an NMI which is delivered to the secondary SMT sibling + * while the primary thread updates the microcode. Instrumentation can end + * up in #INT3, #DB and #PF. The IRET from those exceptions reenables NMI + * which is the opposite of what the NMI rendezvous is trying to achieve. + * + * The primary thread is safe versus instrumentation as the actual + * microcode update handles this correctly. It's only the sibling code + * path which must be NMI safe until the primary thread completed the + * update. 
+ */ +bool noinstr microcode_nmi_handler(void) +{ + if (!raw_cpu_read(ucode_ctrl.nmi_enabled)) + return false; + + raw_cpu_write(ucode_ctrl.nmi_enabled, false); + return microcode_update_handler(); +} + +static int load_cpus_stopped(void *unused) +{ + if (microcode_ops->use_nmi) { + /* Enable the NMI handler and raise NMI */ + this_cpu_write(ucode_ctrl.nmi_enabled, true); + apic->send_IPI(smp_processor_id(), NMI_VECTOR); + } else { + /* Just invoke the handler directly */ + microcode_update_handler(); + } + return 0; +} + +static int load_late_stop_cpus(bool is_safe) +{ + unsigned int cpu, updated = 0, failed = 0, timedout = 0, siblings = 0; + unsigned int nr_offl, offline = 0; + int old_rev = boot_cpu_data.microcode; struct cpuinfo_x86 prev_info; - pr_err("Attempting late microcode loading - it is dangerous and taints the kernel.\n"); - pr_err("You should switch to early loading, if possible.\n"); + if (!is_safe) { + pr_err("Late microcode loading without minimal revision check.\n"); + pr_err("You should switch to early loading, if possible.\n"); + } - atomic_set(&late_cpus_in, 0); - atomic_set(&late_cpus_out, 0); + atomic_set(&late_cpus_in, num_online_cpus()); + atomic_set(&offline_in_nmi, 0); + loops_per_usec = loops_per_jiffy / (TICK_NSEC / 1000); /* * Take a snapshot before the microcode update in order to compare and @@ -448,52 +571,162 @@ static int microcode_reload_late(void) */ store_cpu_caps(&prev_info); - ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask); - if (!ret) { - pr_info("Reload succeeded, microcode revision: 0x%x -> 0x%x\n", - old, boot_cpu_data.microcode); - microcode_check(&prev_info); - } else { - pr_info("Reload failed, current microcode revision: 0x%x\n", - boot_cpu_data.microcode); + if (microcode_ops->use_nmi) + static_branch_enable_cpuslocked(&microcode_nmi_handler_enable); + + stop_machine_cpuslocked(load_cpus_stopped, NULL, cpu_online_mask); + + if (microcode_ops->use_nmi) + static_branch_disable_cpuslocked(&microcode_nmi_handler_enable); + + /* Analyze the results */ + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + switch (per_cpu(ucode_ctrl.result, cpu)) { + case UCODE_UPDATED: updated++; break; + case UCODE_TIMEOUT: timedout++; break; + case UCODE_OK: siblings++; break; + case UCODE_OFFLINE: offline++; break; + default: failed++; break; + } + } + + if (microcode_ops->finalize_late_load) + microcode_ops->finalize_late_load(!updated); + + if (!updated) { + /* Nothing changed. */ + if (!failed && !timedout) + return 0; + + nr_offl = cpumask_weight(&cpu_offline_mask); + if (offline < nr_offl) { + pr_warn("%u offline siblings did not respond.\n", + nr_offl - atomic_read(&offline_in_nmi)); + return -EIO; + } + pr_err("update failed: %u CPUs failed %u CPUs timed out\n", + failed, timedout); + return -EIO; + } + + if (!is_safe || failed || timedout) + add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); + + pr_info("load: updated on %u primary CPUs with %u siblings\n", updated, siblings); + if (failed || timedout) { + pr_err("load incomplete. %u CPUs timed out or failed\n", + num_online_cpus() - (updated + siblings)); } + pr_info("revision: 0x%x -> 0x%x\n", old_rev, boot_cpu_data.microcode); + microcode_check(&prev_info); - return ret; + return updated + siblings == num_online_cpus() ? 0 : -EIO; +} + +/* + * This function does two things: + * + * 1) Ensure that all required CPUs which are present and have been booted + * once are online. + * + * To pass this check, all primary threads must be online.
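+ *
+ * Editor's example (illustration only): on a 4 core / 8 thread system
+ * a soft-offlined SMT sibling such as CPU 6 may be tolerated under the
+ * conditions described below, whereas a soft-offlined primary thread
+ * such as CPU 2 always aborts the load.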
+ * + * If the microcode load is not safe against NMI then all SMT threads + * must be online as well because they still react to NMIs when they are + * soft-offlined and parked in one of the play_dead() variants. So if a + * NMI hits while the primary thread updates the microcode the resulting + * behaviour is undefined. The default play_dead() implementation on + * modern CPUs uses MWAIT, which is also not guaranteed to be safe + * against a microcode update which affects MWAIT. + * + * As soft-offlined CPUs still react on NMIs, the SMT sibling + * restriction can be lifted when the vendor driver signals to use NMI + * for rendezvous and the APIC provides a mechanism to send an NMI to a + * soft-offlined CPU. The soft-offlined CPUs are then able to + * participate in the rendezvous in a trivial stub handler. + * + * 2) Initialize the per CPU control structure and create a cpumask + * which contains "offline" secondary threads, so they can be handled + * correctly by a control CPU. + */ +static bool setup_cpus(void) +{ + struct microcode_ctrl ctrl = { .ctrl = SCTRL_WAIT, .result = -1, }; + bool allow_smt_offline; + unsigned int cpu; + + allow_smt_offline = microcode_ops->nmi_safe || + (microcode_ops->use_nmi && apic->nmi_to_offline_cpu); + + cpumask_clear(&cpu_offline_mask); + + for_each_cpu_and(cpu, cpu_present_mask, &cpus_booted_once_mask) { + /* + * Offline CPUs sit in one of the play_dead() functions + * with interrupts disabled, but they still react on NMIs + * and execute arbitrary code. Also MWAIT being updated + * while the offline CPU sits there is not necessarily safe + * on all CPU variants. + * + * Mark them in the offline_cpus mask which will be handled + * by CPU0 later in the update process. + * + * Ensure that the primary thread is online so that it is + * guaranteed that all cores are updated. + */ + if (!cpu_online(cpu)) { + if (topology_is_primary_thread(cpu) || !allow_smt_offline) { + pr_err("CPU %u not online, loading aborted\n", cpu); + return false; + } + cpumask_set_cpu(cpu, &cpu_offline_mask); + per_cpu(ucode_ctrl, cpu) = ctrl; + continue; + } + + /* + * Initialize the per CPU state. This is core scope for now, + * but prepared to take package or system scope into account. + */ + ctrl.ctrl_cpu = cpumask_first(topology_sibling_cpumask(cpu)); + per_cpu(ucode_ctrl, cpu) = ctrl; + } + return true; +} + +static int load_late_locked(void) +{ + if (!setup_cpus()) + return -EBUSY; + + switch (microcode_ops->request_microcode_fw(0, &microcode_pdev->dev)) { + case UCODE_NEW: + return load_late_stop_cpus(false); + case UCODE_NEW_SAFE: + return load_late_stop_cpus(true); + case UCODE_NFOUND: + return -ENOENT; + default: + return -EBADFD; + } } static ssize_t reload_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { - enum ucode_state tmp_ret = UCODE_OK; - int bsp = boot_cpu_data.cpu_index; unsigned long val; - ssize_t ret = 0; + ssize_t ret; ret = kstrtoul(buf, 0, &val); if (ret || val != 1) return -EINVAL; cpus_read_lock(); - - ret = check_online_cpus(); - if (ret) - goto put; - - tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev); - if (tmp_ret != UCODE_NEW) - goto put; - - ret = microcode_reload_late(); -put: + ret = load_late_locked(); cpus_read_unlock(); - if (ret == 0) - ret = size; - - add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK); - - return ret; + return ret ? : size; }
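+
+/*
+ * Editor's note (usage illustration, not part of the original patch):
+ * a late load is triggered from user space via
+ *
+ *      # echo 1 > /sys/devices/system/cpu/microcode/reload
+ *
+ * Any value other than '1' is rejected with -EINVAL; errors from
+ * load_late_locked(), e.g. -ENOENT when no matching blob is found or
+ * -EBUSY when a required CPU is offline, are propagated to the writer.
+ */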
static DEVICE_ATTR_WO(reload); @@ -535,17 +768,6 @@ static void microcode_fini_cpu(int cpu) microcode_ops->microcode_fini_cpu(cpu); } -static enum ucode_state microcode_init_cpu(int cpu) -{ - struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - - memset(uci, 0, sizeof(*uci)); - - microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); - - return microcode_ops->apply_microcode(cpu); -} - /** * microcode_bsp_resume - Update boot CPU microcode during resume. */ @@ -564,19 +786,18 @@ static struct syscore_ops mc_syscore_ops = { .resume = microcode_bsp_resume, }; -static int mc_cpu_starting(unsigned int cpu) -{ - enum ucode_state err = microcode_ops->apply_microcode(cpu); - - pr_debug("%s: CPU%d, err: %d\n", __func__, cpu, err); - - return err == UCODE_ERROR; -} - static int mc_cpu_online(unsigned int cpu) { + struct ucode_cpu_info *uci = ucode_cpu_info + cpu; struct device *dev = get_cpu_device(cpu); + memset(uci, 0, sizeof(*uci)); + + microcode_ops->collect_cpu_info(cpu, &uci->cpu_sig); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; + if (sysfs_create_group(&dev->kobj, &mc_attr_group)) pr_err("Failed to create group for CPU%d\n", cpu); return 0; @@ -584,33 +805,13 @@ static int mc_cpu_down_prep(unsigned int cpu) { - struct device *dev; - - dev = get_cpu_device(cpu); + struct device *dev = get_cpu_device(cpu); microcode_fini_cpu(cpu); - - /* Suspend is in progress, only remove the interface */ sysfs_remove_group(&dev->kobj, &mc_attr_group); - pr_debug("%s: CPU%d\n", __func__, cpu); - return 0; } -static void setup_online_cpu(struct work_struct *work) -{ - int cpu = smp_processor_id(); - enum ucode_state err; - - err = microcode_init_cpu(cpu); - if (err == UCODE_ERROR) { - pr_err("Error applying microcode on CPU%d\n", cpu); - return; - } - - mc_cpu_online(cpu); -} - static struct attribute *cpu_root_microcode_attrs[] = { #ifdef CONFIG_MICROCODE_LATE_LOADING &dev_attr_reload.attr, @@ -636,6 +837,8 @@ static int __init microcode_init(void) microcode_ops = init_intel_microcode(); else if (c->x86_vendor == X86_VENDOR_AMD) microcode_ops = init_amd_microcode(); + else if (c->x86_vendor == X86_VENDOR_HYGON) + microcode_ops = init_hygon_microcode(); else pr_err("no support for this CPU vendor\n"); @@ -656,14 +859,9 @@ static int __init microcode_init(void) } } - /* Do per-CPU setup */ - schedule_on_each_cpu(setup_online_cpu); - register_syscore_ops(&mc_syscore_ops); - cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:starting", - mc_cpu_starting, NULL); - cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", - mc_cpu_online, mc_cpu_down_prep); + cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", + mc_cpu_online, mc_cpu_down_prep); pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); @@ -674,5 +872,4 @@ static int __init microcode_init(void) return error; } -fs_initcall(save_microcode_in_initrd); late_initcall(microcode_init); diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 94dd6af9c963a88982f852bc679ac8d8d4651ae5..6024feb98d29dbba1ea45c35660fdf1b577582cf 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include @@ -32,11 +31,14 @@ static const char ucode_path[] = "kernel/x86/microcode/GenuineIntel.bin"; +#define UCODE_BSP_LOADED ((struct microcode_intel *)0x1UL) + /* Current
microcode patch used in early patching on the APs. */ -static struct microcode_intel *intel_ucode_patch; +static struct microcode_intel *ucode_patch_va __read_mostly; +static struct microcode_intel *ucode_patch_late __read_mostly; /* last level cache size per core */ -static int llc_size_per_core; +static unsigned int llc_size_per_core __ro_after_init; /* microcode format is extended from prescott processors */ struct extended_signature { @@ -66,60 +68,52 @@ static inline unsigned int exttable_size(struct extended_sigtable *et) return et->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE; } -int intel_cpu_collect_info(struct ucode_cpu_info *uci) +void intel_collect_cpu_info(struct cpu_signature *sig) { - unsigned int val[2]; - unsigned int family, model; - struct cpu_signature csig = { 0 }; - unsigned int eax, ebx, ecx, edx; - - memset(uci, 0, sizeof(*uci)); - - eax = 0x00000001; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - csig.sig = eax; + sig->sig = cpuid_eax(1); + sig->pf = 0; + sig->rev = intel_get_microcode_revision(); - family = x86_family(eax); - model = x86_model(eax); + if (x86_model(sig->sig) >= 5 || x86_family(sig->sig) > 6) { + unsigned int val[2]; - if (model >= 5 || family > 6) { /* get processor flags from MSR 0x17 */ native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig.pf = 1 << ((val[1] >> 18) & 7); + sig->pf = 1 << ((val[1] >> 18) & 7); } +} +EXPORT_SYMBOL_GPL(intel_collect_cpu_info); - csig.rev = intel_get_microcode_revision(); - - uci->cpu_sig = csig; +static inline bool cpu_signatures_match(struct cpu_signature *s1, unsigned int sig2, + unsigned int pf2) +{ + if (s1->sig != sig2) + return false; - return 0; + /* Processor flags are either both 0 or they intersect. */ + return ((!s1->pf && !pf2) || (s1->pf & pf2)); } -EXPORT_SYMBOL_GPL(intel_cpu_collect_info); -/* - * Returns 1 if update has been found, 0 otherwise. - */ -int intel_find_matching_signature(void *mc, unsigned int csig, int cpf) +bool intel_find_matching_signature(void *mc, struct cpu_signature *sig) { struct microcode_header_intel *mc_hdr = mc; - struct extended_sigtable *ext_hdr; struct extended_signature *ext_sig; + struct extended_sigtable *ext_hdr; int i; - if (intel_cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf)) - return 1; + if (cpu_signatures_match(sig, mc_hdr->sig, mc_hdr->pf)) + return true; /* Look for ext. headers: */ if (get_totalsize(mc_hdr) <= intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE) - return 0; + return false; ext_hdr = mc + intel_microcode_get_datasize(mc_hdr) + MC_HEADER_SIZE; ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE; for (i = 0; i < ext_hdr->count; i++) { - if (intel_cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf)) - return 1; + if (cpu_signatures_match(sig, ext_sig->sig, ext_sig->pf)) + return true; ext_sig++; } return 0; @@ -240,264 +234,91 @@ int intel_microcode_sanity_check(void *mc, bool print_err, int hdr_type) } EXPORT_SYMBOL_GPL(intel_microcode_sanity_check); -/* - * Returns 1 if update has been found, 0 otherwise. 
- */ -static int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev) +static void update_ucode_pointer(struct microcode_intel *mc) { - struct microcode_header_intel *mc_hdr = mc; - - if (mc_hdr->rev <= new_rev) - return 0; - - return intel_find_matching_signature(mc, csig, cpf); -} - -static struct ucode_patch *memdup_patch(void *data, unsigned int size) -{ - struct ucode_patch *p; - - p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL); - if (!p) - return NULL; - - p->data = kmemdup(data, size, GFP_KERNEL); - if (!p->data) { - kfree(p); - return NULL; - } - - return p; -} - -static void save_microcode_patch(struct ucode_cpu_info *uci, void *data, unsigned int size) -{ - struct microcode_header_intel *mc_hdr, *mc_saved_hdr; - struct ucode_patch *iter, *tmp, *p = NULL; - bool prev_found = false; - unsigned int sig, pf; - - mc_hdr = (struct microcode_header_intel *)data; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - mc_saved_hdr = (struct microcode_header_intel *)iter->data; - sig = mc_saved_hdr->sig; - pf = mc_saved_hdr->pf; - - if (intel_find_matching_signature(data, sig, pf)) { - prev_found = true; - - if (mc_hdr->rev <= mc_saved_hdr->rev) - continue; - - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer %p\n", data); - else { - list_replace(&iter->plist, &p->plist); - kfree(iter->data); - kfree(iter); - } - } - } + kvfree(ucode_patch_va); /* - * There weren't any previous patches found in the list cache; save the - * newly found. + * Save the virtual address for early loading and for eventual free + * on late loading. */ - if (!prev_found) { - p = memdup_patch(data, size); - if (!p) - pr_err("Error allocating buffer for %p\n", data); - else - list_add_tail(&p->plist, &microcode_cache); - } - - if (!p) - return; + ucode_patch_va = mc; +} - if (!intel_find_matching_signature(p->data, uci->cpu_sig.sig, uci->cpu_sig.pf)) - return; +static void save_microcode_patch(struct microcode_intel *patch) +{ + unsigned int size = get_totalsize(&patch->hdr); + struct microcode_intel *mc; - /* - * Save for early loading. On 32-bit, that needs to be a physical - * address as the APs are running from physical addresses, before - * paging has been enabled. - */ - if (IS_ENABLED(CONFIG_X86_32)) - intel_ucode_patch = (struct microcode_intel *)__pa_nodebug(p->data); + mc = kvmemdup(patch, size, GFP_KERNEL); + if (mc) + update_ucode_pointer(mc); else - intel_ucode_patch = p->data; + pr_err("Unable to allocate microcode memory size: %u\n", size); } -/* - * Get microcode matching with BSP's model. Only CPUs with the same model as - * BSP can stay in the platform.
- */ -static struct microcode_intel * -scan_microcode(void *data, size_t size, struct ucode_cpu_info *uci, bool save) +/* Scan blob for microcode matching the boot CPUs family, model, stepping */ +static __init struct microcode_intel *scan_microcode(void *data, size_t size, + struct ucode_cpu_info *uci, + bool save) { struct microcode_header_intel *mc_header; struct microcode_intel *patch = NULL; + u32 cur_rev = uci->cpu_sig.rev; unsigned int mc_size; - while (size) { - if (size < sizeof(struct microcode_header_intel)) - break; - + for (; size >= sizeof(struct microcode_header_intel); size -= mc_size, data += mc_size) { mc_header = (struct microcode_header_intel *)data; mc_size = get_totalsize(mc_header); - if (!mc_size || - mc_size > size || + if (!mc_size || mc_size > size || intel_microcode_sanity_check(data, false, MC_HEADER_TYPE_MICROCODE) < 0) break; - size -= mc_size; - - if (!intel_find_matching_signature(data, uci->cpu_sig.sig, - uci->cpu_sig.pf)) { - data += mc_size; + if (!intel_find_matching_signature(data, &uci->cpu_sig)) continue; - } + /* + * For saving the early microcode, find the matching revision which + * was loaded on the BSP. + * + * On the BSP during early boot, find a newer revision than + * actually loaded in the CPU. + */ if (save) { - save_microcode_patch(uci, data, mc_size); - goto next; - } - - - if (!patch) { - if (!has_newer_microcode(data, - uci->cpu_sig.sig, - uci->cpu_sig.pf, - uci->cpu_sig.rev)) - goto next; - - } else { - struct microcode_header_intel *phdr = &patch->hdr; - - if (!has_newer_microcode(data, - phdr->sig, - phdr->pf, - phdr->rev)) - goto next; + if (cur_rev != mc_header->rev) + continue; + } else if (cur_rev >= mc_header->rev) { + continue; } - /* We have a newer patch, save it. */ patch = data; - -next: - data += mc_size; - } - - if (size) - return NULL; - - return patch; -} - -static bool load_builtin_intel_microcode(struct cpio_data *cp) -{ - unsigned int eax = 1, ebx, ecx = 0, edx; - struct firmware fw; - char name[30]; - - if (IS_ENABLED(CONFIG_X86_32)) - return false; - - native_cpuid(&eax, &ebx, &ecx, &edx); - - sprintf(name, "intel-ucode/%02x-%02x-%02x", - x86_family(eax), x86_model(eax), x86_stepping(eax)); - - if (firmware_request_builtin(&fw, name)) { - cp->size = fw.size; - cp->data = (void *)fw.data; - return true; - } - - return false; -} - -static void print_ucode_info(int old_rev, int new_rev, unsigned int date) -{ - pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", - old_rev, - new_rev, - date & 0xffff, - date >> 24, - (date >> 16) & 0xff); -} - -#ifdef CONFIG_X86_32 - -static int delay_ucode_info; -static int current_mc_date; -static int early_old_rev; - -/* - * Print early updated ucode info after printk works. This is delayed info dump. - */ -void show_ucode_info_early(void) -{ - struct ucode_cpu_info uci; - - if (delay_ucode_info) { - intel_cpu_collect_info(&uci); - print_ucode_info(early_old_rev, uci.cpu_sig.rev, current_mc_date); - delay_ucode_info = 0; + cur_rev = mc_header->rev; } -} - -/* - * At this point, we can not call printk() yet. Delay printing microcode info in - * show_ucode_info_early() until printk() works. 
- */ -static void print_ucode(int old_rev, int new_rev, int date) -{ - int *delay_ucode_info_p; - int *current_mc_date_p; - int *early_old_rev_p; - - delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info); - current_mc_date_p = (int *)__pa_nodebug(&current_mc_date); - early_old_rev_p = (int *)__pa_nodebug(&early_old_rev); - - *delay_ucode_info_p = 1; - *current_mc_date_p = date; - *early_old_rev_p = old_rev; -} -#else -static inline void print_ucode(int old_rev, int new_rev, int date) -{ - print_ucode_info(old_rev, new_rev, date); + return size ? NULL : patch; } -#endif -static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) +static enum ucode_state __apply_microcode(struct ucode_cpu_info *uci, + struct microcode_intel *mc, + u32 *cur_rev) { - struct microcode_intel *mc; - u32 rev, old_rev; + u32 rev; - mc = uci->mc; if (!mc) - return 0; + return UCODE_NFOUND; /* * Save us the MSR write below - which is a particular expensive * operation - when the other hyperthread has updated the microcode * already. */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - uci->cpu_sig.rev = rev; + *cur_rev = intel_get_microcode_revision(); + if (*cur_rev >= mc->hdr.rev) { + uci->cpu_sig.rev = *cur_rev; return UCODE_OK; } - old_rev = rev; - /* * Writeback and invalidate caches before updating microcode to avoid * internal issues depending on what the microcode is updating. @@ -509,247 +330,182 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early) rev = intel_get_microcode_revision(); if (rev != mc->hdr.rev) - return -1; + return UCODE_ERROR; uci->cpu_sig.rev = rev; + return UCODE_UPDATED; +} - if (early) - print_ucode(old_rev, uci->cpu_sig.rev, mc->hdr.date); - else - print_ucode_info(old_rev, uci->cpu_sig.rev, mc->hdr.date); +static enum ucode_state apply_microcode_early(struct ucode_cpu_info *uci) +{ + struct microcode_intel *mc = uci->mc; + enum ucode_state ret; + u32 cur_rev, date; - return 0; + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret == UCODE_UPDATED) { + date = mc->hdr.date; + pr_info_once("updated early: 0x%x -> 0x%x, date = %04x-%02x-%02x\n", + cur_rev, mc->hdr.rev, date & 0xffff, date >> 24, (date >> 16) & 0xff); + } + return ret; } -int __init save_microcode_in_initrd_intel(void) +static __init bool load_builtin_intel_microcode(struct cpio_data *cp) { - struct ucode_cpu_info uci; - struct cpio_data cp; - - /* - * initrd is going away, clear patch ptr. We will scan the microcode one - * last time before jettisoning and save a patch, if found. Then we will - * update that pointer too, with a stable patch address to use when - * resuming the cores. - */ - intel_ucode_patch = NULL; + unsigned int eax = 1, ebx, ecx = 0, edx; + struct firmware fw; + char name[30]; - if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(ucode_path, false); + if (IS_ENABLED(CONFIG_X86_32)) + return false; - if (!(cp.data && cp.size)) - return 0; + native_cpuid(&eax, &ebx, &ecx, &edx); - intel_cpu_collect_info(&uci); + sprintf(name, "intel-ucode/%02x-%02x-%02x", + x86_family(eax), x86_model(eax), x86_stepping(eax)); - scan_microcode(cp.data, cp.size, &uci, true); - return 0; + if (firmware_request_builtin(&fw, name)) { + cp->size = fw.size; + cp->data = (void *)fw.data; + return true; + } + return false; } -/* - * @res_patch, output: a pointer to the patch we found.
- */ -static struct microcode_intel *__load_ucode_intel(struct ucode_cpu_info *uci) +static __init struct microcode_intel *get_microcode_blob(struct ucode_cpu_info *uci, bool save) { - static const char *path; struct cpio_data cp; - bool use_pa; - - if (IS_ENABLED(CONFIG_X86_32)) { - path = (const char *)__pa_nodebug(ucode_path); - use_pa = true; - } else { - path = ucode_path; - use_pa = false; - } - /* try built-in microcode first */ if (!load_builtin_intel_microcode(&cp)) - cp = find_microcode_in_initrd(path, use_pa); + cp = find_microcode_in_initrd(ucode_path); if (!(cp.data && cp.size)) return NULL; - intel_cpu_collect_info(uci); + intel_collect_cpu_info(&uci->cpu_sig); - return scan_microcode(cp.data, cp.size, uci, false); + return scan_microcode(cp.data, cp.size, uci, save); } -void __init load_ucode_intel_bsp(void) +/* + * Invoked from an early init call to save the microcode blob which was + * selected during early boot when mm was not usable. The microcode must be + * saved because initrd is going away. It's an early init call so the APs + * just can use the pointer and do not have to scan initrd/builtin firmware + * again. + */ +static int __init save_builtin_microcode(void) { - struct microcode_intel *patch; struct ucode_cpu_info uci; - patch = __load_ucode_intel(&uci); - if (!patch) - return; + if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED) + return 0; - uci.mc = patch; + if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return 0; - apply_microcode_early(&uci, true); + uci.mc = get_microcode_blob(&uci, true); + if (uci.mc) + save_microcode_patch(uci.mc); + return 0; } +early_initcall(save_builtin_microcode); -void load_ucode_intel_ap(void) +/* Load microcode on BSP from initrd or builtin blobs */ +void __init load_ucode_intel_bsp(void) { - struct microcode_intel *patch, **iup; struct ucode_cpu_info uci; - if (IS_ENABLED(CONFIG_X86_32)) - iup = (struct microcode_intel **) __pa_nodebug(&intel_ucode_patch); - else - iup = &intel_ucode_patch; - - if (!*iup) { - patch = __load_ucode_intel(&uci); - if (!patch) - return; - - *iup = patch; - } - - uci.mc = *iup; - - apply_microcode_early(&uci, true); + uci.mc = get_microcode_blob(&uci, false); + if (uci.mc && apply_microcode_early(&uci) == UCODE_UPDATED) + ucode_patch_va = UCODE_BSP_LOADED; } -static struct microcode_intel *find_patch(struct ucode_cpu_info *uci) +void load_ucode_intel_ap(void) { - struct microcode_header_intel *phdr; - struct ucode_patch *iter, *tmp; - - list_for_each_entry_safe(iter, tmp, &microcode_cache, plist) { - - phdr = (struct microcode_header_intel *)iter->data; - - if (phdr->rev <= uci->cpu_sig.rev) - continue; - - if (!intel_find_matching_signature(phdr, - uci->cpu_sig.sig, - uci->cpu_sig.pf)) - continue; + struct ucode_cpu_info uci; - return iter->data; - } - return NULL; + uci.mc = ucode_patch_va; + if (uci.mc) + apply_microcode_early(&uci); } +/* Reload microcode on resume */ void reload_ucode_intel(void) { - struct microcode_intel *p; - struct ucode_cpu_info uci; - - intel_cpu_collect_info(&uci); - - p = find_patch(&uci); - if (!p) - return; - - uci.mc = p; + struct ucode_cpu_info uci = { .mc = ucode_patch_va, }; - apply_microcode_early(&uci, false); + if (uci.mc) + apply_microcode_early(&uci); } static int collect_cpu_info(int cpu_num, struct cpu_signature *csig) { - struct cpuinfo_x86 *c = &cpu_data(cpu_num); - unsigned int val[2]; - - memset(csig, 0, sizeof(*csig)); - - csig->sig = cpuid_eax(0x00000001); - - if ((c->x86_model >= 5) || (c->x86 > 6)) { - /* get processor flags from
MSR 0x17 */ - rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]); - csig->pf = 1 << ((val[1] >> 18) & 7); - } - - csig->rev = c->microcode; - + intel_collect_cpu_info(csig); return 0; } -static enum ucode_state apply_microcode_intel(int cpu) +static enum ucode_state apply_microcode_late(int cpu) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - struct cpuinfo_x86 *c = &cpu_data(cpu); - bool bsp = c->cpu_index == boot_cpu_data.cpu_index; - struct microcode_intel *mc; + struct microcode_intel *mc = ucode_patch_late; enum ucode_state ret; - static int prev_rev; - u32 rev; + u32 cur_rev; - /* We should bind the task to the CPU */ - if (WARN_ON(raw_smp_processor_id() != cpu)) + if (WARN_ON_ONCE(smp_processor_id() != cpu)) return UCODE_ERROR; - /* Look for a newer patch in our cache: */ - mc = find_patch(uci); - if (!mc) { - mc = uci->mc; - if (!mc) - return UCODE_NFOUND; - } + ret = __apply_microcode(uci, mc, &cur_rev); + if (ret != UCODE_UPDATED && ret != UCODE_OK) + return ret; - /* - * Save us the MSR write below - which is a particular expensive - * operation - when the other hyperthread has updated the microcode - * already. - */ - rev = intel_get_microcode_revision(); - if (rev >= mc->hdr.rev) { - ret = UCODE_OK; - goto out; + if (!cpu && uci->cpu_sig.rev != cur_rev) { + pr_info("Updated to revision 0x%x, date = %04x-%02x-%02x\n", + uci->cpu_sig.rev, mc->hdr.date & 0xffff, mc->hdr.date >> 24, + (mc->hdr.date >> 16) & 0xff); } - /* - * Writeback and invalidate caches before updating microcode to avoid - * internal issues depending on what the microcode is updating. - */ - native_wbinvd(); + cpu_data(cpu).microcode = uci->cpu_sig.rev; + if (!cpu) + boot_cpu_data.microcode = uci->cpu_sig.rev; - /* write microcode via MSR 0x79 */ - wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits); + return ret; +} - rev = intel_get_microcode_revision(); +static bool ucode_validate_minrev(struct microcode_header_intel *mc_header) +{ + int cur_rev = boot_cpu_data.microcode; - if (rev != mc->hdr.rev) { - pr_err("CPU%d update to revision 0x%x failed\n", - cpu, mc->hdr.rev); - return UCODE_ERROR; + /* + * When late-loading, ensure the header declares a minimum revision + * required to perform a late-load. The previously reserved field + * is 0 in older microcode blobs. + */ + if (!mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Microcode header does not specify a required min version\n"); + return false; } - if (bsp && rev != prev_rev) { - pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n", - rev, - mc->hdr.date & 0xffff, - mc->hdr.date >> 24, - (mc->hdr.date >> 16) & 0xff); - prev_rev = rev; + /* + * Check whether the current revision is either greater or equal to + * the minimum revision specified in the header. + */ + if (cur_rev < mc_header->min_req_ver) { + pr_info("Unsafe microcode update: Current revision 0x%x too old\n", cur_rev); + pr_info("Current should be at 0x%x or higher.
Use early loading instead\n", mc_header->min_req_ver); + return false; } - - ret = UCODE_UPDATED; - -out: - uci->cpu_sig.rev = rev; - c->microcode = rev; - - /* Update boot_cpu_data's revision too, if we're on the BSP: */ - if (bsp) - boot_cpu_data.microcode = rev; - - return ret; + return true; } -static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) +static enum ucode_state parse_microcode_blobs(int cpu, struct iov_iter *iter) { struct ucode_cpu_info *uci = ucode_cpu_info + cpu; - unsigned int curr_mc_size = 0, new_mc_size = 0; - enum ucode_state ret = UCODE_OK; - int new_rev = uci->cpu_sig.rev; + bool is_safe, new_is_safe = false; + int cur_rev = uci->cpu_sig.rev; + unsigned int curr_mc_size = 0; u8 *new_mc = NULL, *mc = NULL; - unsigned int csig, cpf; while (iov_iter_count(iter)) { struct microcode_header_intel mc_header; @@ -758,68 +514,66 @@ static enum ucode_state generic_load_microcode(int cpu, struct iov_iter *iter) if (!copy_from_iter_full(&mc_header, sizeof(mc_header), iter)) { pr_err("error! Truncated or inaccessible header in microcode data file\n"); - break; + goto fail; } mc_size = get_totalsize(&mc_header); if (mc_size < sizeof(mc_header)) { pr_err("error! Bad data in microcode data file (totalsize too small)\n"); - break; + goto fail; } data_size = mc_size - sizeof(mc_header); if (data_size > iov_iter_count(iter)) { pr_err("error! Bad data in microcode data file (truncated file?)\n"); - break; + goto fail; } /* For performance reasons, reuse mc area when possible */ if (!mc || mc_size > curr_mc_size) { - vfree(mc); - mc = vmalloc(mc_size); + kvfree(mc); + mc = kvmalloc(mc_size, GFP_KERNEL); if (!mc) - break; + goto fail; curr_mc_size = mc_size; } memcpy(mc, &mc_header, sizeof(mc_header)); data = mc + sizeof(mc_header); if (!copy_from_iter_full(data, data_size, iter) || - intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) { - break; - } + intel_microcode_sanity_check(mc, true, MC_HEADER_TYPE_MICROCODE) < 0) + goto fail; - csig = uci->cpu_sig.sig; - cpf = uci->cpu_sig.pf; - if (has_newer_microcode(mc, csig, cpf, new_rev)) { - vfree(new_mc); - new_rev = mc_header.rev; - new_mc = mc; - new_mc_size = mc_size; - mc = NULL; /* trigger new vmalloc */ - ret = UCODE_NEW; - } - } + if (cur_rev >= mc_header.rev) + continue; - vfree(mc); + if (!intel_find_matching_signature(mc, &uci->cpu_sig)) + continue; - if (iov_iter_count(iter)) { - vfree(new_mc); - return UCODE_ERROR; + is_safe = ucode_validate_minrev(&mc_header); + if (force_minrev && !is_safe) + continue; + + kvfree(new_mc); + cur_rev = mc_header.rev; + new_mc = mc; + new_is_safe = is_safe; + mc = NULL; } + if (iov_iter_count(iter)) + goto fail; + + kvfree(mc); if (!new_mc) return UCODE_NFOUND; - vfree(uci->mc); - uci->mc = (struct microcode_intel *)new_mc; - - /* Save for CPU hotplug */ - save_microcode_patch(uci, new_mc, new_mc_size); + ucode_patch_late = (struct microcode_intel *)new_mc; + return new_is_safe ? 
UCODE_NEW_SAFE : UCODE_NEW; - pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n", - cpu, new_rev, uci->cpu_sig.rev); - - return ret; +fail: + kvfree(mc); + kvfree(new_mc); + return UCODE_ERROR; } static bool is_blacklisted(unsigned int cpu) @@ -868,26 +622,36 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device) kvec.iov_base = (void *)firmware->data; kvec.iov_len = firmware->size; iov_iter_kvec(&iter, ITER_SOURCE, &kvec, 1, firmware->size); - ret = generic_load_microcode(cpu, &iter); + ret = parse_microcode_blobs(cpu, &iter); release_firmware(firmware); return ret; } +static void finalize_late_load(int result) +{ + if (!result) + update_ucode_pointer(ucode_patch_late); + else + kvfree(ucode_patch_late); + ucode_patch_late = NULL; +} + static struct microcode_ops microcode_intel_ops = { - .request_microcode_fw = request_microcode_fw, - .collect_cpu_info = collect_cpu_info, - .apply_microcode = apply_microcode_intel, + .request_microcode_fw = request_microcode_fw, + .collect_cpu_info = collect_cpu_info, + .apply_microcode = apply_microcode_late, + .finalize_late_load = finalize_late_load, + .use_nmi = IS_ENABLED(CONFIG_X86_64), }; -static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c) +static __init void calc_llc_size_per_core(struct cpuinfo_x86 *c) { u64 llc_size = c->x86_cache_size * 1024ULL; do_div(llc_size, c->x86_max_cores); - - return (int)llc_size; + llc_size_per_core = (unsigned int)llc_size; } struct microcode_ops * __init init_intel_microcode(void) @@ -900,7 +664,7 @@ struct microcode_ops * __init init_intel_microcode(void) return NULL; } - llc_size_per_core = calc_llc_size_per_core(c); + calc_llc_size_per_core(c); return &microcode_intel_ops; } diff --git a/arch/x86/kernel/cpu/microcode/internal.h b/arch/x86/kernel/cpu/microcode/internal.h index bf883aa712330a2d38cbd21f7d43bd2f8263fb76..980ef806b377b9281d132e4f539ca2a08f8ee902 100644 --- a/arch/x86/kernel/cpu/microcode/internal.h +++ b/arch/x86/kernel/cpu/microcode/internal.h @@ -8,43 +8,37 @@ #include #include -struct ucode_patch { - struct list_head plist; - void *data; /* Intel uses only this one */ - unsigned int size; - u32 patch_id; - u16 equiv_cpu; -}; - -extern struct list_head microcode_cache; - struct device; enum ucode_state { UCODE_OK = 0, UCODE_NEW, + UCODE_NEW_SAFE, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR, + UCODE_TIMEOUT, + UCODE_OFFLINE, }; struct microcode_ops { enum ucode_state (*request_microcode_fw)(int cpu, struct device *dev); - void (*microcode_fini_cpu)(int cpu); /* - * The generic 'microcode_core' part guarantees that - * the callbacks below run on a target cpu when they - * are being called. + * The generic 'microcode_core' part guarantees that the callbacks + * below run on a target CPU when they are being called. * See also the "Synchronization" section in microcode_core.c.
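+	 *
+	 * Editor's sketch (illustration only, mirroring microcode_intel_ops
+	 * earlier in this series) of a vendor driver wiring these up; the
+	 * foo_* names are hypothetical:
+	 *
+	 *	static struct microcode_ops foo_ucode_ops = {
+	 *		.request_microcode_fw	= foo_request_fw,
+	 *		.collect_cpu_info	= foo_collect_cpu_info,
+	 *		.apply_microcode	= foo_apply,
+	 *		.finalize_late_load	= foo_finalize,
+	 *		.use_nmi		= true,
+	 *	};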
*/ - enum ucode_state (*apply_microcode)(int cpu); - int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + enum ucode_state (*apply_microcode)(int cpu); + int (*collect_cpu_info)(int cpu, struct cpu_signature *csig); + void (*finalize_late_load)(int result); + unsigned int nmi_safe : 1, + use_nmi : 1; }; extern struct ucode_cpu_info ucode_cpu_info[]; -struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); +struct cpio_data find_microcode_in_initrd(const char *path); #define MAX_UCODE_COUNT 128 @@ -55,6 +49,9 @@ struct cpio_data find_microcode_in_initrd(const char *path, bool use_pa); #define CPUID_AMD1 QCHAR('A', 'u', 't', 'h') #define CPUID_AMD2 QCHAR('e', 'n', 't', 'i') #define CPUID_AMD3 QCHAR('c', 'A', 'M', 'D') +#define CPUID_HYGON1 QCHAR('H', 'y', 'g', 'o') +#define CPUID_HYGON2 QCHAR('n', 'G', 'e', 'n') +#define CPUID_HYGON3 QCHAR('u', 'i', 'n', 'e') #define CPUID_IS(a, b, c, ebx, ecx, edx) \ (!(((ebx) ^ (a)) | ((edx) ^ (b)) | ((ecx) ^ (c)))) @@ -81,6 +78,9 @@ static inline int x86_cpuid_vendor(void) if (CPUID_IS(CPUID_AMD1, CPUID_AMD2, CPUID_AMD3, ebx, ecx, edx)) return X86_VENDOR_AMD; + if (CPUID_IS(CPUID_HYGON1, CPUID_HYGON2, CPUID_HYGON3, ebx, ecx, edx)) + return X86_VENDOR_HYGON; + return X86_VENDOR_UNKNOWN; } @@ -94,12 +94,12 @@ static inline unsigned int x86_cpuid_family(void) return x86_family(eax); } -extern bool initrd_gone; +extern bool dis_ucode_ldr; +extern bool force_minrev; #ifdef CONFIG_CPU_SUP_AMD void load_ucode_amd_bsp(unsigned int family); void load_ucode_amd_ap(unsigned int family); -void load_ucode_amd_early(unsigned int cpuid_1_eax); int save_microcode_in_initrd_amd(unsigned int family); void reload_ucode_amd(unsigned int cpu); struct microcode_ops *init_amd_microcode(void); @@ -107,23 +107,26 @@ void exit_amd_microcode(void); #else /* CONFIG_CPU_SUP_AMD */ static inline void load_ucode_amd_bsp(unsigned int family) { } static inline void load_ucode_amd_ap(unsigned int family) { } -static inline void load_ucode_amd_early(unsigned int family) { } static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; } static inline void reload_ucode_amd(unsigned int cpu) { } static inline struct microcode_ops *init_amd_microcode(void) { return NULL; } static inline void exit_amd_microcode(void) { } #endif /* !CONFIG_CPU_SUP_AMD */ +#ifdef CONFIG_CPU_SUP_HYGON +const struct microcode_ops *init_hygon_microcode(void); +#else /* CONFIG_CPU_SUP_HYGON */ +static const inline struct microcode_ops *init_hygon_microcode(void) { return NULL; } +#endif /* !CONFIG_CPU_SUP_HYGON */ + #ifdef CONFIG_CPU_SUP_INTEL void load_ucode_intel_bsp(void); void load_ucode_intel_ap(void); -int save_microcode_in_initrd_intel(void); void reload_ucode_intel(void); struct microcode_ops *init_intel_microcode(void); #else /* CONFIG_CPU_SUP_INTEL */ static inline void load_ucode_intel_bsp(void) { } static inline void load_ucode_intel_ap(void) { } -static inline int save_microcode_in_initrd_intel(void) { return -EINVAL; } static inline void reload_ucode_intel(void) { } static inline struct microcode_ops *init_intel_microcode(void) { return NULL; } #endif /* !CONFIG_CPU_SUP_INTEL */ diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 31c0e68f6227292cc5b217d68646ff2e26f3889a..2941134c47da66ebc7fdd0eef96a89d6dea43140 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c @@ -17,14 +17,22 @@ extern const char * const x86_vmx_flags[NVMXINTS*32]; * Get CPU information for use by the procfs. 
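The new CPUID_HYGON* QCHAR constants spell "HygonGenuine" across the EBX, EDX and ECX outputs of CPUID leaf 0, the same split x86_cpuid_vendor() compares against. The check can be reproduced from userspace with the compiler's <cpuid.h> helper; note the EBX, EDX, ECX ordering of the vendor string::

    #include <cpuid.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned int eax, regs[3];
        char vendor[13] = { 0 };

        /* regs[] is filled in vendor-string order: EBX, EDX, ECX. */
        if (!__get_cpuid(0, &eax, &regs[0], &regs[2], &regs[1]))
            return 1;
        memcpy(vendor, regs, 12);
        printf("%s -> %s\n", vendor,
               !strcmp(vendor, "HygonGenuine") ? "X86_VENDOR_HYGON" :
               !strcmp(vendor, "AuthenticAMD") ? "X86_VENDOR_AMD" :
                                                 "other");
        return 0;
    }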
*/ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, - unsigned int cpu) + unsigned int cpu, unsigned int index, + bool rich_container, unsigned int total) { #ifdef CONFIG_SMP - seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); - seq_printf(m, "siblings\t: %d\n", - cpumask_weight(topology_core_cpumask(cpu))); - seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); - seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + if (rich_container) { + seq_puts(m, "physical id\t: 0\n"); + seq_printf(m, "siblings\t: %d\n", total); + seq_printf(m, "core id\t\t: %d\n", index); + seq_printf(m, "cpu cores\t: %d\n", total); + } else { + seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); + seq_printf(m, "siblings\t: %d\n", + cpumask_weight(topology_core_cpumask(cpu))); + seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id); + seq_printf(m, "cpu cores\t: %d\n", c->booted_cores); + } seq_printf(m, "apicid\t\t: %d\n", c->apicid); seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid); #endif @@ -63,16 +71,20 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) static int show_cpuinfo(struct seq_file *m, void *v) { struct cpuinfo_x86 *c = v; - unsigned int cpu; + unsigned int cpu, index, total; int i; + bool rich_container = false; + + index = cpu = c->cpu_index; + if (check_rich_container(cpu, &index, &rich_container, &total)) + return 0; - cpu = c->cpu_index; seq_printf(m, "processor\t: %u\n" "vendor_id\t: %s\n" "cpu family\t: %d\n" "model\t\t: %u\n" "model name\t: %s\n", - cpu, + index, c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", c->x86, c->x86_model, @@ -95,13 +107,19 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (c->x86_cache_size) seq_printf(m, "cache size\t: %u KB\n", c->x86_cache_size); - show_cpuinfo_core(m, c, cpu); + show_cpuinfo_core(m, c, cpu, index, rich_container, total); show_cpuinfo_misc(m, c); seq_puts(m, "flags\t\t:"); for (i = 0; i < 32*NCAPINTS; i++) - if (cpu_has(c, i) && x86_cap_flags[i] != NULL) - seq_printf(m, " %s", x86_cap_flags[i]); + if (cpu_has(c, i) && x86_cap_flags[i] != NULL) { + if (c->x86_vendor == X86_VENDOR_HYGON) + seq_printf(m, " %s", i == X86_FEATURE_SEV ? "csv" : + (i == X86_FEATURE_SEV_ES ? "csv2" : + x86_cap_flags[i])); + else + seq_printf(m, " %s", x86_cap_flags[i]); + } #ifdef CONFIG_X86_VMX_FEATURE_NAMES if (cpu_has(c, X86_FEATURE_VMX) && c->vmx_capability[0]) { diff --git a/arch/x86/kernel/cpu/resctrl/Makefile b/arch/x86/kernel/cpu/resctrl/Makefile index 4a06c37b9cf11bef1b00b546e3c2344691e6e18a..0c13b0befd8a9b76fe4753b8ee230113e5f3bf54 100644 --- a/arch/x86/kernel/cpu/resctrl/Makefile +++ b/arch/x86/kernel/cpu/resctrl/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o -obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o pseudo_lock.o +obj-$(CONFIG_X86_CPU_RESCTRL) += core.o rdtgroup.o monitor.o +obj-$(CONFIG_X86_CPU_RESCTRL) += ctrlmondata.o +obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK) += pseudo_lock.o CFLAGS_pseudo_lock.o = -I$(src) diff --git a/arch/x86/kernel/cpu/resctrl/core.c b/arch/x86/kernel/cpu/resctrl/core.c index 030d3b409768dea54875186085a0b6c26c593aaa..51edb6548977f7195362dbb2e2e9fd7a34a13814 100644 --- a/arch/x86/kernel/cpu/resctrl/core.c +++ b/arch/x86/kernel/cpu/resctrl/core.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) "resctrl: " fmt +#include #include #include #include @@ -25,8 +26,15 @@ #include #include "internal.h" -/* Mutex to protect rdtgroup access. 
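check_rich_container() itself lives outside this hunk, so only the output shape is modeled below: in the container view every visible CPU is presented as one core of a single fabricated socket sized to the container's CPU count, while the host view keeps the real topology. Illustrative sketch only; the parameter names are stand-ins for the remapped values::

    #include <stdbool.h>
    #include <stdio.h>

    static void show_core(bool rich_container, unsigned int index,
                          unsigned int total, unsigned int phys_id,
                          unsigned int siblings, unsigned int core_id,
                          unsigned int cores)
    {
        if (rich_container) {           /* one fake socket, 'total' cores */
            printf("physical id\t: 0\n");
            printf("siblings\t: %u\n", total);
            printf("core id\t\t: %u\n", index);
            printf("cpu cores\t: %u\n", total);
        } else {                        /* real host topology */
            printf("physical id\t: %u\n", phys_id);
            printf("siblings\t: %u\n", siblings);
            printf("core id\t\t: %u\n", core_id);
            printf("cpu cores\t: %u\n", cores);
        }
    }

    int main(void)
    {
        show_core(true, 2, 4, 1, 16, 5, 8);    /* containerized view */
        show_core(false, 2, 4, 1, 16, 5, 8);   /* host view */
        return 0;
    }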
*/ -DEFINE_MUTEX(rdtgroup_mutex); +/* + * rdt_domain structures are kfree()d when their last CPU goes offline, + * and allocated when the first CPU in a new domain comes online. + * The rdt_resource's domain list is updated when this happens. Readers of + * the domain list must either take cpus_read_lock(), or rely on an RCU + * read-side critical section, to avoid observing concurrent modification. + * All writers take this mutex: + */ +static DEFINE_MUTEX(domain_list_lock); /* * The cached resctrl_pqr_state is strictly per CPU and can never be @@ -36,12 +44,6 @@ DEFINE_MUTEX(rdtgroup_mutex); */ DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state); -/* - * Used to store the max resource name width and max resource data width - * to display the schemata in a tabular format - */ -int max_name_width, max_data_width; - /* * Global boolean for rdt_alloc which is true if any * resource allocation is enabled. @@ -67,7 +69,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L3", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_L3), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -81,7 +82,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "L2", .cache_level = 2, .domains = domain_init(RDT_RESOURCE_L2), - .parse_ctrlval = parse_cbm, .format_str = "%d=%0*x", .fflags = RFTYPE_RES_CACHE, }, @@ -95,7 +95,6 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "MB", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_MBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, @@ -107,13 +106,20 @@ struct rdt_hw_resource rdt_resources_all[] = { .name = "SMBA", .cache_level = 3, .domains = domain_init(RDT_RESOURCE_SMBA), - .parse_ctrlval = parse_bw, .format_str = "%d=%*u", .fflags = RFTYPE_RES_MB, }, }, }; +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &rdt_resources_all[l].r_resctrl; +} + /* * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs * as they do not have CPUID enumeration support for Cache allocation. @@ -136,15 +142,15 @@ static inline void cache_alloc_hsw_probe(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3]; struct rdt_resource *r = &hw_res->r_resctrl; - u32 l, h, max_cbm = BIT_MASK(20) - 1; + u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0; - if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0)) + if (wrmsrl_safe(MSR_IA32_L3_CBM_BASE, max_cbm)) return; - rdmsr(MSR_IA32_L3_CBM_BASE, l, h); + rdmsrl(MSR_IA32_L3_CBM_BASE, l3_cbm_0); /* If all the bits were set in MSR, return success */ - if (l != max_cbm) + if (l3_cbm_0 != max_cbm) return; hw_res->num_closid = 4; @@ -152,26 +158,12 @@ static inline void cache_alloc_hsw_probe(void) r->cache.cbm_len = 20; r->cache.shareable_bits = 0xc0000; r->cache.min_cbm_bits = 2; + r->cache.arch_has_sparse_bitmasks = false; r->alloc_capable = true; rdt_alloc_capable = true; } -bool is_mba_sc(struct rdt_resource *r) -{ - if (!r) - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc; - - /* - * The software controller support is only applicable to MBA resource. - * Make sure to check for resource type. - */ - if (r->rid != RDT_RESOURCE_MBA) - return false; - - return r->membw.mba_sc; -} - /* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. 
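The comment above replaces the old "everything under rdtgroup_mutex" rule for the domain lists. A kernel-style sketch of the new discipline (not a standalone program; the struct is a placeholder)::

    #include <linux/mutex.h>
    #include <linux/rculist.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    static DEFINE_MUTEX(domain_list_lock);   /* writers only */
    static LIST_HEAD(domains);

    struct dom {
        int id;
        struct list_head list;
    };

    /* Reader: no mutex. An RCU read-side critical section (or holding
     * cpus_read_lock()) keeps entries live while we walk the list. */
    static bool dom_present(int id)
    {
        struct dom *d;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(d, &domains, list) {
            if (d->id == id) {
                found = true;
                break;
            }
        }
        rcu_read_unlock();
        return found;
    }

    /* Writer: serialize against other writers, unlink with the RCU
     * list primitive, then wait out readers before freeing. */
    static void dom_remove(struct dom *d)
    {
        mutex_lock(&domain_list_lock);
        list_del_rcu(&d->list);
        mutex_unlock(&domain_list_lock);
        synchronize_rcu();
        kfree(d);
    }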
@@ -220,7 +212,6 @@ static bool __get_mem_config_intel(struct rdt_resource *r) r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD; else r->membw.throttle_mode = THREAD_THROTTLE_MAX; - thread_throttle_mode_init(); r->alloc_capable = true; @@ -230,9 +221,7 @@ static bool __get_mem_config_intel(struct rdt_resource *r) static bool __rdt_get_mem_config_amd(struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - union cpuid_0x10_3_eax eax; - union cpuid_0x10_x_edx edx; - u32 ebx, ecx, subleaf; + u32 eax, ebx, ecx, edx, subleaf; /* * Query CPUID_Fn80000020_EDX_x01 for MBA and @@ -240,9 +229,9 @@ static bool __rdt_get_mem_config_amd(struct rdt_resource *r) */ subleaf = (r->rid == RDT_RESOURCE_SMBA) ? 2 : 1; - cpuid_count(0x80000020, subleaf, &eax.full, &ebx, &ecx, &edx.full); - hw_res->num_closid = edx.split.cos_max + 1; - r->default_ctrl = MAX_MBA_BW_AMD; + cpuid_count(0x80000020, subleaf, &eax, &ebx, &ecx, &edx); + hw_res->num_closid = edx + 1; + r->default_ctrl = 1 << eax; /* AMD does not use delay */ r->membw.delay_linear = false; @@ -267,15 +256,18 @@ static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); union cpuid_0x10_1_eax eax; + union cpuid_0x10_x_ecx ecx; union cpuid_0x10_x_edx edx; - u32 ebx, ecx; + u32 ebx; - cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full); + cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx.full, &edx.full); hw_res->num_closid = edx.split.cos_max + 1; r->cache.cbm_len = eax.split.cbm_len + 1; r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1; r->cache.shareable_bits = ebx & r->default_ctrl; r->data_width = (r->cache.cbm_len + 3) / 4; + if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) + r->cache.arch_has_sparse_bitmasks = ecx.split.noncont; r->alloc_capable = true; } @@ -299,6 +291,11 @@ static void rdt_get_cdp_l2_config(void) rdt_get_cdp_config(RDT_RESOURCE_L2); } +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) +{ + return rdt_resources_all[l].cdp_enabled; +} + static void mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) { @@ -348,19 +345,6 @@ cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r) wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]); } -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r) -{ - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - /* Find the domain that contains this CPU */ - if (cpumask_test_cpu(cpu, &d->cpu_mask)) - return d; - } - - return NULL; -} - u32 resctrl_arch_get_num_closid(struct rdt_resource *r) { return resctrl_to_arch_res(r)->num_closid; @@ -374,7 +358,7 @@ void rdt_ctrl_update(void *arg) int cpu = smp_processor_id(); struct rdt_domain *d; - d = get_domain_from_cpu(cpu, r); + d = resctrl_get_domain_from_cpu(cpu, r); if (d) { hw_res->msr_update(d, m, r); return; @@ -391,8 +375,8 @@ void rdt_ctrl_update(void *arg) * caller, return the first domain whose id is bigger than the input id. * The domain list is sorted by id in ascending order. 
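rdt_get_cache_alloc_cfg() now also decodes ECX, whose bit 3 advertises support for non-contiguous cache bit masks on Intel. The same enumeration is visible from userspace; a sketch using GCC's <cpuid.h> (all fields read as zero on CPUs without CAT, and the bit positions follow the unions in this patch)::

    #include <cpuid.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID.(EAX=10H, ECX=1): L3 cache allocation enumeration. */
        if (!__get_cpuid_count(0x10, 1, &eax, &ebx, &ecx, &edx))
            return 1;
        printf("cbm_len  : %u\n", (eax & 0x1f) + 1);    /* EAX[4:0] + 1 */
        printf("shareable: %#x\n", ebx);                /* raw EBX */
        printf("noncont  : %u\n", (ecx >> 3) & 1);      /* ECX[3] */
        printf("closids  : %u\n", (edx & 0xffff) + 1);  /* cos_max + 1 */
        return 0;
    }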
*/ -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos) +static struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, + struct list_head **pos) { struct rdt_domain *d; struct list_head *l; @@ -416,6 +400,11 @@ struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, return NULL; } +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + return rdt_find_domain(r, id, NULL); +} + static void setup_default_ctrlval(struct rdt_resource *r, u32 *dc) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); @@ -468,13 +457,13 @@ static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_domain *hw_dom) { size_t tsize; - if (is_mbm_total_enabled()) { + if (resctrl_arch_is_mbm_total_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_total); hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_total) return -ENOMEM; } - if (is_mbm_local_enabled()) { + if (resctrl_arch_is_mbm_local_enabled()) { tsize = sizeof(*hw_dom->arch_mbm_local); hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL); if (!hw_dom->arch_mbm_local) { @@ -508,6 +497,8 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) struct rdt_domain *d; int err; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, &add_pos); if (IS_ERR(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -541,11 +532,12 @@ static void domain_add_cpu(int cpu, struct rdt_resource *r) return; } - list_add_tail(&d->list, add_pos); + list_add_tail_rcu(&d->list, add_pos); err = resctrl_online_domain(r, d); if (err) { - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); domain_free(hw_dom); } } @@ -556,6 +548,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) struct rdt_hw_domain *hw_dom; struct rdt_domain *d; + lockdep_assert_held(&domain_list_lock); + d = rdt_find_domain(r, id, NULL); if (IS_ERR_OR_NULL(d)) { pr_warn("Couldn't find cache id for CPU %d\n", cpu); @@ -566,7 +560,8 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) cpumask_clear_cpu(cpu, &d->cpu_mask); if (cpumask_empty(&d->cpu_mask)) { resctrl_offline_domain(r, d); - list_del(&d->list); + list_del_rcu(&d->list); + synchronize_rcu(); /* * rdt_domain "d" is going to be freed below, so clear @@ -578,91 +573,51 @@ static void domain_remove_cpu(int cpu, struct rdt_resource *r) return; } - - if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) { - if (is_mbm_enabled() && cpu == d->mbm_work_cpu) { - cancel_delayed_work(&d->mbm_over); - mbm_setup_overflow_handler(d, 0); - } - if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu && - has_busy_rmid(r, d)) { - cancel_delayed_work(&d->cqm_limbo); - cqm_setup_limbo_handler(d, 0); - } - } } static void clear_closid_rmid(int cpu) { struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state); - state->default_closid = 0; - state->default_rmid = 0; - state->cur_closid = 0; - state->cur_rmid = 0; - wrmsr(MSR_IA32_PQR_ASSOC, 0, 0); + state->default_closid = RESCTRL_RESERVED_CLOSID; + state->default_rmid = RESCTRL_RESERVED_RMID; + state->cur_closid = RESCTRL_RESERVED_CLOSID; + state->cur_rmid = RESCTRL_RESERVED_RMID; + wrmsr(MSR_IA32_PQR_ASSOC, RESCTRL_RESERVED_RMID, + RESCTRL_RESERVED_CLOSID); } -static int resctrl_online_cpu(unsigned int cpu) +static int resctrl_arch_online_cpu(unsigned int cpu) { struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_add_cpu(cpu, r); - /* The cpu is 
set in default rdtgroup after online. */ - cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); + resctrl_online_cpu(cpu); return 0; } -static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) -{ - struct rdtgroup *cr; - - list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { - if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) { - break; - } - } -} - -static int resctrl_offline_cpu(unsigned int cpu) +static int resctrl_arch_offline_cpu(unsigned int cpu) { - struct rdtgroup *rdtgrp; struct rdt_resource *r; - mutex_lock(&rdtgroup_mutex); + resctrl_offline_cpu(cpu); + + mutex_lock(&domain_list_lock); for_each_capable_rdt_resource(r) domain_remove_cpu(cpu, r); - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { - clear_childcpus(rdtgrp, cpu); - break; - } - } + mutex_unlock(&domain_list_lock); + clear_closid_rmid(cpu); - mutex_unlock(&rdtgroup_mutex); return 0; } -/* - * Choose a width for the resource name and resource data based on the - * resource that has widest name and cbm. - */ -static __init void rdt_init_padding(void) -{ - struct rdt_resource *r; - - for_each_alloc_capable_rdt_resource(r) { - if (r->data_width > max_data_width) - max_data_width = r->data_width; - } -} - enum { RDT_FLAG_CMT, RDT_FLAG_MBM_TOTAL, @@ -688,7 +643,7 @@ struct rdt_options { bool force_off, force_on; }; -static struct rdt_options rdt_options[] __initdata = { +static struct rdt_options rdt_options[] __ro_after_init = { RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC), RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL), RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL), @@ -728,7 +683,7 @@ static int __init set_rdt_options(char *str) } __setup("rdt", set_rdt_options); -bool __init rdt_cpu_has(int flag) +bool rdt_cpu_has(int flag) { bool ret = boot_cpu_has(flag); struct rdt_options *o; @@ -748,6 +703,21 @@ bool __init rdt_cpu_has(int flag) return ret; } +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt) +{ + if (!rdt_cpu_has(X86_FEATURE_BMEC)) + return false; + + switch (evt) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL); + case QOS_L3_MBM_LOCAL_EVENT_ID: + return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL); + default: + return false; + } +} + static __init bool get_mem_config(void) { struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA]; @@ -757,7 +727,8 @@ static __init bool get_mem_config(void) if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) return __get_mem_config_intel(&hw_res->r_resctrl); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) return __rdt_get_mem_config_amd(&hw_res->r_resctrl); return false; @@ -872,7 +843,6 @@ static __init void rdt_init_res_defs_intel(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = false; r->cache.arch_has_per_cpu_cfg = false; r->cache.min_cbm_bits = 1; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -892,7 +862,7 @@ static __init void rdt_init_res_defs_amd(void) if (r->rid == RDT_RESOURCE_L3 || r->rid == RDT_RESOURCE_L2) { - r->cache.arch_has_sparse_bitmaps = true; + r->cache.arch_has_sparse_bitmasks = true; r->cache.arch_has_per_cpu_cfg = true; r->cache.min_cbm_bits = 0; } else if (r->rid == RDT_RESOURCE_MBA) { @@ -909,7 +879,8 
@@ static __init void rdt_init_res_defs(void) { if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) rdt_init_res_defs_intel(); - else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) rdt_init_res_defs_amd(); } @@ -940,12 +911,14 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c) c->x86_cache_occ_scale = ebx; c->x86_cache_mbm_width_offset = eax & 0xff; - if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset) + if ((c->x86_vendor == X86_VENDOR_AMD || + c->x86_vendor == X86_VENDOR_HYGON) && + !c->x86_cache_mbm_width_offset) c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD; } } -static int __init resctrl_late_init(void) +static int __init resctrl_arch_late_init(void) { struct rdt_resource *r; int state, ret; @@ -961,15 +934,14 @@ static int __init resctrl_late_init(void) if (!get_rdt_resources()) return -ENODEV; - rdt_init_padding(); - state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/resctrl/cat:online:", - resctrl_online_cpu, resctrl_offline_cpu); + resctrl_arch_online_cpu, + resctrl_arch_offline_cpu); if (state < 0) return state; - ret = rdtgroup_init(); + ret = resctrl_init(); if (ret) { cpuhp_remove_state(state); return ret; @@ -985,12 +957,13 @@ static int __init resctrl_late_init(void) return 0; } -late_initcall(resctrl_late_init); +late_initcall(resctrl_arch_late_init); -static void __exit resctrl_exit(void) +static void __exit resctrl_arch_exit(void) { cpuhp_remove_state(rdt_online); - rdtgroup_exit(); + + resctrl_exit(); } -__exitcall(resctrl_exit); +__exitcall(resctrl_arch_exit); diff --git a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c index b44c487727d456e445a4ad53ddaff76d84e449b1..c5c3eaea27b65be9f729863b9db4f5ecf933c940 100644 --- a/arch/x86/kernel/cpu/resctrl/ctrlmondata.c +++ b/arch/x86/kernel/cpu/resctrl/ctrlmondata.c @@ -19,251 +19,9 @@ #include #include #include -#include "internal.h" - -/* - * Check whether MBA bandwidth percentage value is correct. The value is - * checked against the minimum and max bandwidth values specified by the - * hardware. The allocated bandwidth percentage is rounded to the next - * control step available on the hardware. - */ -static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) -{ - unsigned long bw; - int ret; - - /* - * Only linear delay values is supported for current Intel SKUs. 
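resctrl_arch_late_init() keeps the same dynamic-hotplug registration, only with renamed callbacks. The pattern, reduced to a kernel-style sketch (not a buildable module on its own)::

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static int demo_online(unsigned int cpu)
    {
        /* e.g. add the CPU to its resource domains */
        return 0;
    }

    static int demo_offline(unsigned int cpu)
    {
        /* e.g. remove the CPU from its resource domains */
        return 0;
    }

    static int demo_state;

    static int __init demo_init(void)
    {
        demo_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                       "x86/resctrl/cat:online:",
                                       demo_online, demo_offline);
        return demo_state < 0 ? demo_state : 0;
    }

    static void __exit demo_exit(void)
    {
        cpuhp_remove_state(demo_state);
    }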
- */ - if (!r->membw.delay_linear && r->membw.arch_needs_linear) { - rdt_last_cmd_puts("No support for non-linear MB domains\n"); - return false; - } - - ret = kstrtoul(buf, 10, &bw); - if (ret) { - rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); - return false; - } - - if ((bw < r->membw.min_bw || bw > r->default_ctrl) && - !is_mba_sc(r)) { - rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, - r->membw.min_bw, r->default_ctrl); - return false; - } - - *data = roundup(bw, (unsigned long)r->membw.bw_gran); - return true; -} - -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct resctrl_staged_config *cfg; - u32 closid = data->rdtgrp->closid; - struct rdt_resource *r = s->res; - unsigned long bw_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - if (!bw_validate(data->buf, &bw_val, r)) - return -EINVAL; - - if (is_mba_sc(r)) { - d->mbps_val[closid] = bw_val; - return 0; - } - - cfg->new_ctrl = bw_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Check whether a cache bit mask is valid. - * For Intel the SDM says: - * Please note that all (and only) contiguous '1' combinations - * are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.). - * Additionally Haswell requires at least two bits set. - * AMD allows non-contiguous bitmasks. - */ -static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) -{ - unsigned long first_bit, zero_bit, val; - unsigned int cbm_len = r->cache.cbm_len; - int ret; - - ret = kstrtoul(buf, 16, &val); - if (ret) { - rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); - return false; - } - - if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { - rdt_last_cmd_puts("Mask out of range\n"); - return false; - } - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Are non-contiguous bitmaps allowed? */ - if (!r->cache.arch_has_sparse_bitmaps && - (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { - rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); - return false; - } +#include - if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("Need at least %d bits in the mask\n", - r->cache.min_cbm_bits); - return false; - } - - *data = val; - return true; -} - -/* - * Read one cache bit mask (hex). Check that it is valid for the current - * resource type. - */ -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d) -{ - struct rdtgroup *rdtgrp = data->rdtgrp; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 cbm_val; - - cfg = &d->staged_config[s->conf_type]; - if (cfg->have_new_ctrl) { - rdt_last_cmd_printf("Duplicate domain %d\n", d->id); - return -EINVAL; - } - - /* - * Cannot set up more than one pseudo-locked region in a cache - * hierarchy. 
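The bw_validate() removed above boils down to three steps: decimal parse, range check against the hardware minimum and maximum, and rounding up to the hardware control step. It moves out of the arch code rather than disappearing, so a userspace model is a fair reference for what an MBA schemata write accepts::

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static bool bw_validate(const char *buf, unsigned long *out,
                            unsigned long min_bw, unsigned long max_bw,
                            unsigned long bw_gran)
    {
        char *end;
        unsigned long bw = strtoul(buf, &end, 10);

        if (end == buf || *end != '\0')
            return false;               /* non-decimal digit */
        if (bw < min_bw || bw > max_bw)
            return false;               /* out of range */
        *out = (bw + bw_gran - 1) / bw_gran * bw_gran;   /* roundup() */
        return true;
    }

    int main(void)
    {
        unsigned long bw;

        /* Intel-style MBA: min 10%, max 100%, 10% steps. */
        if (bw_validate("45", &bw, 10, 100, 10))
            printf("45 rounds up to %lu\n", bw);   /* -> 50 */
        return 0;
    }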
- */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - rdtgroup_pseudo_locked_in_hierarchy(d)) { - rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); - return -EINVAL; - } - - if (!cbm_validate(data->buf, &cbm_val, r)) - return -EINVAL; - - if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_SHAREABLE) && - rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { - rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); - return -EINVAL; - } - - /* - * The CBM may not overlap with the CBM of another closid if - * either is exclusive. - */ - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { - rdt_last_cmd_puts("Overlaps with exclusive group\n"); - return -EINVAL; - } - - if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { - if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - rdt_last_cmd_puts("Overlaps with other group\n"); - return -EINVAL; - } - } - - cfg->new_ctrl = cbm_val; - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * For each domain in this resource we expect to find a series of: - * id=mask - * separated by ";". The "id" is in decimal, and must match one of - * the "id"s for this resource. - */ -static int parse_line(char *line, struct resctrl_schema *s, - struct rdtgroup *rdtgrp) -{ - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - struct rdt_parse_data data; - char *dom = NULL, *id; - struct rdt_domain *d; - unsigned long dom_id; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && - (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { - rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); - return -EINVAL; - } - -next: - if (!line || line[0] == '\0') - return 0; - dom = strsep(&line, ";"); - id = strsep(&dom, "="); - if (!dom || kstrtoul(id, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); - return -EINVAL; - } - dom = strim(dom); - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - data.buf = dom; - data.rdtgrp = rdtgrp; - if (r->parse_ctrlval(&data, s, d)) - return -EINVAL; - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - cfg = &d->staged_config[t]; - /* - * In pseudo-locking setup mode and just - * parsed a valid CBM that should be - * pseudo-locked. Only one locked region per - * resource group and domain so just do - * the required initialization for single - * region and return. 
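Likewise for the removed cbm_validate(): its contiguity rule can be written without find_next_zero_bit(). For a non-zero mask, adding the lowest set bit clears one contiguous run of ones, so any remaining overlap with the original value means the set bits were not consecutive; the minimum-width check reduces to a popcount for contiguous masks. A model::

    #include <stdbool.h>
    #include <stdio.h>

    static bool cbm_ok(unsigned long val, unsigned int cbm_len,
                       unsigned int min_bits, bool sparse_ok)
    {
        if (val > (1UL << cbm_len) - 1 || (min_bits && !val))
            return false;               /* mask out of range */
        if (!sparse_ok && (val & (val + (val & -val))))
            return false;               /* non-consecutive 1-bits */
        return (unsigned int)__builtin_popcountl(val) >= min_bits;
    }

    int main(void)
    {
        /* Intel-style L3: 20-bit CBM, contiguous, at least one bit. */
        printf("0xff0 -> %s\n", cbm_ok(0xff0, 20, 1, false) ? "ok" : "rejected");
        printf("0xf0f -> %s\n", cbm_ok(0xf0f, 20, 1, false) ? "ok" : "rejected");
        return 0;
    }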
- */ - rdtgrp->plr->s = s; - rdtgrp->plr->d = d; - rdtgrp->plr->cbm = cfg->new_ctrl; - d->plr = rdtgrp->plr; - return 0; - } - goto next; - } - } - return -EINVAL; -} - -static u32 get_config_index(u32 closid, enum resctrl_conf_type type) -{ - switch (type) { - default: - case CDP_NONE: - return closid; - case CDP_CODE: - return closid * 2 + 1; - case CDP_DATA: - return closid * 2; - } -} +#include "internal.h" static bool apply_config(struct rdt_hw_domain *hw_dom, struct resctrl_staged_config *cfg, u32 idx, @@ -286,7 +44,7 @@ int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, t); + u32 idx = resctrl_get_config_index(closid, t); struct msr_param msr_param; if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) @@ -312,6 +70,9 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) struct rdt_domain *d; u32 idx; + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) return -ENOMEM; @@ -323,7 +84,7 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) if (!cfg->have_new_ctrl) continue; - idx = get_config_index(closid, t); + idx = resctrl_get_config_index(closid, t); if (!apply_config(hw_dom, cfg, idx, cpu_mask)) continue; @@ -350,232 +111,11 @@ int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) return 0; } -static int rdtgroup_parse_resource(char *resname, char *tok, - struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - - list_for_each_entry(s, &resctrl_schema_all, list) { - if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) - return parse_line(tok, s, rdtgrp); - } - rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); - return -EINVAL; -} - -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct resctrl_schema *s; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - char *tok, *resname; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - cpus_read_lock(); - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); - return -ENOENT; - } - rdt_last_cmd_clear(); - - /* - * No changes to pseudo-locked region allowed. It has to be removed - * and re-created instead. - */ - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = -EINVAL; - rdt_last_cmd_puts("Resource group is pseudo-locked\n"); - goto out; - } - - rdt_staged_configs_clear(); - - while ((tok = strsep(&buf, "\n")) != NULL) { - resname = strim(strsep(&tok, ":")); - if (!tok) { - rdt_last_cmd_puts("Missing ':'\n"); - ret = -EINVAL; - goto out; - } - if (tok[0] == '\0') { - rdt_last_cmd_printf("Missing '%s' value\n", resname); - ret = -EINVAL; - goto out; - } - ret = rdtgroup_parse_resource(resname, tok, rdtgrp); - if (ret) - goto out; - } - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - - /* - * Writes to mba_sc resources update the software controller, - * not the control MSR. 
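The removed parse_line() walked "id=mask;id=mask" strings with two nested strsep() calls. A runnable model of that tokenization, including the missing-'=' check it performed::

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        char line[] = "0=7ff0;1=3c";
        char *buf = line, *dom, *id;

        while ((dom = strsep(&buf, ";")) && *dom) {
            id = strsep(&dom, "=");     /* id before '=', mask after */
            if (!dom || !*id) {
                fprintf(stderr, "Missing '=' or non-numeric domain\n");
                return 1;
            }
            printf("domain %lu -> mask %s\n", strtoul(id, NULL, 10), dom);
        }
        return 0;
    }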
- */ - if (is_mba_sc(r)) - continue; - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret) - goto out; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * If pseudo-locking fails we keep the resource group in - * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service - * active and updated for just the domain the pseudo-locked - * region was requested for. - */ - ret = rdtgroup_pseudo_lock_create(rdtgrp); - } - -out: - rdt_staged_configs_clear(); - rdtgroup_kn_unlock(of->kn); - cpus_read_unlock(); - return ret ?: nbytes; -} - u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - u32 idx = get_config_index(closid, type); + u32 idx = resctrl_get_config_index(closid, type); return hw_dom->ctrl_val[idx]; } - -static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) -{ - struct rdt_resource *r = schema->res; - struct rdt_domain *dom; - bool sep = false; - u32 ctrl_val; - - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - if (is_mba_sc(r)) - ctrl_val = dom->mbps_val[closid]; - else - ctrl_val = resctrl_arch_get_config(r, dom, closid, - schema->conf_type); - - seq_printf(s, r->format_str, dom->id, max_data_width, - ctrl_val); - sep = true; - } - seq_puts(s, "\n"); -} - -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - struct rdtgroup *rdtgrp; - int ret = 0; - u32 closid; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - list_for_each_entry(schema, &resctrl_schema_all, list) { - seq_printf(s, "%s:uninitialized\n", schema->name); - } - } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%s:%d=%x\n", - rdtgrp->plr->s->res->name, - rdtgrp->plr->d->id, - rdtgrp->plr->cbm); - } - } else { - closid = rdtgrp->closid; - list_for_each_entry(schema, &resctrl_schema_all, list) { - if (closid < schema->num_closid) - show_doms(s, schema, closid); - } - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); - return ret; -} - -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first) -{ - /* - * setup the parameters to send to the IPI to read the data. 
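resctrl_get_config_index(), used above in place of the removed static helper, keeps the same even/odd CDP layout: with CDP enabled each CLOSID owns a DATA/CODE pair of control registers. The mapping, copied into a standalone form::

    #include <stdio.h>

    enum resctrl_conf_type { CDP_NONE, CDP_CODE, CDP_DATA };

    static unsigned int config_index(unsigned int closid,
                                     enum resctrl_conf_type type)
    {
        switch (type) {
        case CDP_CODE:
            return closid * 2 + 1;      /* odd slots carry code masks */
        case CDP_DATA:
            return closid * 2;          /* even slots carry data masks */
        case CDP_NONE:
        default:
            return closid;
        }
    }

    int main(void)
    {
        printf("closid 3: none=%u data=%u code=%u\n",
               config_index(3, CDP_NONE),
               config_index(3, CDP_DATA),
               config_index(3, CDP_CODE));
        return 0;
    }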
- */ - rr->rgrp = rdtgrp; - rr->evtid = evtid; - rr->r = r; - rr->d = d; - rr->val = 0; - rr->first = first; - - smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); -} - -int rdtgroup_mondata_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - u32 resid, evtid, domid; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - union mon_data_bits md; - struct rdt_domain *d; - struct rmid_read rr; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto out; - } - - md.priv = of->kn->priv; - resid = md.u.rid; - domid = md.u.domid; - evtid = md.u.evtid; - - r = &rdt_resources_all[resid].r_resctrl; - d = rdt_find_domain(r, domid, NULL); - if (IS_ERR_OR_NULL(d)) { - ret = -ENOENT; - goto out; - } - - mon_event_read(&rr, r, d, rdtgrp, evtid, false); - - if (rr.err == -EIO) - seq_puts(m, "Error\n"); - else if (rr.err == -EINVAL) - seq_puts(m, "Unavailable\n"); - else - seq_printf(m, "%llu\n", rr.val); - -out: - rdtgroup_kn_unlock(of->kn); - return ret; -} diff --git a/arch/x86/kernel/cpu/resctrl/internal.h b/arch/x86/kernel/cpu/resctrl/internal.h index 85ceaf9a31ac20099c86647b4958e4ca0f28fa6b..bf35389926677a29355795f6511e038a7da066f4 100644 --- a/arch/x86/kernel/cpu/resctrl/internal.h +++ b/arch/x86/kernel/cpu/resctrl/internal.h @@ -7,18 +7,14 @@ #include #include #include +#include + +#include #define L3_QOS_CDP_ENABLE 0x01ULL #define L2_QOS_CDP_ENABLE 0x01ULL -#define CQM_LIMBOCHECK_INTERVAL 1000 - -#define MBM_CNTR_WIDTH_BASE 24 -#define MBM_OVERFLOW_INTERVAL 1000 -#define MAX_MBA_BW 100u -#define MBA_IS_LINEAR 0x4 -#define MAX_MBA_BW_AMD 0x800 #define MBM_CNTR_WIDTH_OFFSET_AMD 20 #define RMID_VAL_ERROR BIT_ULL(63) @@ -30,282 +26,6 @@ */ #define MBM_CNTR_WIDTH_OFFSET_MAX (62 - MBM_CNTR_WIDTH_BASE) -/* Reads to Local DRAM Memory */ -#define READS_TO_LOCAL_MEM BIT(0) - -/* Reads to Remote DRAM Memory */ -#define READS_TO_REMOTE_MEM BIT(1) - -/* Non-Temporal Writes to Local Memory */ -#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) - -/* Non-Temporal Writes to Remote Memory */ -#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) - -/* Reads to Local Memory the system identifies as "Slow Memory" */ -#define READS_TO_LOCAL_S_MEM BIT(4) - -/* Reads to Remote Memory the system identifies as "Slow Memory" */ -#define READS_TO_REMOTE_S_MEM BIT(5) - -/* Dirty Victims to All Types of Memory */ -#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) - -/* Max event bits supported */ -#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) - -struct rdt_fs_context { - struct kernfs_fs_context kfc; - bool enable_cdpl2; - bool enable_cdpl3; - bool enable_mba_mbps; -}; - -static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) -{ - struct kernfs_fs_context *kfc = fc->fs_private; - - return container_of(kfc, struct rdt_fs_context, kfc); -} - -DECLARE_STATIC_KEY_FALSE(rdt_enable_key); -DECLARE_STATIC_KEY_FALSE(rdt_mon_enable_key); - -/** - * struct mon_evt - Entry in the event list of a resource - * @evtid: event id - * @name: name of the event - * @configurable: true if the event is configurable - * @list: entry in &rdt_resource->evt_list - */ -struct mon_evt { - enum resctrl_event_id evtid; - char *name; - bool configurable; - struct list_head list; -}; - -/** - * union mon_data_bits - Monitoring details for each event file - * @priv: Used to store monitoring event data in @u - * as kernfs private data - * @rid: Resource id associated with the event file - * @evtid: Event id associated with the event file - * @domid: The domain to which the event file 
belongs - * @u: Name of the bit fields struct - */ -union mon_data_bits { - void *priv; - struct { - unsigned int rid : 10; - enum resctrl_event_id evtid : 8; - unsigned int domid : 14; - } u; -}; - -struct rmid_read { - struct rdtgroup *rgrp; - struct rdt_resource *r; - struct rdt_domain *d; - enum resctrl_event_id evtid; - bool first; - int err; - u64 val; -}; - -extern bool rdt_alloc_capable; -extern bool rdt_mon_capable; -extern unsigned int rdt_mon_features; -extern struct list_head resctrl_schema_all; - -enum rdt_group_type { - RDTCTRL_GROUP = 0, - RDTMON_GROUP, - RDT_NUM_GROUP, -}; - -/** - * enum rdtgrp_mode - Mode of a RDT resource group - * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations - * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed - * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking - * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations - * allowed AND the allocations are Cache Pseudo-Locked - * @RDT_NUM_MODES: Total number of modes - * - * The mode of a resource group enables control over the allowed overlap - * between allocations associated with different resource groups (classes - * of service). User is able to modify the mode of a resource group by - * writing to the "mode" resctrl file associated with the resource group. - * - * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by - * writing the appropriate text to the "mode" file. A resource group enters - * "pseudo-locked" mode after the schemata is written while the resource - * group is in "pseudo-locksetup" mode. - */ -enum rdtgrp_mode { - RDT_MODE_SHAREABLE = 0, - RDT_MODE_EXCLUSIVE, - RDT_MODE_PSEUDO_LOCKSETUP, - RDT_MODE_PSEUDO_LOCKED, - - /* Must be last */ - RDT_NUM_MODES, -}; - -/** - * struct mongroup - store mon group's data in resctrl fs. - * @mon_data_kn: kernfs node for the mon_data directory - * @parent: parent rdtgrp - * @crdtgrp_list: child rdtgroup node list - * @rmid: rmid for this rdtgroup - */ -struct mongroup { - struct kernfs_node *mon_data_kn; - struct rdtgroup *parent; - struct list_head crdtgrp_list; - u32 rmid; -}; - -/** - * struct pseudo_lock_region - pseudo-lock region information - * @s: Resctrl schema for the resource to which this - * pseudo-locked region belongs - * @d: RDT domain to which this pseudo-locked region - * belongs - * @cbm: bitmask of the pseudo-locked region - * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread - * completion - * @thread_done: variable used by waitqueue to test if pseudo-locking - * thread completed - * @cpu: core associated with the cache on which the setup code - * will be run - * @line_size: size of the cache lines - * @size: size of pseudo-locked region in bytes - * @kmem: the kernel memory associated with pseudo-locked region - * @minor: minor number of character device associated with this - * region - * @debugfs_dir: pointer to this region's directory in the debugfs - * filesystem - * @pm_reqs: Power management QoS requests related to this region - */ -struct pseudo_lock_region { - struct resctrl_schema *s; - struct rdt_domain *d; - u32 cbm; - wait_queue_head_t lock_thread_wq; - int thread_done; - int cpu; - unsigned int line_size; - unsigned int size; - void *kmem; - unsigned int minor; - struct dentry *debugfs_dir; - struct list_head pm_reqs; -}; - -/** - * struct rdtgroup - store rdtgroup's data in resctrl file system. 
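The union mon_data_bits being removed here packs 10 + 8 + 14 = 32 bits of resource/event/domain identity straight into the kernfs private pointer, which fits on both 32-bit and 64-bit builds. A standalone round-trip of that packing (the enum field is widened to unsigned int so the sketch builds on its own)::

    #include <stdio.h>

    union mon_data_bits {
        void *priv;
        struct {
            unsigned int rid   : 10;
            unsigned int evtid : 8;
            unsigned int domid : 14;
        } u;
    };

    int main(void)
    {
        union mon_data_bits md = { .priv = NULL };
        union mon_data_bits back;

        md.u.rid = 0;                   /* e.g. RDT_RESOURCE_L3 */
        md.u.evtid = 2;                 /* e.g. QOS_L3_MBM_TOTAL_EVENT_ID */
        md.u.domid = 5;

        back.priv = md.priv;            /* round-trip via the pointer */
        printf("rid=%u evtid=%u domid=%u\n",
               back.u.rid, back.u.evtid, back.u.domid);
        return 0;
    }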
- * @kn: kernfs node - * @rdtgroup_list: linked list for all rdtgroups - * @closid: closid for this rdtgroup - * @cpu_mask: CPUs assigned to this rdtgroup - * @flags: status bits - * @waitcount: how many cpus expect to find this - * group when they acquire rdtgroup_mutex - * @type: indicates type of this rdtgroup - either - * monitor only or ctrl_mon group - * @mon: mongroup related data - * @mode: mode of resource group - * @plr: pseudo-locked region - */ -struct rdtgroup { - struct kernfs_node *kn; - struct list_head rdtgroup_list; - u32 closid; - struct cpumask cpu_mask; - int flags; - atomic_t waitcount; - enum rdt_group_type type; - struct mongroup mon; - enum rdtgrp_mode mode; - struct pseudo_lock_region *plr; -}; - -/* rdtgroup.flags */ -#define RDT_DELETED 1 - -/* rftype.flags */ -#define RFTYPE_FLAGS_CPUS_LIST 1 - -/* - * Define the file type flags for base and info directories. - */ -#define RFTYPE_INFO BIT(0) -#define RFTYPE_BASE BIT(1) -#define RF_CTRLSHIFT 4 -#define RF_MONSHIFT 5 -#define RF_TOPSHIFT 6 -#define RFTYPE_CTRL BIT(RF_CTRLSHIFT) -#define RFTYPE_MON BIT(RF_MONSHIFT) -#define RFTYPE_TOP BIT(RF_TOPSHIFT) -#define RFTYPE_RES_CACHE BIT(8) -#define RFTYPE_RES_MB BIT(9) -#define RF_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) -#define RF_MON_INFO (RFTYPE_INFO | RFTYPE_MON) -#define RF_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) -#define RF_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) - -/* List of all resource groups */ -extern struct list_head rdt_all_groups; - -extern int max_name_width, max_data_width; - -int __init rdtgroup_init(void); -void __exit rdtgroup_exit(void); - -/** - * struct rftype - describe each file in the resctrl file system - * @name: File name - * @mode: Access mode - * @kf_ops: File operations - * @flags: File specific RFTYPE_FLAGS_* flags - * @fflags: File specific RF_* or RFTYPE_* flags - * @seq_show: Show content of the file - * @write: Write to the file - */ -struct rftype { - char *name; - umode_t mode; - const struct kernfs_ops *kf_ops; - unsigned long flags; - unsigned long fflags; - - int (*seq_show)(struct kernfs_open_file *of, - struct seq_file *sf, void *v); - /* - * write() is the generic write callback which maps directly to - * kernfs write operation and overrides all other operations. - * Maximum write size is determined by ->max_write_len. - */ - ssize_t (*write)(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -}; - -/** - * struct mbm_state - status for each MBM counter in each domain - * @prev_bw_bytes: Previous bytes value read for bandwidth calculation - * @prev_bw: The most recent bandwidth in MBps - * @delta_bw: Difference between the current and previous bandwidth - * @delta_comp: Indicates whether to compute the delta_bw - */ -struct mbm_state { - u64 prev_bw_bytes; - u32 prev_bw; - u32 delta_bw; - bool delta_comp; -}; - /** * struct arch_mbm_state - values used to compute resctrl_arch_rmid_read()s * return value. 
@@ -352,37 +72,6 @@ struct msr_param { u32 high; }; -static inline bool is_llc_occupancy_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_OCCUP_EVENT_ID)); -} - -static inline bool is_mbm_total_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_TOTAL_EVENT_ID)); -} - -static inline bool is_mbm_local_enabled(void) -{ - return (rdt_mon_features & (1 << QOS_L3_MBM_LOCAL_EVENT_ID)); -} - -static inline bool is_mbm_enabled(void) -{ - return (is_mbm_total_enabled() || is_mbm_local_enabled()); -} - -static inline bool is_mbm_event(int e) -{ - return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && - e <= QOS_L3_MBM_LOCAL_EVENT_ID); -} - -struct rdt_parse_data { - struct rdtgroup *rdtgrp; - char *buf; -}; - /** * struct rdt_hw_resource - arch private attributes of a resctrl resource * @r_resctrl: Attributes of the resource used directly by resctrl. @@ -417,28 +106,7 @@ static inline struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r return container_of(r, struct rdt_hw_resource, r_resctrl); } -int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); -int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, - struct rdt_domain *d); - -extern struct mutex rdtgroup_mutex; - extern struct rdt_hw_resource rdt_resources_all[]; -extern struct rdtgroup rdtgroup_default; -DECLARE_STATIC_KEY_FALSE(rdt_alloc_enable_key); - -extern struct dentry *debugfs_resctrl; - -enum resctrl_res_level { - RDT_RESOURCE_L3, - RDT_RESOURCE_L2, - RDT_RESOURCE_MBA, - RDT_RESOURCE_SMBA, - - /* Must be the last */ - RDT_NUM_RESOURCES, -}; static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) { @@ -448,13 +116,6 @@ static inline struct rdt_resource *resctrl_inc(struct rdt_resource *res) return &hw_res->r_resctrl; } -static inline bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level l) -{ - return rdt_resources_all[l].cdp_enabled; -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable); - /* * To return the common struct rdt_resource, which is contained in struct * rdt_hw_resource, walk the resctrl member of struct rdt_hw_resource. 
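resctrl_to_arch_res() (and its domain counterpart) is plain container_of(): the arch-private wrapper embeds the shared struct as a member, and the offset subtraction recovers the wrapper from a pointer to that member. A self-contained illustration::

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rdt_resource { const char *name; };

    struct rdt_hw_resource {
        struct rdt_resource r_resctrl;   /* shared part, seen by fs code */
        unsigned int num_closid;         /* arch-private part */
    };

    static struct rdt_hw_resource *resctrl_to_arch_res(struct rdt_resource *r)
    {
        return container_of(r, struct rdt_hw_resource, r_resctrl);
    }

    int main(void)
    {
        struct rdt_hw_resource hw = {
            .r_resctrl = { "L3" }, .num_closid = 16,
        };
        struct rdt_resource *r = &hw.r_resctrl;   /* what fs code holds */

        printf("%s: num_closid=%u\n", r->name,
               resctrl_to_arch_res(r)->num_closid);
        return 0;
    }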
@@ -492,6 +153,15 @@ union cpuid_0x10_3_eax { unsigned int full; }; +/* CPUID.(EAX=10H, ECX=ResID).ECX */ +union cpuid_0x10_x_ecx { + struct { + unsigned int reserved:3; + unsigned int noncont:1; + } split; + unsigned int full; +}; + /* CPUID.(EAX=10H, ECX=ResID).EDX */ union cpuid_0x10_x_edx { struct { @@ -500,61 +170,10 @@ union cpuid_0x10_x_edx { unsigned int full; }; -void rdt_last_cmd_clear(void); -void rdt_last_cmd_puts(const char *s); -__printf(1, 2) -void rdt_last_cmd_printf(const char *fmt, ...); - void rdt_ctrl_update(void *arg); -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); -void rdtgroup_kn_unlock(struct kernfs_node *kn); -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask); -struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id, - struct list_head **pos); -ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off); -int rdtgroup_schemata_show(struct kernfs_open_file *of, - struct seq_file *s, void *v); -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive); -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm); -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); -int rdtgroup_tasks_assigned(struct rdtgroup *r); -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); -int rdt_pseudo_lock_init(void); -void rdt_pseudo_lock_release(void); -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); -struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r); -int closids_supported(void); -void closid_free(int closid); -int alloc_rmid(void); -void free_rmid(u32 rmid); int rdt_get_mon_l3_config(struct rdt_resource *r); -bool __init rdt_cpu_has(int flag); -void mon_event_count(void *info); -int rdtgroup_mondata_show(struct seq_file *m, void *arg); -void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, - struct rdt_domain *d, struct rdtgroup *rdtgrp, - int evtid, int first); -void mbm_setup_overflow_handler(struct rdt_domain *dom, - unsigned long delay_ms); -void mbm_handle_overflow(struct work_struct *work); +bool rdt_cpu_has(int flag); void __init intel_rdt_mbm_apply_quirk(void); -bool is_mba_sc(struct rdt_resource *r); -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms); -void cqm_handle_limbo(struct work_struct *work); -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d); -void __check_limbo(struct rdt_domain *d, bool force_free); void rdt_domain_reconfigure_cdp(struct rdt_resource *r); -void __init thread_throttle_mode_init(void); -void __init mbm_config_rftype_init(const char *config); -void rdt_staged_configs_clear(void); #endif /* _ASM_X86_RESCTRL_INTERNAL_H */ diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c index f136ac046851c87339386968e56301d70069e20c..e0cc1b4992798dbbb5f89b46e287d32d22fe7233 100644 --- a/arch/x86/kernel/cpu/resctrl/monitor.c +++ b/arch/x86/kernel/cpu/resctrl/monitor.c @@ -15,6 +15,7 @@ * Software Developer Manual June 2016, volume 3, section 17.17. 
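The new cpuid_0x10_x_ecx union gives the ECX output of leaf 0x10 a named layout instead of an open-coded shift: write .full, read .split.noncont. A standalone round-trip::

    #include <stdio.h>

    union cpuid_0x10_x_ecx {
        struct {
            unsigned int reserved : 3;
            unsigned int noncont  : 1;
        } split;
        unsigned int full;
    };

    int main(void)
    {
        union cpuid_0x10_x_ecx ecx;

        ecx.full = 0x8;                              /* bit 3 set */
        printf("noncont=%u\n", ecx.split.noncont);   /* -> 1 */
        return 0;
    }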
*/ +#include #include #include #include @@ -24,33 +25,6 @@ #include "internal.h" -struct rmid_entry { - u32 rmid; - int busy; - struct list_head list; -}; - -/* - * @rmid_free_lru - A least recently used list of free RMIDs - * These RMIDs are guaranteed to have an occupancy less than the - * threshold occupancy - */ -static LIST_HEAD(rmid_free_lru); - -/* - * @rmid_limbo_count - count of currently unused but (potentially) - * dirty RMIDs. - * This counts RMIDs that no one is currently using but that - * may have a occupancy value > resctrl_rmid_realloc_threshold. User can - * change the threshold occupancy value. - */ -static unsigned int rmid_limbo_count; - -/* - * @rmid_entry - The entry in the limbo and free lists. - */ -static struct rmid_entry *rmid_ptrs; - /* * Global boolean for rdt_monitor which is true if any * resource monitoring is enabled. @@ -62,17 +36,6 @@ bool rdt_mon_capable; */ unsigned int rdt_mon_features; -/* - * This is the threshold cache occupancy in bytes at which we will consider an - * RMID available for re-allocation. - */ -unsigned int resctrl_rmid_realloc_threshold; - -/* - * This is the maximum value for the reallocation threshold, in bytes. - */ -unsigned int resctrl_rmid_realloc_limit; - #define CF(cf) ((unsigned long)(1048576 * (cf) + 0.5)) /* @@ -136,16 +99,6 @@ static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val) return val; } -static inline struct rmid_entry *__rmid_entry(u32 rmid) -{ - struct rmid_entry *entry; - - entry = &rmid_ptrs[rmid]; - WARN_ON(entry->rmid != rmid); - - return entry; -} - static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val) { u64 msr_val; @@ -181,6 +134,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, return &hw_dom->arch_mbm_total[rmid]; case QOS_L3_MBM_LOCAL_EVENT_ID: return &hw_dom->arch_mbm_local[rmid]; + default: + break; } /* Never expect to get here */ @@ -190,7 +145,8 @@ static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom, } void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid) + u32 unused, u32 rmid, + enum resctrl_event_id eventid) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); struct arch_mbm_state *am; @@ -212,11 +168,11 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d) { struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); - if (is_mbm_total_enabled()) + if (resctrl_arch_is_mbm_total_enabled()) memset(hw_dom->arch_mbm_total, 0, sizeof(*hw_dom->arch_mbm_total) * r->num_rmid); - if (is_mbm_local_enabled()) + if (resctrl_arch_is_mbm_local_enabled()) memset(hw_dom->arch_mbm_local, 0, sizeof(*hw_dom->arch_mbm_local) * r->num_rmid); } @@ -230,7 +186,8 @@ static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width) } int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val) + u32 unused, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *ignored) { struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d); @@ -238,6 +195,8 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, u64 msr_val, chunks; int ret; + resctrl_arch_rmid_read_context_check(); + if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask)) return -EINVAL; @@ -260,527 +219,11 @@ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, return 0; } -/* - * Check the RMIDs that 
are marked as busy for this domain. If the - * reported LLC occupancy is below the threshold clear the busy bit and - * decrement the count. If the busy count gets to zero on an RMID, we - * free the RMID - */ -void __check_limbo(struct rdt_domain *d, bool force_free) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - struct rmid_entry *entry; - u32 crmid = 1, nrmid; - bool rmid_dirty; - u64 val = 0; - - /* - * Skip RMID 0 and start from RMID 1 and check all the RMIDs that - * are marked as busy for occupancy < threshold. If the occupancy - * is less than the threshold decrement the busy counter of the - * RMID and move it to the free list when the counter reaches 0. - */ - for (;;) { - nrmid = find_next_bit(d->rmid_busy_llc, r->num_rmid, crmid); - if (nrmid >= r->num_rmid) - break; - - entry = __rmid_entry(nrmid); - - if (resctrl_arch_rmid_read(r, d, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, &val)) { - rmid_dirty = true; - } else { - rmid_dirty = (val >= resctrl_rmid_realloc_threshold); - } - - if (force_free || !rmid_dirty) { - clear_bit(entry->rmid, d->rmid_busy_llc); - if (!--entry->busy) { - rmid_limbo_count--; - list_add_tail(&entry->list, &rmid_free_lru); - } - } - crmid = nrmid + 1; - } -} - -bool has_busy_rmid(struct rdt_resource *r, struct rdt_domain *d) -{ - return find_first_bit(d->rmid_busy_llc, r->num_rmid) != r->num_rmid; -} - -/* - * As of now the RMIDs allocation is global. - * However we keep track of which packages the RMIDs - * are used to optimize the limbo list management. - */ -int alloc_rmid(void) -{ - struct rmid_entry *entry; - - lockdep_assert_held(&rdtgroup_mutex); - - if (list_empty(&rmid_free_lru)) - return rmid_limbo_count ? -EBUSY : -ENOSPC; - - entry = list_first_entry(&rmid_free_lru, - struct rmid_entry, list); - list_del(&entry->list); - - return entry->rmid; -} - -static void add_rmid_to_limbo(struct rmid_entry *entry) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - struct rdt_domain *d; - int cpu, err; - u64 val = 0; - - entry->busy = 0; - cpu = get_cpu(); - list_for_each_entry(d, &r->domains, list) { - if (cpumask_test_cpu(cpu, &d->cpu_mask)) { - err = resctrl_arch_rmid_read(r, d, entry->rmid, - QOS_L3_OCCUP_EVENT_ID, - &val); - if (err || val <= resctrl_rmid_realloc_threshold) - continue; - } - - /* - * For the first limbo RMID in the domain, - * setup up the limbo worker. 
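The limbo machinery removed here keeps a freed RMID away from the allocator while any domain still reports its LLC occupancy above resctrl_rmid_realloc_threshold. A compact userspace model of the busy-count bookkeeping, with fixed-size arrays standing in for the per-domain bitmaps and made-up occupancy numbers::

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_RMID  4
    #define NUM_DOM   2
    #define THRESHOLD 1024   /* resctrl_rmid_realloc_threshold stand-in */

    static int busy[NUM_RMID];                /* rmid_entry::busy */
    static bool dirty[NUM_DOM][NUM_RMID];     /* rmid_busy_llc bitmaps */

    /* Freeing: park the RMID in limbo in every still-occupied domain. */
    static void free_rmid(unsigned int rmid, unsigned long long occ[NUM_DOM])
    {
        for (int d = 0; d < NUM_DOM; d++) {
            if (occ[d] > THRESHOLD) {
                dirty[d][rmid] = true;
                busy[rmid]++;
            }
        }
        if (!busy[rmid])
            printf("rmid %u -> free list\n", rmid);
    }

    /* Periodic limbo worker: re-read occupancy, release clean domains. */
    static void check_limbo(int d, unsigned long long occ[NUM_RMID])
    {
        for (unsigned int r = 0; r < NUM_RMID; r++) {
            if (dirty[d][r] && occ[r] <= THRESHOLD) {
                dirty[d][r] = false;
                if (--busy[r] == 0)
                    printf("rmid %u -> free list\n", r);
            }
        }
    }

    int main(void)
    {
        unsigned long long at_free[NUM_DOM] = { 4096, 100 };
        unsigned long long later[NUM_RMID] = { 0, 512, 0, 0 };

        free_rmid(1, at_free);    /* still cached in domain 0: limbo */
        check_limbo(0, later);    /* occupancy decayed: now freed */
        return 0;
    }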
- */ - if (!has_busy_rmid(r, d)) - cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL); - set_bit(entry->rmid, d->rmid_busy_llc); - entry->busy++; - } - put_cpu(); - - if (entry->busy) - rmid_limbo_count++; - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -void free_rmid(u32 rmid) -{ - struct rmid_entry *entry; - - if (!rmid) - return; - - lockdep_assert_held(&rdtgroup_mutex); - - entry = __rmid_entry(rmid); - - if (is_llc_occupancy_enabled()) - add_rmid_to_limbo(entry); - else - list_add_tail(&entry->list, &rmid_free_lru); -} - -static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 rmid, - enum resctrl_event_id evtid) -{ - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return &d->mbm_total[rmid]; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return &d->mbm_local[rmid]; - default: - return NULL; - } -} - -static int __mon_event_count(u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m; - u64 tval = 0; - - if (rr->first) { - resctrl_arch_reset_rmid(rr->r, rr->d, rmid, rr->evtid); - m = get_mbm_state(rr->d, rmid, rr->evtid); - if (m) - memset(m, 0, sizeof(struct mbm_state)); - return 0; - } - - rr->err = resctrl_arch_rmid_read(rr->r, rr->d, rmid, rr->evtid, &tval); - if (rr->err) - return rr->err; - - rr->val += tval; - - return 0; -} - -/* - * mbm_bw_count() - Update bw count from values previously read by - * __mon_event_count(). - * @rmid: The rmid used to identify the cached mbm_state. - * @rr: The struct rmid_read populated by __mon_event_count(). - * - * Supporting function to calculate the memory bandwidth - * and delta bandwidth in MBps. The chunks value previously read by - * __mon_event_count() is compared with the chunks value from the previous - * invocation. This must be called once per second to maintain values in MBps. - */ -static void mbm_bw_count(u32 rmid, struct rmid_read *rr) -{ - struct mbm_state *m = &rr->d->mbm_local[rmid]; - u64 cur_bw, bytes, cur_bytes; - - cur_bytes = rr->val; - bytes = cur_bytes - m->prev_bw_bytes; - m->prev_bw_bytes = cur_bytes; - - cur_bw = bytes / SZ_1M; - - if (m->delta_comp) - m->delta_bw = abs(cur_bw - m->prev_bw); - m->delta_comp = false; - m->prev_bw = cur_bw; -} - -/* - * This is called via IPI to read the CQM/MBM counters - * on a domain. - */ -void mon_event_count(void *info) -{ - struct rdtgroup *rdtgrp, *entry; - struct rmid_read *rr = info; - struct list_head *head; - int ret; - - rdtgrp = rr->rgrp; - - ret = __mon_event_count(rdtgrp->mon.rmid, rr); - - /* - * For Ctrl groups read data from child monitor groups and - * add them together. Count events which are read successfully. - * Discard the rmid_read's reporting errors. - */ - head = &rdtgrp->mon.crdtgrp_list; - - if (rdtgrp->type == RDTCTRL_GROUP) { - list_for_each_entry(entry, head, mon.crdtgrp_list) { - if (__mon_event_count(entry->mon.rmid, rr) == 0) - ret = 0; - } - } - - /* - * __mon_event_count() calls for newly created monitor groups may - * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. - * Discard error if any of the monitor event reads succeeded. - */ - if (ret == 0) - rr->err = 0; -} - -/* - * Feedback loop for MBA software controller (mba_sc) - * - * mba_sc is a feedback loop where we periodically read MBM counters and - * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so - * that: - * - * current bandwidth(cur_bw) < user specified bandwidth(user_bw) - * - * This uses the MBM counters to measure the bandwidth and MBA throttle - * MSRs to control the bandwidth for a particular rdtgrp. 
It builds on the
- * fact that resctrl rdtgroups have both monitoring and control.
- *
- * The frequency of the checks is 1s and we just tag along the MBM overflow
- * timer. Having 1s interval makes the calculation of bandwidth simpler.
- *
- * Although MBA's goal is to restrict the bandwidth to a maximum, there may
- * be a need to increase the bandwidth to avoid unnecessarily restricting
- * the L2 <-> L3 traffic.
- *
- * Since MBA controls the L2 external bandwidth whereas MBM measures the
- * L3 external bandwidth, the following sequence could lead to such a
- * situation.
- *
- * Consider an rdtgroup which had high L3 <-> memory traffic in initial
- * phases -> mba_sc kicks in and reduces bandwidth percentage values -> but
- * after some time rdtgroup has mostly L2 <-> L3 traffic.
- *
- * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
- * throttle MSRs already have low percentage values. To avoid
- * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
- */
-static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
-{
-	u32 closid, rmid, cur_msr_val, new_msr_val;
-	struct mbm_state *pmbm_data, *cmbm_data;
-	u32 cur_bw, delta_bw, user_bw;
-	struct rdt_resource *r_mba;
-	struct rdt_domain *dom_mba;
-	struct list_head *head;
-	struct rdtgroup *entry;
-
-	if (!is_mbm_local_enabled())
-		return;
-
-	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
-
-	closid = rgrp->closid;
-	rmid = rgrp->mon.rmid;
-	pmbm_data = &dom_mbm->mbm_local[rmid];
-
-	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
-	if (!dom_mba) {
-		pr_warn_once("Failure to get domain for MBA update\n");
-		return;
-	}
-
-	cur_bw = pmbm_data->prev_bw;
-	user_bw = dom_mba->mbps_val[closid];
-	delta_bw = pmbm_data->delta_bw;
-
-	/* MBA resource doesn't support CDP */
-	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);
-
-	/*
-	 * For Ctrl groups read data from child monitor groups.
-	 */
-	head = &rgrp->mon.crdtgrp_list;
-	list_for_each_entry(entry, head, mon.crdtgrp_list) {
-		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
-		cur_bw += cmbm_data->prev_bw;
-		delta_bw += cmbm_data->delta_bw;
-	}
-
-	/*
-	 * Scale up/down the bandwidth linearly for the ctrl group. The
-	 * bandwidth step is the bandwidth granularity specified by the
-	 * hardware.
-	 *
-	 * The delta_bw is used when increasing the bandwidth so that we
-	 * don't alternately increase and decrease the control values
-	 * continuously.
-	 *
-	 * For example, consider cur_bw = 90MBps, user_bw = 100MBps and a
-	 * bandwidth step of 20MBps (> user_bw - cur_bw): we would keep
-	 * switching between 90 and 110 continuously if we only checked
-	 * cur_bw < user_bw.
-	 */
-	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
-		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
-	} else if (cur_msr_val < MAX_MBA_BW &&
-		   (user_bw > (cur_bw + delta_bw))) {
-		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
-	} else {
-		return;
-	}
-
-	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
-
-	/*
-	 * Delta values are updated dynamically, package wise, for each
-	 * rdtgrp every time the throttle MSR changes value.
-	 *
-	 * This is because (1) the increase in bandwidth is not perfectly
-	 * linear and only "approximately" linear even when the hardware
-	 * says it is linear. (2) Also, since MBA is a core-specific
-	 * mechanism, the delta values vary based on the number of cores used
-	 * by the rdtgrp.
- */ - pmbm_data->delta_comp = true; - list_for_each_entry(entry, head, mon.crdtgrp_list) { - cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; - cmbm_data->delta_comp = true; - } -} - -static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, int rmid) -{ - struct rmid_read rr; - - rr.first = false; - rr.r = r; - rr.d = d; - - /* - * This is protected from concurrent reads from user - * as both the user and we hold the global mutex. - */ - if (is_mbm_total_enabled()) { - rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; - rr.val = 0; - __mon_event_count(rmid, &rr); - } - if (is_mbm_local_enabled()) { - rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; - rr.val = 0; - __mon_event_count(rmid, &rr); - - /* - * Call the MBA software controller only for the - * control groups and when user has enabled - * the software controller explicitly. - */ - if (is_mba_sc(NULL)) - mbm_bw_count(rmid, &rr); - } -} - -/* - * Handler to scan the limbo list and move the RMIDs - * to free list whose occupancy < threshold_occupancy. - */ -void cqm_handle_limbo(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); - int cpu = smp_processor_id(); - struct rdt_resource *r; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - d = container_of(work, struct rdt_domain, cqm_limbo.work); - - __check_limbo(d, false); - - if (has_busy_rmid(r, d)) - schedule_delayed_work_on(cpu, &d->cqm_limbo, delay); - - mutex_unlock(&rdtgroup_mutex); -} - -void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - cpu = cpumask_any(&dom->cpu_mask); - dom->cqm_work_cpu = cpu; - - schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); -} - -void mbm_handle_overflow(struct work_struct *work) -{ - unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); - struct rdtgroup *prgrp, *crgrp; - int cpu = smp_processor_id(); - struct list_head *head; - struct rdt_resource *r; - struct rdt_domain *d; - - mutex_lock(&rdtgroup_mutex); - - if (!static_branch_likely(&rdt_mon_enable_key)) - goto out_unlock; - - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - d = container_of(work, struct rdt_domain, mbm_over.work); - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - mbm_update(r, d, prgrp->mon.rmid); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) - mbm_update(r, d, crgrp->mon.rmid); - - if (is_mba_sc(NULL)) - update_mba_bw(prgrp, d); - } - - schedule_delayed_work_on(cpu, &d->mbm_over, delay); - -out_unlock: - mutex_unlock(&rdtgroup_mutex); -} - -void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms) -{ - unsigned long delay = msecs_to_jiffies(delay_ms); - int cpu; - - if (!static_branch_likely(&rdt_mon_enable_key)) - return; - cpu = cpumask_any(&dom->cpu_mask); - dom->mbm_work_cpu = cpu; - schedule_delayed_work_on(cpu, &dom->mbm_over, delay); -} - -static int dom_data_init(struct rdt_resource *r) -{ - struct rmid_entry *entry = NULL; - int i, nr_rmids; - - nr_rmids = r->num_rmid; - rmid_ptrs = kcalloc(nr_rmids, sizeof(struct rmid_entry), GFP_KERNEL); - if (!rmid_ptrs) - return -ENOMEM; - - for (i = 0; i < nr_rmids; i++) { - entry = &rmid_ptrs[i]; - INIT_LIST_HEAD(&entry->list); - - entry->rmid = i; - list_add_tail(&entry->list, &rmid_free_lru); - } - - /* - * RMID 0 is special and is always allocated. It's used for all - * tasks that are not monitored. 
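
The branch structure in update_mba_bw() above amounts to a dead-band controller. A minimal distillation under illustrative names (toy_mba_step() is not a kernel function; u32 as in kernel types)::

	/* Illustrative sketch of one mba_sc decision, not kernel code. */
	static u32 toy_mba_step(u32 cur_msr, u32 cur_bw, u32 user_bw,
				u32 delta_bw, u32 gran, u32 min_bw, u32 max_bw)
	{
		if (cur_msr > min_bw && user_bw < cur_bw)
			return cur_msr - gran;	/* over budget: throttle harder */
		if (cur_msr < max_bw && user_bw > cur_bw + delta_bw)
			return cur_msr + gran;	/* clearly under: relax */
		return cur_msr;			/* inside the dead band: hold */
	}

With the comment's numbers (cur_bw = 90, user_bw = 100, gran = 20, and delta_bw around 20 after one step) neither branch fires, which is exactly the 90/110 oscillation the delta_bw term suppresses.
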
- */ - entry = __rmid_entry(0); - list_del(&entry->list); - - return 0; -} - -static struct mon_evt llc_occupancy_event = { - .name = "llc_occupancy", - .evtid = QOS_L3_OCCUP_EVENT_ID, -}; - -static struct mon_evt mbm_total_event = { - .name = "mbm_total_bytes", - .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, -}; - -static struct mon_evt mbm_local_event = { - .name = "mbm_local_bytes", - .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, -}; - -/* - * Initialize the event list for the resource. - * - * Note that MBM events are also part of RDT_RESOURCE_L3 resource - * because as per the SDM the total and local memory bandwidth - * are enumerated as part of L3 monitoring. - */ -static void l3_mon_evt_init(struct rdt_resource *r) -{ - INIT_LIST_HEAD(&r->evt_list); - - if (is_llc_occupancy_enabled()) - list_add_tail(&llc_occupancy_event.list, &r->evt_list); - if (is_mbm_total_enabled()) - list_add_tail(&mbm_total_event.list, &r->evt_list); - if (is_mbm_local_enabled()) - list_add_tail(&mbm_local_event.list, &r->evt_list); -} - int __init rdt_get_mon_l3_config(struct rdt_resource *r) { unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset; struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); unsigned int threshold; - int ret; resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024; hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale; @@ -808,22 +251,13 @@ int __init rdt_get_mon_l3_config(struct rdt_resource *r) */ resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold); - ret = dom_data_init(r); - if (ret) - return ret; - if (rdt_cpu_has(X86_FEATURE_BMEC)) { - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) { - mbm_total_event.configurable = true; - mbm_config_rftype_init("mbm_total_bytes_config"); - } - if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) { - mbm_local_event.configurable = true; - mbm_config_rftype_init("mbm_local_bytes_config"); - } - } + u32 eax, ebx, ecx, edx; - l3_mon_evt_init(r); + /* Detect list of bandwidth sources that can be tracked */ + cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx); + r->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS; + } r->mon_capable = true; diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 8f559eeae08ed5845c291cd4142d748a3e2cca5c..ba1596afee107f65f784ee004a86d0faabf68eb4 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -39,30 +39,9 @@ */ static u64 prefetch_disable_bits; -/* - * Major number assigned to and shared by all devices exposing - * pseudo-locked regions. - */ -static unsigned int pseudo_lock_major; -static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); - -static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) -{ - const struct rdtgroup *rdtgrp; - - rdtgrp = dev_get_drvdata(dev); - if (mode) - *mode = 0600; - return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); -} - -static const struct class pseudo_lock_class = { - .name = "pseudo_lock", - .devnode = pseudo_lock_devnode, -}; - /** - * get_prefetch_disable_bits - prefetch disable bits of supported platforms + * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported + * platforms * @void: It takes no parameters. * * Capture the list of platforms that have been validated to support @@ -76,14 +55,16 @@ static const struct class pseudo_lock_class = { * in the SDM. 
* * When adding a platform here also add support for its cache events to - * measure_cycles_perf_fn() + * resctrl_arch_measure_l*_residency() * * Return: * If platform is supported, the bits to disable hardware prefetchers, 0 * if platform is not supported. */ -static u64 get_prefetch_disable_bits(void) +u64 resctrl_arch_get_prefetch_disable_bits(void) { + prefetch_disable_bits = 0; + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL || boot_cpu_data.x86 != 6) return 0; @@ -99,7 +80,8 @@ static u64 get_prefetch_disable_bits(void) * 3 DCU IP Prefetcher Disable (R/W) * 63:4 Reserved */ - return 0xF; + prefetch_disable_bits = 0xF; + break; case INTEL_FAM6_ATOM_GOLDMONT: case INTEL_FAM6_ATOM_GOLDMONT_PLUS: /* @@ -110,308 +92,16 @@ static u64 get_prefetch_disable_bits(void) * 2 DCU Hardware Prefetcher Disable (R/W) * 63:3 Reserved */ - return 0x5; - } - - return 0; -} - -/** - * pseudo_lock_minor_get - Obtain available minor number - * @minor: Pointer to where new minor number will be stored - * - * A bitmask is used to track available minor numbers. Here the next free - * minor number is marked as unavailable and returned. - * - * Return: 0 on success, <0 on failure. - */ -static int pseudo_lock_minor_get(unsigned int *minor) -{ - unsigned long first_bit; - - first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); - - if (first_bit == MINORBITS) - return -ENOSPC; - - __clear_bit(first_bit, &pseudo_lock_minor_avail); - *minor = first_bit; - - return 0; -} - -/** - * pseudo_lock_minor_release - Return minor number to available - * @minor: The minor number made available - */ -static void pseudo_lock_minor_release(unsigned int minor) -{ - __set_bit(minor, &pseudo_lock_minor_avail); -} - -/** - * region_find_by_minor - Locate a pseudo-lock region by inode minor number - * @minor: The minor number of the device representing pseudo-locked region - * - * When the character device is accessed we need to determine which - * pseudo-locked region it belongs to. This is done by matching the minor - * number of the device to the pseudo-locked region it belongs. - * - * Minor numbers are assigned at the time a pseudo-locked region is associated - * with a cache instance. - * - * Return: On success return pointer to resource group owning the pseudo-locked - * region, NULL on failure. - */ -static struct rdtgroup *region_find_by_minor(unsigned int minor) -{ - struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->plr && rdtgrp->plr->minor == minor) { - rdtgrp_match = rdtgrp; - break; - } - } - return rdtgrp_match; -} - -/** - * struct pseudo_lock_pm_req - A power management QoS request list entry - * @list: Entry within the @pm_reqs list for a pseudo-locked region - * @req: PM QoS request - */ -struct pseudo_lock_pm_req { - struct list_head list; - struct dev_pm_qos_request req; -}; - -static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req, *next; - - list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { - dev_pm_qos_remove_request(&pm_req->req); - list_del(&pm_req->list); - kfree(pm_req); - } -} - -/** - * pseudo_lock_cstates_constrain - Restrict cores from entering C6 - * @plr: Pseudo-locked region - * - * To prevent the cache from being affected by power management entering - * C6 has to be avoided. 
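
The mask assembled above is applied to MSR_MISC_FEATURE_CONTROL (0x1a4) only for the duration of the critical section. A minimal sketch of the save/apply/restore pattern used by the locking and measurement threads, assuming the bit layouts shown above::

	u64 saved_msr;

	saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);	/* remember prefetcher state */
	wrmsrl(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
	/* ... populate or measure the pseudo-locked region ... */
	wrmsrl(MSR_MISC_FEATURE_CONTROL, saved_msr);	/* re-enable prefetchers */
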
This is accomplished by requesting a latency - * requirement lower than lowest C6 exit latency of all supported - * platforms as found in the cpuidle state tables in the intel_idle driver. - * At this time it is possible to do so with a single latency requirement - * for all supported platforms. - * - * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, - * the ACPI latencies need to be considered while keeping in mind that C2 - * may be set to map to deeper sleep states. In this case the latency - * requirement needs to prevent entering C2 also. - * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) -{ - struct pseudo_lock_pm_req *pm_req; - int cpu; - int ret; - - for_each_cpu(cpu, &plr->d->cpu_mask) { - pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); - if (!pm_req) { - rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); - ret = -ENOMEM; - goto out_err; - } - ret = dev_pm_qos_add_request(get_cpu_device(cpu), - &pm_req->req, - DEV_PM_QOS_RESUME_LATENCY, - 30); - if (ret < 0) { - rdt_last_cmd_printf("Failed to add latency req CPU%d\n", - cpu); - kfree(pm_req); - ret = -1; - goto out_err; - } - list_add(&pm_req->list, &plr->pm_reqs); - } - - return 0; - -out_err: - pseudo_lock_cstates_relax(plr); - return ret; -} - -/** - * pseudo_lock_region_clear - Reset pseudo-lock region data - * @plr: pseudo-lock region - * - * All content of the pseudo-locked region is reset - any memory allocated - * freed. - * - * Return: void - */ -static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) -{ - plr->size = 0; - plr->line_size = 0; - kfree(plr->kmem); - plr->kmem = NULL; - plr->s = NULL; - if (plr->d) - plr->d->plr = NULL; - plr->d = NULL; - plr->cbm = 0; - plr->debugfs_dir = NULL; -} - -/** - * pseudo_lock_region_init - Initialize pseudo-lock region information - * @plr: pseudo-lock region - * - * Called after user provided a schemata to be pseudo-locked. From the - * schemata the &struct pseudo_lock_region is on entry already initialized - * with the resource, domain, and capacity bitmask. Here the information - * required for pseudo-locking is deduced from this data and &struct - * pseudo_lock_region initialized further. This information includes: - * - size in bytes of the region to be pseudo-locked - * - cache line size to know the stride with which data needs to be accessed - * to be pseudo-locked - * - a cpu associated with the cache instance on which the pseudo-locking - * flow can be executed - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_init(struct pseudo_lock_region *plr) -{ - struct cpu_cacheinfo *ci; - int ret; - int i; - - /* Pick the first cpu we find that is associated with the cache. 
*/ - plr->cpu = cpumask_first(&plr->d->cpu_mask); - - if (!cpu_online(plr->cpu)) { - rdt_last_cmd_printf("CPU %u associated with cache not online\n", - plr->cpu); - ret = -ENODEV; - goto out_region; - } - - ci = get_cpu_cacheinfo(plr->cpu); - - plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); - - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == plr->s->res->cache_level) { - plr->line_size = ci->info_list[i].coherency_line_size; - return 0; - } - } - - ret = -1; - rdt_last_cmd_puts("Unable to determine cache line size\n"); -out_region: - pseudo_lock_region_clear(plr); - return ret; -} - -/** - * pseudo_lock_init - Initialize a pseudo-lock region - * @rdtgrp: resource group to which new pseudo-locked region will belong - * - * A pseudo-locked region is associated with a resource group. When this - * association is created the pseudo-locked region is initialized. The - * details of the pseudo-locked region are not known at this time so only - * allocation is done and association established. - * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_init(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr; - - plr = kzalloc(sizeof(*plr), GFP_KERNEL); - if (!plr) - return -ENOMEM; - - init_waitqueue_head(&plr->lock_thread_wq); - INIT_LIST_HEAD(&plr->pm_reqs); - rdtgrp->plr = plr; - return 0; -} - -/** - * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked - * @plr: pseudo-lock region - * - * Initialize the details required to set up the pseudo-locked region and - * allocate the contiguous memory that will be pseudo-locked to the cache. - * - * Return: 0 on success, <0 on failure. Descriptive error will be written - * to last_cmd_status buffer. - */ -static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) -{ - int ret; - - ret = pseudo_lock_region_init(plr); - if (ret < 0) - return ret; - - /* - * We do not yet support contiguous regions larger than - * KMALLOC_MAX_SIZE. - */ - if (plr->size > KMALLOC_MAX_SIZE) { - rdt_last_cmd_puts("Requested region exceeds maximum size\n"); - ret = -E2BIG; - goto out_region; - } - - plr->kmem = kzalloc(plr->size, GFP_KERNEL); - if (!plr->kmem) { - rdt_last_cmd_puts("Unable to allocate memory\n"); - ret = -ENOMEM; - goto out_region; + prefetch_disable_bits = 0x5; + break; } - ret = 0; - goto out; -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * pseudo_lock_free - Free a pseudo-locked region - * @rdtgrp: resource group to which pseudo-locked region belonged - * - * The pseudo-locked region's resources have already been released, or not - * yet created at this point. Now it can be freed and disassociated from the - * resource group. - * - * Return: void - */ -static void pseudo_lock_free(struct rdtgroup *rdtgrp) -{ - pseudo_lock_region_clear(rdtgrp->plr); - kfree(rdtgrp->plr); - rdtgrp->plr = NULL; + return prefetch_disable_bits; } /** - * pseudo_lock_fn - Load kernel memory into cache - * @_rdtgrp: resource group to which pseudo-lock region belongs + * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache + * @_plr: the pseudo-lock region descriptor * * This is the core pseudo-locking flow. * @@ -428,10 +118,9 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp) * * Return: 0. Waiter on waitqueue will be woken on completion. 
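
At its core the locking flow is a strided read of the buffer, one access per cache line, run with interrupts and hardware prefetchers disabled so nothing else perturbs the cache. A freestanding sketch (toy_load_region() is illustrative only)::

	/* Illustrative sketch: pull a region into cache line by line. */
	static void toy_load_region(const volatile unsigned char *mem,
				    size_t size, size_t stride)
	{
		size_t i;

		for (i = 0; i < size; i += stride)
			(void)mem[i];	/* one volatile read per cache line */
	}
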
*/ -static int pseudo_lock_fn(void *_rdtgrp) +int resctrl_arch_pseudo_lock_fn(void *_plr) { - struct rdtgroup *rdtgrp = _rdtgrp; - struct pseudo_lock_region *plr = rdtgrp->plr; + struct pseudo_lock_region *plr = _plr; u32 rmid_p, closid_p; unsigned long i; u64 saved_msr; @@ -491,7 +180,8 @@ static int pseudo_lock_fn(void *_rdtgrp) * pseudo-locked followed by reading of kernel memory to load it * into the cache. */ - __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, rdtgrp->closid); + __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid); + /* * Cache was flushed earlier. Now access kernel memory to read it * into cache region associated with just activated plr->closid. @@ -539,339 +229,8 @@ static int pseudo_lock_fn(void *_rdtgrp) } /** - * rdtgroup_monitor_in_progress - Test if monitoring in progress - * @rdtgrp: resource group being queried - * - * Return: 1 if monitor groups have been created for this resource - * group, 0 otherwise. - */ -static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) -{ - return !list_empty(&rdtgrp->mon.crdtgrp_list); -} - -/** - * rdtgroup_locksetup_user_restrict - Restrict user access to group - * @rdtgrp: resource group needing access restricted - * - * A resource group used for cache pseudo-locking cannot have cpus or tasks - * assigned to it. This is communicated to the user by restricting access - * to all the files that can be used to make such changes. - * - * Permissions restored with rdtgroup_locksetup_user_restore() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restriction of access an attempt will be made to restore permissions but - * the state of the mode of these files will be uncertain when a failure - * occurs. - */ -static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); - if (ret) - goto err_cpus; - - if (rdt_mon_capable) { - ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); -err_cpus: - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); -err_tasks: - rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); -out: - return ret; -} - -/** - * rdtgroup_locksetup_user_restore - Restore user access to group - * @rdtgrp: resource group needing access restored - * - * Restore all file access previously removed using - * rdtgroup_locksetup_user_restrict() - * - * Return: 0 on success, <0 on failure. If a failure occurs during the - * restoration of access an attempt will be made to restrict permissions - * again but the state of the mode of these files will be uncertain when - * a failure occurs. 
- */ -static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) -{ - int ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); - if (ret) - return ret; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); - if (ret) - goto err_tasks; - - ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); - if (ret) - goto err_cpus; - - if (rdt_mon_capable) { - ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); - if (ret) - goto err_cpus_list; - } - - ret = 0; - goto out; - -err_cpus_list: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); -err_cpus: - rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); -err_tasks: - rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); -out: - return ret; -} - -/** - * rdtgroup_locksetup_enter - Resource group enters locksetup mode - * @rdtgrp: resource group requested to enter locksetup mode - * - * A resource group enters locksetup mode to reflect that it would be used - * to represent a pseudo-locked region and is in the process of being set - * up to do so. A resource group used for a pseudo-locked region would - * lose the closid associated with it so we cannot allow it to have any - * tasks or cpus assigned nor permit tasks or cpus to be assigned in the - * future. Monitoring of a pseudo-locked region is not allowed either. - * - * The above and more restrictions on a pseudo-locked region are checked - * for and enforced before the resource group enters the locksetup mode. - * - * Returns: 0 if the resource group successfully entered locksetup mode, <0 - * on failure. On failure the last_cmd_status buffer is updated with text to - * communicate details of failure to the user. - */ -int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) -{ - int ret; - - /* - * The default resource group can neither be removed nor lose the - * default closid associated with it. - */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); - return -EINVAL; - } - - /* - * Cache Pseudo-locking not supported when CDP is enabled. - * - * Some things to consider if you would like to enable this - * support (using L3 CDP as example): - * - When CDP is enabled two separate resources are exposed, - * L3DATA and L3CODE, but they are actually on the same cache. - * The implication for pseudo-locking is that if a - * pseudo-locked region is created on a domain of one - * resource (eg. L3CODE), then a pseudo-locked region cannot - * be created on that same domain of the other resource - * (eg. L3DATA). This is because the creation of a - * pseudo-locked region involves a call to wbinvd that will - * affect all cache allocations on particular domain. - * - Considering the previous, it may be possible to only - * expose one of the CDP resources to pseudo-locking and - * hide the other. For example, we could consider to only - * expose L3DATA and since the L3 cache is unified it is - * still possible to place instructions there are execute it. - * - If only one region is exposed to pseudo-locking we should - * still keep in mind that availability of a portion of cache - * for pseudo-locking should take into account both resources. - * Similarly, if a pseudo-locked region is created in one - * resource, the portion of cache used by it should be made - * unavailable to all future allocations from both resources. 
- */ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) || - resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) { - rdt_last_cmd_puts("CDP enabled\n"); - return -EINVAL; - } - - /* - * Not knowing the bits to disable prefetching implies that this - * platform does not support Cache Pseudo-Locking. - */ - prefetch_disable_bits = get_prefetch_disable_bits(); - if (prefetch_disable_bits == 0) { - rdt_last_cmd_puts("Pseudo-locking not supported\n"); - return -EINVAL; - } - - if (rdtgroup_monitor_in_progress(rdtgrp)) { - rdt_last_cmd_puts("Monitoring in progress\n"); - return -EINVAL; - } - - if (rdtgroup_tasks_assigned(rdtgrp)) { - rdt_last_cmd_puts("Tasks assigned to resource group\n"); - return -EINVAL; - } - - if (!cpumask_empty(&rdtgrp->cpu_mask)) { - rdt_last_cmd_puts("CPUs assigned to resource group\n"); - return -EINVAL; - } - - if (rdtgroup_locksetup_user_restrict(rdtgrp)) { - rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); - return -EIO; - } - - ret = pseudo_lock_init(rdtgrp); - if (ret) { - rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); - goto out_release; - } - - /* - * If this system is capable of monitoring a rmid would have been - * allocated when the control group was created. This is not needed - * anymore when this group would be used for pseudo-locking. This - * is safe to call on platforms not capable of monitoring. - */ - free_rmid(rdtgrp->mon.rmid); - - ret = 0; - goto out; - -out_release: - rdtgroup_locksetup_user_restore(rdtgrp); -out: - return ret; -} - -/** - * rdtgroup_locksetup_exit - resource group exist locksetup mode - * @rdtgrp: resource group - * - * When a resource group exits locksetup mode the earlier restrictions are - * lifted. - * - * Return: 0 on success, <0 on failure - */ -int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) -{ - int ret; - - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - return ret; - } - rdtgrp->mon.rmid = ret; - } - - ret = rdtgroup_locksetup_user_restore(rdtgrp); - if (ret) { - free_rmid(rdtgrp->mon.rmid); - return ret; - } - - pseudo_lock_free(rdtgrp); - return 0; -} - -/** - * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked - * @d: RDT domain - * @cbm: CBM to test - * - * @d represents a cache instance and @cbm a capacity bitmask that is - * considered for it. Determine if @cbm overlaps with any existing - * pseudo-locked region on @d. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: true if @cbm overlaps with pseudo-locked region on @d, false - * otherwise. - */ -bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) -{ - unsigned int cbm_len; - unsigned long cbm_b; - - if (d->plr) { - cbm_len = d->plr->s->res->cache.cbm_len; - cbm_b = d->plr->cbm; - if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) - return true; - } - return false; -} - -/** - * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy - * @d: RDT domain under test - * - * The setup of a pseudo-locked region affects all cache instances within - * the hierarchy of the region. It is thus essential to know if any - * pseudo-locked regions exist within a cache hierarchy to prevent any - * attempts to create new pseudo-locked regions in the same hierarchy. - * - * Return: true if a pseudo-locked region exists in the hierarchy of @d or - * if it is not possible to test due to memory allocation issue, - * false otherwise. 
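
rdtgroup_cbm_overlaps_pseudo_locked() above boils down to a single bitmap_intersects() call. A worked example with an 11-bit CBM (illustrative values)::

	/* Bits 6 and 7 are common to both masks, so they clash. */
	unsigned long cbm = 0x3c0;	/* candidate CBM: bits 6-9 */
	unsigned long plcbm = 0x0f0;	/* pseudo-locked CBM: bits 4-7 */
	bool overlap = bitmap_intersects(&cbm, &plcbm, 11);	/* true */
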
- */ -bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) -{ - cpumask_var_t cpu_with_psl; - struct rdt_resource *r; - struct rdt_domain *d_i; - bool ret = false; - - if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) - return true; - - /* - * First determine which cpus have pseudo-locked regions - * associated with them. - */ - for_each_alloc_capable_rdt_resource(r) { - list_for_each_entry(d_i, &r->domains, list) { - if (d_i->plr) - cpumask_or(cpu_with_psl, cpu_with_psl, - &d_i->cpu_mask); - } - } - - /* - * Next test if new pseudo-locked region would intersect with - * existing region. - */ - if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) - ret = true; - - free_cpumask_var(cpu_with_psl); - return ret; -} - -/** - * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory + * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read + * pseudo-locked memory * @_plr: pseudo-lock region to measure * * There is no deterministic way to test if a memory region is cached. One @@ -884,7 +243,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) * * Return: 0. Waiter on waitqueue will be woken on completion. */ -static int measure_cycles_lat_fn(void *_plr) +int resctrl_arch_measure_cycles_lat_fn(void *_plr) { struct pseudo_lock_region *plr = _plr; u32 saved_low, saved_high; @@ -1068,7 +427,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr, return 0; } -static int measure_l2_residency(void *_plr) +int resctrl_arch_measure_l2_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1106,7 +465,7 @@ static int measure_l2_residency(void *_plr) return 0; } -static int measure_l3_residency(void *_plr) +int resctrl_arch_measure_l3_residency(void *_plr) { struct pseudo_lock_region *plr = _plr; struct residency_counts counts = {0}; @@ -1161,441 +520,3 @@ static int measure_l3_residency(void *_plr) wake_up_interruptible(&plr->lock_thread_wq); return 0; } - -/** - * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region - * @rdtgrp: Resource group to which the pseudo-locked region belongs. - * @sel: Selector of which measurement to perform on a pseudo-locked region. - * - * The measurement of latency to access a pseudo-locked region should be - * done from a cpu that is associated with that pseudo-locked region. - * Determine which cpu is associated with this region and start a thread on - * that cpu to perform the measurement, wait for that thread to complete. 
- * - * Return: 0 on success, <0 on failure - */ -static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int cpu; - int ret = -1; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out; - } - - if (!plr->d) { - ret = -ENODEV; - goto out; - } - - plr->thread_done = 0; - cpu = cpumask_first(&plr->d->cpu_mask); - if (!cpu_online(cpu)) { - ret = -ENODEV; - goto out; - } - - plr->cpu = cpu; - - if (sel == 1) - thread = kthread_create_on_node(measure_cycles_lat_fn, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 2) - thread = kthread_create_on_node(measure_l2_residency, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else if (sel == 3) - thread = kthread_create_on_node(measure_l3_residency, plr, - cpu_to_node(cpu), - "pseudo_lock_measure/%u", - cpu); - else - goto out; - - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - goto out; - } - kthread_bind(thread, cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) - goto out; - - ret = 0; - -out: - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -static ssize_t pseudo_lock_measure_trigger(struct file *file, - const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct rdtgroup *rdtgrp = file->private_data; - size_t buf_size; - char buf[32]; - int ret; - int sel; - - buf_size = min(count, (sizeof(buf) - 1)); - if (copy_from_user(buf, user_buf, buf_size)) - return -EFAULT; - - buf[buf_size] = '\0'; - ret = kstrtoint(buf, 10, &sel); - if (ret == 0) { - if (sel != 1 && sel != 2 && sel != 3) - return -EINVAL; - ret = debugfs_file_get(file->f_path.dentry); - if (ret) - return ret; - ret = pseudo_lock_measure_cycles(rdtgrp, sel); - if (ret == 0) - ret = count; - debugfs_file_put(file->f_path.dentry); - } - - return ret; -} - -static const struct file_operations pseudo_measure_fops = { - .write = pseudo_lock_measure_trigger, - .open = simple_open, - .llseek = default_llseek, -}; - -/** - * rdtgroup_pseudo_lock_create - Create a pseudo-locked region - * @rdtgrp: resource group to which pseudo-lock region belongs - * - * Called when a resource group in the pseudo-locksetup mode receives a - * valid schemata that should be pseudo-locked. Since the resource group is - * in pseudo-locksetup mode the &struct pseudo_lock_region has already been - * allocated and initialized with the essential information. If a failure - * occurs the resource group remains in the pseudo-locksetup mode with the - * &struct pseudo_lock_region associated with it, but cleared from all - * information and ready for the user to re-attempt pseudo-locking by - * writing the schemata again. - * - * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 - * on failure. Descriptive error will be written to last_cmd_status buffer. 
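
For context, pseudo_lock_measure_trigger() above accepts 1 (latency), 2 (L2 residency) or 3 (L3 residency). A minimal user-space sketch, assuming a pseudo-locked group named "newlock" and debugfs mounted at /sys/kernel/debug; results are emitted via tracepoints such as pseudo_lock_mem_latency::

	#include <fcntl.h>
	#include <unistd.h>

	int fd = open("/sys/kernel/debug/resctrl/newlock/pseudo_lock_measure",
		      O_WRONLY);
	if (fd >= 0) {
		write(fd, "1", 1);	/* 1 = latency measurement */
		close(fd);
	}
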
- */ -int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - struct task_struct *thread; - unsigned int new_minor; - struct device *dev; - int ret; - - ret = pseudo_lock_region_alloc(plr); - if (ret < 0) - return ret; - - ret = pseudo_lock_cstates_constrain(plr); - if (ret < 0) { - ret = -EINVAL; - goto out_region; - } - - plr->thread_done = 0; - - thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp, - cpu_to_node(plr->cpu), - "pseudo_lock/%u", plr->cpu); - if (IS_ERR(thread)) { - ret = PTR_ERR(thread); - rdt_last_cmd_printf("Locking thread returned error %d\n", ret); - goto out_cstates; - } - - kthread_bind(thread, plr->cpu); - wake_up_process(thread); - - ret = wait_event_interruptible(plr->lock_thread_wq, - plr->thread_done == 1); - if (ret < 0) { - /* - * If the thread does not get on the CPU for whatever - * reason and the process which sets up the region is - * interrupted then this will leave the thread in runnable - * state and once it gets on the CPU it will dereference - * the cleared, but not freed, plr struct resulting in an - * empty pseudo-locking loop. - */ - rdt_last_cmd_puts("Locking thread interrupted\n"); - goto out_cstates; - } - - ret = pseudo_lock_minor_get(&new_minor); - if (ret < 0) { - rdt_last_cmd_puts("Unable to obtain a new minor number\n"); - goto out_cstates; - } - - /* - * Unlock access but do not release the reference. The - * pseudo-locked region will still be here on return. - * - * The mutex has to be released temporarily to avoid a potential - * deadlock with the mm->mmap_lock which is obtained in the - * device_create() and debugfs_create_dir() callpath below as well as - * before the mmap() callback is called. - */ - mutex_unlock(&rdtgroup_mutex); - - if (!IS_ERR_OR_NULL(debugfs_resctrl)) { - plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, - debugfs_resctrl); - if (!IS_ERR_OR_NULL(plr->debugfs_dir)) - debugfs_create_file("pseudo_lock_measure", 0200, - plr->debugfs_dir, rdtgrp, - &pseudo_measure_fops); - } - - dev = device_create(&pseudo_lock_class, NULL, - MKDEV(pseudo_lock_major, new_minor), - rdtgrp, "%s", rdtgrp->kn->name); - - mutex_lock(&rdtgroup_mutex); - - if (IS_ERR(dev)) { - ret = PTR_ERR(dev); - rdt_last_cmd_printf("Failed to create character device: %d\n", - ret); - goto out_debugfs; - } - - /* We released the mutex - check if group was removed while we did so */ - if (rdtgrp->flags & RDT_DELETED) { - ret = -ENODEV; - goto out_device; - } - - plr->minor = new_minor; - - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; - closid_free(rdtgrp->closid); - rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); - rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); - - ret = 0; - goto out; - -out_device: - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); -out_debugfs: - debugfs_remove_recursive(plr->debugfs_dir); - pseudo_lock_minor_release(new_minor); -out_cstates: - pseudo_lock_cstates_relax(plr); -out_region: - pseudo_lock_region_clear(plr); -out: - return ret; -} - -/** - * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region - * @rdtgrp: resource group to which the pseudo-locked region belongs - * - * The removal of a pseudo-locked region can be initiated when the resource - * group is removed from user space via a "rmdir" from userspace or the - * unmount of the resctrl filesystem. On removal the resource group does - * not go back to pseudo-locksetup mode before it is removed, instead it is - * removed directly. 
There is thus asymmetry with the creation where the - * &struct pseudo_lock_region is removed here while it was not created in - * rdtgroup_pseudo_lock_create(). - * - * Return: void - */ -void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) -{ - struct pseudo_lock_region *plr = rdtgrp->plr; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - /* - * Default group cannot be a pseudo-locked region so we can - * free closid here. - */ - closid_free(rdtgrp->closid); - goto free; - } - - pseudo_lock_cstates_relax(plr); - debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); - device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); - pseudo_lock_minor_release(plr->minor); - -free: - pseudo_lock_free(rdtgrp); -} - -static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = region_find_by_minor(iminor(inode)); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - filp->private_data = rdtgrp; - atomic_inc(&rdtgrp->waitcount); - /* Perform a non-seekable open - llseek is not supported */ - filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) -{ - struct rdtgroup *rdtgrp; - - mutex_lock(&rdtgroup_mutex); - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - filp->private_data = NULL; - atomic_dec(&rdtgrp->waitcount); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int pseudo_lock_dev_mremap(struct vm_area_struct *area) -{ - /* Not supported */ - return -EINVAL; -} - -static const struct vm_operations_struct pseudo_mmap_ops = { - .mremap = pseudo_lock_dev_mremap, -}; - -static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) -{ - unsigned long vsize = vma->vm_end - vma->vm_start; - unsigned long off = vma->vm_pgoff << PAGE_SHIFT; - struct pseudo_lock_region *plr; - struct rdtgroup *rdtgrp; - unsigned long physical; - unsigned long psize; - - mutex_lock(&rdtgroup_mutex); - - rdtgrp = filp->private_data; - WARN_ON(!rdtgrp); - if (!rdtgrp) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - plr = rdtgrp->plr; - - if (!plr->d) { - mutex_unlock(&rdtgroup_mutex); - return -ENODEV; - } - - /* - * Task is required to run with affinity to the cpus associated - * with the pseudo-locked region. If this is not the case the task - * may be scheduled elsewhere and invalidate entries in the - * pseudo-locked region. - */ - if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - physical = __pa(plr->kmem) >> PAGE_SHIFT; - psize = plr->size - off; - - if (off > plr->size) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - /* - * Ensure changes are carried directly to the memory being mapped, - * do not allow copy-on-write mapping. 
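
Taken together, the checks in pseudo_lock_dev_mmap() above define the user-space contract: pin the task to the region's CPUs first, map MAP_SHARED, and stay within the region's size. A minimal sketch, with "newlock" as a hypothetical group name::

	#include <fcntl.h>
	#include <sys/mman.h>

	/* Caller must already be affine to the CPUs of the backing cache. */
	size_t size = 1024 * 1024;	/* must not exceed the region size */
	int fd = open("/dev/pseudo_lock/newlock", O_RDWR);
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, 0);	/* MAP_PRIVATE fails: -EINVAL */
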
- */ - if (!(vma->vm_flags & VM_SHARED)) { - mutex_unlock(&rdtgroup_mutex); - return -EINVAL; - } - - if (vsize > psize) { - mutex_unlock(&rdtgroup_mutex); - return -ENOSPC; - } - - memset(plr->kmem + off, 0, vsize); - - if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, - vsize, vma->vm_page_prot)) { - mutex_unlock(&rdtgroup_mutex); - return -EAGAIN; - } - vma->vm_ops = &pseudo_mmap_ops; - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static const struct file_operations pseudo_lock_dev_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .read = NULL, - .write = NULL, - .open = pseudo_lock_dev_open, - .release = pseudo_lock_dev_release, - .mmap = pseudo_lock_dev_mmap, -}; - -int rdt_pseudo_lock_init(void) -{ - int ret; - - ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); - if (ret < 0) - return ret; - - pseudo_lock_major = ret; - - ret = class_register(&pseudo_lock_class); - if (ret) { - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - return ret; - } - - return 0; -} - -void rdt_pseudo_lock_release(void) -{ - class_unregister(&pseudo_lock_class); - unregister_chrdev(pseudo_lock_major, "pseudo_lock"); - pseudo_lock_major = 0; -} diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c index 725344048f85da42a826514c66c8abe79f4035ba..fe3952514addf7994fdfb66d6ee46bec6c2fc03e 100644 --- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c +++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c @@ -12,22 +12,8 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#include #include -#include -#include -#include -#include -#include -#include -#include -#include -#include #include -#include -#include - -#include #include #include "internal.h" @@ -35,3840 +21,240 @@ DEFINE_STATIC_KEY_FALSE(rdt_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key); DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key); -static struct kernfs_root *rdt_root; -struct rdtgroup rdtgroup_default; -LIST_HEAD(rdt_all_groups); - -/* list of entries for the schemata file */ -LIST_HEAD(resctrl_schema_all); - -/* Kernel fs node for "info" directory under root */ -static struct kernfs_node *kn_info; - -/* Kernel fs node for "mon_groups" directory under root */ -static struct kernfs_node *kn_mongrp; - -/* Kernel fs node for "mon_data" directory under root */ -static struct kernfs_node *kn_mondata; - -static struct seq_buf last_cmd_status; -static char last_cmd_status_buf[512]; - -struct dentry *debugfs_resctrl; - -void rdt_last_cmd_clear(void) -{ - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_clear(&last_cmd_status); -} -void rdt_last_cmd_puts(const char *s) +/* + * This is safe against resctrl_arch_sched_in() called from __switch_to() + * because __switch_to() is executed with interrupts disabled. A local call + * from update_closid_rmid() is protected against __switch_to() because + * preemption is disabled. + */ +void resctrl_arch_sync_cpu_defaults(void *info) { - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_puts(&last_cmd_status, s); -} + struct resctrl_cpu_sync *r = info; -void rdt_last_cmd_printf(const char *fmt, ...) -{ - va_list ap; + if (r) { + this_cpu_write(pqr_state.default_closid, r->closid); + this_cpu_write(pqr_state.default_rmid, r->rmid); + } - va_start(ap, fmt); - lockdep_assert_held(&rdtgroup_mutex); - seq_buf_vprintf(&last_cmd_status, fmt, ap); - va_end(ap); + /* + * We cannot unconditionally write the MSR because the current + * executing task might have its own closid selected. Just reuse + * the context switch code. 
+ */ + resctrl_arch_sched_in(current); } -void rdt_staged_configs_clear(void) -{ - struct rdt_resource *r; - struct rdt_domain *dom; - - lockdep_assert_held(&rdtgroup_mutex); - - for_each_alloc_capable_rdt_resource(r) { - list_for_each_entry(dom, &r->domains, list) - memset(dom->staged_config, 0, sizeof(dom->staged_config)); - } -} +#define INVALID_CONFIG_INDEX UINT_MAX -/* - * Trivial allocator for CLOSIDs. Since h/w only supports a small number, - * we can keep a bitmap of free CLOSIDs in a single integer. +/** + * mon_event_config_index_get - get the hardware index for the + * configurable event + * @evtid: event id. * - * Using a global CLOSID across all resources has some advantages and - * some drawbacks: - * + We can simply set "current->closid" to assign a task to a resource - * group. - * + Context switch code can avoid extra memory references deciding which - * CLOSID to load into the PQR_ASSOC MSR - * - We give up some options in configuring resource groups across multi-socket - * systems. - * - Our choices on how to configure each resource become progressively more - * limited as the number of resources grows. + * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID + * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID + * INVALID_CONFIG_INDEX for invalid evtid */ -static int closid_free_map; -static int closid_free_map_len; - -int closids_supported(void) +static inline unsigned int mon_event_config_index_get(u32 evtid) { - return closid_free_map_len; + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return 0; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return 1; + default: + /* Should never reach here */ + return INVALID_CONFIG_INDEX; + } } -static void closid_init(void) +void resctrl_arch_mon_event_config_read(void *info) { - struct resctrl_schema *s; - u32 rdt_min_closid = 32; - - /* Compute rdt_min_closid across all resources */ - list_for_each_entry(s, &resctrl_schema_all, list) - rdt_min_closid = min(rdt_min_closid, s->num_closid); + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; + u64 msrval; - closid_free_map = BIT_MASK(rdt_min_closid) - 1; + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + return; + } + rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - /* CLOSID 0 is always reserved for the default group */ - closid_free_map &= ~1; - closid_free_map_len = rdt_min_closid; + /* Report only the valid event configuration bits */ + mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; } -static int closid_alloc(void) +void resctrl_arch_mon_event_config_write(void *info) { - u32 closid = ffs(closid_free_map); - - if (closid == 0) - return -ENOSPC; - closid--; - closid_free_map &= ~(1 << closid); - - return closid; -} + struct resctrl_mon_config_info *mon_info = info; + unsigned int index; -void closid_free(int closid) -{ - closid_free_map |= 1 << closid; -} + index = mon_event_config_index_get(mon_info->evtid); + if (index == INVALID_CONFIG_INDEX) { + pr_warn_once("Invalid event id %d\n", mon_info->evtid); + mon_info->err = -EINVAL; + return; + } + wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); -/** - * closid_allocated - test if provided closid is in use - * @closid: closid to be tested - * - * Return: true if @closid is currently associated with a resource group, - * false if @closid is free - */ -static bool closid_allocated(unsigned int closid) -{ - return (closid_free_map & (1 << closid)) == 0; + mon_info->err = 0; } -/** - * 
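
The value that resctrl_arch_mon_event_config_read()/write() above move around is the BMEC event configuration. A summary of the seven valid bits (the ones MAX_EVT_CONFIG_BITS keeps), as documented for mbm_total_bytes_config and mbm_local_bytes_config in Documentation/arch/x86/resctrl.rst::

	/*
	 * BMEC event configuration bits (illustrative summary):
	 *   bit 0  reads to local NUMA memory
	 *   bit 1  reads to non-local NUMA memory
	 *   bit 2  non-temporal writes to local NUMA memory
	 *   bit 3  non-temporal writes to non-local NUMA memory
	 *   bit 4  reads to local slow memory
	 *   bit 5  reads to non-local slow memory
	 *   bit 6  dirty victims to all types of memory
	 */
	mon_info->mon_config = 0x03;	/* e.g. count local + remote reads */
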
rdtgroup_mode_by_closid - Return mode of resource group with closid - * @closid: closid if the resource group - * - * Each resource group is associated with a @closid. Here the mode - * of a resource group can be queried by searching for it using its closid. - * - * Return: mode as &enum rdtgrp_mode of resource group with closid @closid - */ -enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +static void l3_qos_cfg_update(void *arg) { - struct rdtgroup *rdtgrp; - - list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { - if (rdtgrp->closid == closid) - return rdtgrp->mode; - } + bool *enable = arg; - return RDT_NUM_MODES; + wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); } -static const char * const rdt_mode_str[] = { - [RDT_MODE_SHAREABLE] = "shareable", - [RDT_MODE_EXCLUSIVE] = "exclusive", - [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", - [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", -}; - -/** - * rdtgroup_mode_str - Return the string representation of mode - * @mode: the resource group mode as &enum rdtgroup_mode - * - * Return: string representation of valid mode, "unknown" otherwise - */ -static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +static void l2_qos_cfg_update(void *arg) { - if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) - return "unknown"; + bool *enable = arg; - return rdt_mode_str[mode]; + wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); } -/* set uid and gid of rdtgroup dirs and files to that of the creator */ -static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +static int set_cache_qos_cfg(int level, bool enable) { - struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, - .ia_uid = current_fsuid(), - .ia_gid = current_fsgid(), }; - - if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && - gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) - return 0; + void (*update)(void *arg); + struct rdt_resource *r_l; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int cpu; - return kernfs_setattr(kn, &iattr); -} + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); -static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) -{ - struct kernfs_node *kn; - int ret; + if (level == RDT_RESOURCE_L3) + update = l3_qos_cfg_update; + else if (level == RDT_RESOURCE_L2) + update = l2_qos_cfg_update; + else + return -EINVAL; - kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, - 0, rft->kf_ops, rft, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; + r_l = &rdt_resources_all[level].r_resctrl; + list_for_each_entry(d, &r_l->domains, list) { + if (r_l->cache.arch_has_per_cpu_cfg) + /* Pick all the CPUs in the domain instance */ + for_each_cpu(cpu, &d->cpu_mask) + cpumask_set_cpu(cpu, cpu_mask); + else + /* Pick one CPU from each domain instance to update MSR */ + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); } - return 0; -} + /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, update, &enable, 1); -static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) -{ - struct kernfs_open_file *of = m->private; - struct rftype *rft = of->kn->priv; + free_cpumask_var(cpu_mask); - if (rft->seq_show) - return rft->seq_show(of, m, arg); return 0; } -static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, - size_t nbytes, loff_t off) +/* 
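
The free-map CLOSID allocator shown earlier fits in one integer, so allocation and free are plain bit operations. A worked trace, assuming rdt_min_closid = 4::

	/*
	 * After closid_init(): closid_free_map = 0b1110
	 * (bit 0 cleared - CLOSID 0 is reserved for the default group).
	 *
	 *   closid_alloc()  -> ffs(0b1110) - 1 = 1, map becomes 0b1100
	 *   closid_alloc()  -> 2,                   map becomes 0b1000
	 *   closid_free(1)  ->                      map becomes 0b1010
	 */
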
Restore the qos cfg state when a domain comes online */ +void rdt_domain_reconfigure_cdp(struct rdt_resource *r) { - struct rftype *rft = of->kn->priv; - - if (rft->write) - return rft->write(of, buf, nbytes, off); - - return -EINVAL; -} - -static const struct kernfs_ops rdtgroup_kf_single_ops = { - .atomic_write_len = PAGE_SIZE, - .write = rdtgroup_file_write, - .seq_show = rdtgroup_seqfile_show, -}; + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); -static const struct kernfs_ops kf_mondata_ops = { - .atomic_write_len = PAGE_SIZE, - .seq_show = rdtgroup_mondata_show, -}; + if (!r->cdp_capable) + return; -static bool is_cpu_list(struct kernfs_open_file *of) -{ - struct rftype *rft = of->kn->priv; + if (r->rid == RDT_RESOURCE_L2) + l2_qos_cfg_update(&hw_res->cdp_enabled); - return rft->flags & RFTYPE_FLAGS_CPUS_LIST; + if (r->rid == RDT_RESOURCE_L3) + l3_qos_cfg_update(&hw_res->cdp_enabled); } -static int rdtgroup_cpus_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) +static int cdp_enable(int level) { - struct rdtgroup *rdtgrp; - struct cpumask *mask; - int ret = 0; + struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; + int ret; - rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!r_l->alloc_capable) + return -EINVAL; - if (rdtgrp) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - mask = &rdtgrp->plr->d->cpu_mask; - seq_printf(s, is_cpu_list(of) ? - "%*pbl\n" : "%*pb\n", - cpumask_pr_args(mask)); - } - } else { - seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", - cpumask_pr_args(&rdtgrp->cpu_mask)); - } - } else { - ret = -ENOENT; - } - rdtgroup_kn_unlock(of->kn); + ret = set_cache_qos_cfg(level, true); + if (!ret) + rdt_resources_all[level].cdp_enabled = true; return ret; } -/* - * This is safe against resctrl_sched_in() called from __switch_to() - * because __switch_to() is executed with interrupts disabled. A local call - * from update_closid_rmid() is protected against __switch_to() because - * preemption is disabled. - */ -static void update_cpu_closid_rmid(void *info) +static void cdp_disable(int level) { - struct rdtgroup *r = info; + struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - if (r) { - this_cpu_write(pqr_state.default_closid, r->closid); - this_cpu_write(pqr_state.default_rmid, r->mon.rmid); + if (r_hw->cdp_enabled) { + set_cache_qos_cfg(level, false); + r_hw->cdp_enabled = false; } - - /* - * We cannot unconditionally write the MSR because the current - * executing task might have its own closid selected. Just reuse - * the context switch code. - */ - resctrl_sched_in(current); -} - -/* - * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, - * - * Per task closids/rmids must have been set up before calling this function. 
- */ -static void -update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) -{ - on_each_cpu_mask(cpu_mask, update_cpu_closid_rmid, r, 1); } -static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask) +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) { - struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; - struct list_head *head; + struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - /* Check whether cpus belong to parent ctrl group */ - cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + if (!hw_res->r_resctrl.cdp_capable) return -EINVAL; - } - - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Give any dropped cpus to parent rdtgroup */ - cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); - update_closid_rmid(tmpmask, prgrp); - } - /* - * If we added cpus, remove them from previous group that owned them - * and update per-cpu rmid - */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - if (crgrp == rdtgrp) - continue; - cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, - tmpmask); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (enable) + return cdp_enable(l); - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + cdp_disable(l); return 0; } -static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) -{ - struct rdtgroup *crgrp; - - cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); - /* update the child mon group masks as well*/ - list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) - cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); -} - -static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, - cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +static int reset_all_ctrls(struct rdt_resource *r) { - struct rdtgroup *r, *crgrp; - struct list_head *head; - - /* Check whether cpus are dropped from this group */ - cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); - if (!cpumask_empty(tmpmask)) { - /* Can't drop from default group */ - if (rdtgrp == &rdtgroup_default) { - rdt_last_cmd_puts("Can't drop CPUs from default group\n"); - return -EINVAL; - } + struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); + struct rdt_hw_domain *hw_dom; + struct msr_param msr_param; + cpumask_var_t cpu_mask; + struct rdt_domain *d; + int i; - /* Give any dropped cpus to rdtgroup_default */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, tmpmask); - update_closid_rmid(tmpmask, &rdtgroup_default); - } + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); - /* - * If we added cpus, remove them from previous group and - * the prev group's child groups that owned them - * and update per-cpu closid/rmid. 
- */ - cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); - if (!cpumask_empty(tmpmask)) { - list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { - if (r == rdtgrp) - continue; - cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); - if (!cpumask_empty(tmpmask1)) - cpumask_rdtgrp_clear(r, tmpmask1); - } - update_closid_rmid(tmpmask, rdtgrp); - } + if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) + return -ENOMEM; - /* Done pushing/pulling - update this group with new mask */ - cpumask_copy(&rdtgrp->cpu_mask, newmask); + msr_param.res = r; + msr_param.low = 0; + msr_param.high = hw_res->num_closid; /* - * Clear child mon group masks since there is a new parent mask - * now and update the rmid for the cpus the child lost. + * Disable resource control for this resource by setting all + * CBMs in all domains to the maximum mask value. Pick one CPU + * from each domain to update the MSRs below. */ - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); - update_closid_rmid(tmpmask, rdtgrp); - cpumask_clear(&crgrp->cpu_mask); - } - - return 0; -} - -static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - cpumask_var_t tmpmask, newmask, tmpmask1; - struct rdtgroup *rdtgrp; - int ret; - - if (!buf) - return -EINVAL; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - return -ENOMEM; - } - if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - return -ENOMEM; - } - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - ret = -ENOENT; - goto unlock; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - if (is_cpu_list(of)) - ret = cpulist_parse(buf, newmask); - else - ret = cpumask_parse(buf, newmask); - - if (ret) { - rdt_last_cmd_puts("Bad CPU list/mask\n"); - goto unlock; - } + list_for_each_entry(d, &r->domains, list) { + hw_dom = resctrl_to_arch_dom(d); + cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - /* check that user didn't specify any offline cpus */ - cpumask_andnot(tmpmask, newmask, cpu_online_mask); - if (!cpumask_empty(tmpmask)) { - ret = -EINVAL; - rdt_last_cmd_puts("Can only assign online CPUs\n"); - goto unlock; + for (i = 0; i < hw_res->num_closid; i++) + hw_dom->ctrl_val[i] = r->default_ctrl; } - if (rdtgrp->type == RDTCTRL_GROUP) - ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); - else if (rdtgrp->type == RDTMON_GROUP) - ret = cpus_mon_write(rdtgrp, newmask, tmpmask); - else - ret = -EINVAL; - -unlock: - rdtgroup_kn_unlock(of->kn); - free_cpumask_var(tmpmask); - free_cpumask_var(newmask); - free_cpumask_var(tmpmask1); + /* Update CBM on all the CPUs in cpu_mask */ + on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - return ret ?: nbytes; -} + free_cpumask_var(cpu_mask); -/** - * rdtgroup_remove - the helper to remove resource group safely - * @rdtgrp: resource group to remove - * - * On resource group creation via a mkdir, an extra kernfs_node reference is - * taken to ensure that the rdtgroup structure remains accessible for the - * rdtgroup_kn_unlock() calls where it is removed. - * - * Drop the extra reference here, then free the rdtgroup structure. 
- * - * Return: void - */ -static void rdtgroup_remove(struct rdtgroup *rdtgrp) -{ - kernfs_put(rdtgrp->kn); - kfree(rdtgrp); + return 0; } -static void _update_task_closid_rmid(void *task) +void resctrl_arch_reset_resources(void) { - /* - * If the task is still current on this CPU, update PQR_ASSOC MSR. - * Otherwise, the MSR is updated when the task is scheduled in. - */ - if (task == current) - resctrl_sched_in(task); -} + struct rdt_resource *r; -static void update_task_closid_rmid(struct task_struct *t) -{ - if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) - smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); - else - _update_task_closid_rmid(t); -} - -static int __rdtgroup_move_task(struct task_struct *tsk, - struct rdtgroup *rdtgrp) -{ - /* If the task is already in rdtgrp, no need to move the task. */ - if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid && - tsk->rmid == rdtgrp->mon.rmid) || - (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid && - tsk->closid == rdtgrp->mon.parent->closid)) - return 0; - - /* - * Set the task's closid/rmid before the PQR_ASSOC MSR can be - * updated by them. - * - * For ctrl_mon groups, move both closid and rmid. - * For monitor groups, can move the tasks only from - * their parent CTRL group. - */ - - if (rdtgrp->type == RDTCTRL_GROUP) { - WRITE_ONCE(tsk->closid, rdtgrp->closid); - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else if (rdtgrp->type == RDTMON_GROUP) { - if (rdtgrp->mon.parent->closid == tsk->closid) { - WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid); - } else { - rdt_last_cmd_puts("Can't move task to different control group\n"); - return -EINVAL; - } - } - - /* - * Ensure the task's closid and rmid are written before determining if - * the task is current that will decide if it will be interrupted. - * This pairs with the full barrier between the rq->curr update and - * resctrl_sched_in() during context switch. - */ - smp_mb(); - - /* - * By now, the task's closid and rmid are set. If the task is current - * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource - * group go into effect. If the task is not current, the MSR will be - * updated when the task is scheduled in. - */ - update_task_closid_rmid(tsk); - - return 0; -} - -static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_alloc_capable && - (r->type == RDTCTRL_GROUP) && (t->closid == r->closid)); -} - -static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) -{ - return (rdt_mon_capable && - (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid)); -} - -/** - * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group - * @r: Resource group - * - * Return: 1 if tasks have been assigned to @r, 0 otherwise - */ -int rdtgroup_tasks_assigned(struct rdtgroup *r) -{ - struct task_struct *p, *t; - int ret = 0; - - lockdep_assert_held(&rdtgroup_mutex); - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - ret = 1; - break; - } - } - rcu_read_unlock(); - - return ret; -} - -static int rdtgroup_task_write_permission(struct task_struct *task, - struct kernfs_open_file *of) -{ - const struct cred *tcred = get_task_cred(task); - const struct cred *cred = current_cred(); - int ret = 0; - - /* - * Even if we're attaching all tasks in the thread group, we only - * need to check permissions on one of them. 
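The tasks file written by rdtgroup_tasks_write() below accepts one PID per write; a PID of 0 moves the writing task itself, since rdtgroup_move_task() falls back to current. A minimal sketch, again assuming a mounted resctrl and a hypothetical group grp0:

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	/*
	 * Move the calling process into the (hypothetical) group "grp0".
	 * Writing 0 instead of getpid() would have the same effect.
	 */
	int main(void)
	{
		FILE *f = fopen("/sys/fs/resctrl/grp0/tasks", "w");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		if (fprintf(f, "%d\n", (int)getpid()) < 0 || fclose(f) == EOF) {
			perror("tasks");
			return EXIT_FAILURE;
		}
		return EXIT_SUCCESS;
	}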
- */ - if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && - !uid_eq(cred->euid, tcred->uid) && - !uid_eq(cred->euid, tcred->suid)) { - rdt_last_cmd_printf("No permission to move task %d\n", task->pid); - ret = -EPERM; - } - - put_cred(tcred); - return ret; -} - -static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, - struct kernfs_open_file *of) -{ - struct task_struct *tsk; - int ret; - - rcu_read_lock(); - if (pid) { - tsk = find_task_by_vpid(pid); - if (!tsk) { - rcu_read_unlock(); - rdt_last_cmd_printf("No task %d\n", pid); - return -ESRCH; - } - } else { - tsk = current; - } - - get_task_struct(tsk); - rcu_read_unlock(); - - ret = rdtgroup_task_write_permission(tsk, of); - if (!ret) - ret = __rdtgroup_move_task(tsk, rdtgrp); - - put_task_struct(tsk); - return ret; -} - -static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - pid_t pid; - - if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0) - return -EINVAL; - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - rdt_last_cmd_clear(); - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto unlock; - } - - ret = rdtgroup_move_task(pid, rdtgrp, of); - -unlock: - rdtgroup_kn_unlock(of->kn); - - return ret ?: nbytes; -} - -static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) -{ - struct task_struct *p, *t; - pid_t pid; - - rcu_read_lock(); - for_each_process_thread(p, t) { - if (is_closid_match(t, r) || is_rmid_match(t, r)) { - pid = task_pid_vnr(t); - if (pid) - seq_printf(s, "%d\n", pid); - } - } - rcu_read_unlock(); -} - -static int rdtgroup_tasks_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - int ret = 0; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (rdtgrp) - show_rdt_tasks(rdtgrp, s); - else - ret = -ENOENT; - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -#ifdef CONFIG_PROC_CPU_RESCTRL - -/* - * A task can only be part of one resctrl control group and of one monitor - * group which is associated to that control group. - * - * 1) res: - * mon: - * - * resctrl is not available. - * - * 2) res:/ - * mon: - * - * Task is part of the root resctrl control group, and it is not associated - * to any monitor group. - * - * 3) res:/ - * mon:mon0 - * - * Task is part of the root resctrl control group and monitor group mon0. - * - * 4) res:group0 - * mon: - * - * Task is part of resctrl control group group0, and it is not associated - * to any monitor group. - * - * 5) res:group0 - * mon:mon1 - * - * Task is part of resctrl control group group0 and monitor group mon1. - */ -int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, - struct pid *pid, struct task_struct *tsk) -{ - struct rdtgroup *rdtg; - int ret = 0; - - mutex_lock(&rdtgroup_mutex); - - /* Return empty if resctrl has not been mounted. */ - if (!static_branch_unlikely(&rdt_enable_key)) { - seq_puts(s, "res:\nmon:\n"); - goto unlock; - } - - list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { - struct rdtgroup *crg; - - /* - * Task information is only relevant for shareable - * and exclusive groups. - */ - if (rdtg->mode != RDT_MODE_SHAREABLE && - rdtg->mode != RDT_MODE_EXCLUSIVE) - continue; - - if (rdtg->closid != tsk->closid) - continue; - - seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? 
"/" : "", - rdtg->kn->name); - seq_puts(s, "mon:"); - list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, - mon.crdtgrp_list) { - if (tsk->rmid != crg->mon.rmid) - continue; - seq_printf(s, "%s", crg->kn->name); - break; - } - seq_putc(s, '\n'); - goto unlock; - } - /* - * The above search should succeed. Otherwise return - * with an error. - */ - ret = -ENOENT; -unlock: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} -#endif - -static int rdt_last_cmd_status_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - int len; - - mutex_lock(&rdtgroup_mutex); - len = seq_buf_used(&last_cmd_status); - if (len) - seq_printf(seq, "%.*s", len, last_cmd_status_buf); - else - seq_puts(seq, "ok\n"); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_num_closids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - - seq_printf(seq, "%u\n", s->num_closid); - return 0; -} - -static int rdt_default_ctrl_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->default_ctrl); - return 0; -} - -static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->cache.min_cbm_bits); - return 0; -} - -static int rdt_shareable_bits_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%x\n", r->cache.shareable_bits); - return 0; -} - -/** - * rdt_bit_usage_show - Display current usage of resources - * - * A domain is a shared resource that can now be allocated differently. Here - * we display the current regions of the domain as an annotated bitmask. - * For each domain of this resource its allocation bitmask - * is annotated as below to indicate the current usage of the corresponding bit: - * 0 - currently unused - * X - currently available for sharing and used by software and hardware - * H - currently used by hardware only but available for software use - * S - currently used and shareable by software only - * E - currently used exclusively by one resource group - * P - currently pseudo-locked by one resource group - */ -static int rdt_bit_usage_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - /* - * Use unsigned long even though only 32 bits are used to ensure - * test_bit() is used safely. 
- */ - unsigned long sw_shareable = 0, hw_shareable = 0; - unsigned long exclusive = 0, pseudo_locked = 0; - struct rdt_resource *r = s->res; - struct rdt_domain *dom; - int i, hwb, swb, excl, psl; - enum rdtgrp_mode mode; - bool sep = false; - u32 ctrl_val; - - mutex_lock(&rdtgroup_mutex); - hw_shareable = r->cache.shareable_bits; - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_putc(seq, ';'); - sw_shareable = 0; - exclusive = 0; - seq_printf(seq, "%d=", dom->id); - for (i = 0; i < closids_supported(); i++) { - if (!closid_allocated(i)) - continue; - ctrl_val = resctrl_arch_get_config(r, dom, i, - s->conf_type); - mode = rdtgroup_mode_by_closid(i); - switch (mode) { - case RDT_MODE_SHAREABLE: - sw_shareable |= ctrl_val; - break; - case RDT_MODE_EXCLUSIVE: - exclusive |= ctrl_val; - break; - case RDT_MODE_PSEUDO_LOCKSETUP: - /* - * RDT_MODE_PSEUDO_LOCKSETUP is possible - * here but not included since the CBM - * associated with this CLOSID in this mode - * is not initialized and no task or cpu can be - * assigned this CLOSID. - */ - break; - case RDT_MODE_PSEUDO_LOCKED: - case RDT_NUM_MODES: - WARN(1, - "invalid mode for closid %d\n", i); - break; - } - } - for (i = r->cache.cbm_len - 1; i >= 0; i--) { - pseudo_locked = dom->plr ? dom->plr->cbm : 0; - hwb = test_bit(i, &hw_shareable); - swb = test_bit(i, &sw_shareable); - excl = test_bit(i, &exclusive); - psl = test_bit(i, &pseudo_locked); - if (hwb && swb) - seq_putc(seq, 'X'); - else if (hwb && !swb) - seq_putc(seq, 'H'); - else if (!hwb && swb) - seq_putc(seq, 'S'); - else if (excl) - seq_putc(seq, 'E'); - else if (psl) - seq_putc(seq, 'P'); - else /* Unused bits remain */ - seq_putc(seq, '0'); - } - sep = true; - } - seq_putc(seq, '\n'); - mutex_unlock(&rdtgroup_mutex); - return 0; -} - -static int rdt_min_bw_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.min_bw); - return 0; -} - -static int rdt_num_rmids_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - seq_printf(seq, "%d\n", r->num_rmid); - - return 0; -} - -static int rdt_mon_features_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - struct mon_evt *mevt; - - list_for_each_entry(mevt, &r->evt_list, list) { - seq_printf(seq, "%s\n", mevt->name); - if (mevt->configurable) - seq_printf(seq, "%s_config\n", mevt->name); - } - - return 0; -} - -static int rdt_bw_gran_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.bw_gran); - return 0; -} - -static int rdt_delay_linear_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - seq_printf(seq, "%u\n", r->membw.delay_linear); - return 0; -} - -static int max_threshold_occ_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); - - return 0; -} - -static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct resctrl_schema *s = of->kn->parent->priv; - struct rdt_resource *r = s->res; - - if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) - seq_puts(seq, 
"per-thread\n"); - else - seq_puts(seq, "max\n"); - - return 0; -} - -static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - unsigned int bytes; - int ret; - - ret = kstrtouint(buf, 0, &bytes); - if (ret) - return ret; - - if (bytes > resctrl_rmid_realloc_limit) - return -EINVAL; - - resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); - - return nbytes; -} - -/* - * rdtgroup_mode_show - Display mode of this resource group - */ -static int rdtgroup_mode_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct rdtgroup *rdtgrp; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); - - rdtgroup_kn_unlock(of->kn); - return 0; -} - -static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) -{ - switch (my_type) { - case CDP_CODE: - return CDP_DATA; - case CDP_DATA: - return CDP_CODE; - default: - case CDP_NONE: - return CDP_NONE; - } -} - -/** - * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other - * @r: Resource to which domain instance @d belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. - * @exclusive: Only check if overlaps with exclusive resource groups - * - * Checks if provided @cbm intended to be used for @closid on domain - * @d overlaps with any other closids or other hardware usage associated - * with this domain. If @exclusive is true then only overlaps with - * resource groups in exclusive mode will be considered. If @exclusive - * is false then overlaps with any resource group or hardware entities - * will be considered. - * - * @cbm is unsigned long, even if only 32 bits are used, to make the - * bitmap functions work correctly. - * - * Return: false if CBM does not overlap, true if it does. - */ -static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d, - unsigned long cbm, int closid, - enum resctrl_conf_type type, bool exclusive) -{ - enum rdtgrp_mode mode; - unsigned long ctrl_b; - int i; - - /* Check for any overlap with regions used by hardware directly */ - if (!exclusive) { - ctrl_b = r->cache.shareable_bits; - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) - return true; - } - - /* Check for overlap with other resource groups */ - for (i = 0; i < closids_supported(); i++) { - ctrl_b = resctrl_arch_get_config(r, d, i, type); - mode = rdtgroup_mode_by_closid(i); - if (closid_allocated(i) && i != closid && - mode != RDT_MODE_PSEUDO_LOCKSETUP) { - if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) { - if (exclusive) { - if (mode == RDT_MODE_EXCLUSIVE) - return true; - continue; - } - return true; - } - } - } - - return false; -} - -/** - * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware - * @s: Schema for the resource to which domain instance @d belongs. - * @d: The domain instance for which @closid is being tested. - * @cbm: Capacity bitmask being tested. - * @closid: Intended closid for @cbm. - * @exclusive: Only check if overlaps with exclusive resource groups - * - * Resources that can be allocated using a CBM can use the CBM to control - * the overlap of these allocations. rdtgroup_cmb_overlaps() is the test - * for overlap. 
Overlap test is not limited to the specific resource for - * which the CBM is intended though - when dealing with CDP resources that - * share the underlying hardware the overlap check should be performed on - * the CDP resource sharing the hardware also. - * - * Refer to description of __rdtgroup_cbm_overlaps() for the details of the - * overlap test. - * - * Return: true if CBM overlap detected, false if there is no overlap - */ -bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, - unsigned long cbm, int closid, bool exclusive) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - struct rdt_resource *r = s->res; - - if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type, - exclusive)) - return true; - - if (!resctrl_arch_get_cdp_enabled(r->rid)) - return false; - return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive); -} - -/** - * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive - * - * An exclusive resource group implies that there should be no sharing of - * its allocated resources. At the time this group is considered to be - * exclusive this test can determine if its current schemata supports this - * setting by testing for overlap with all other resource groups. - * - * Return: true if resource group can be exclusive, false if there is overlap - * with allocations of other resource groups and thus this resource group - * cannot be exclusive. - */ -static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) -{ - int closid = rdtgrp->closid; - struct resctrl_schema *s; - struct rdt_resource *r; - bool has_cache = false; - struct rdt_domain *d; - u32 ctrl; - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) - continue; - has_cache = true; - list_for_each_entry(d, &r->domains, list) { - ctrl = resctrl_arch_get_config(r, d, closid, - s->conf_type); - if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { - rdt_last_cmd_puts("Schemata overlaps\n"); - return false; - } - } - } - - if (!has_cache) { - rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); - return false; - } - - return true; -} - -/** - * rdtgroup_mode_write - Modify the resource group's mode - * - */ -static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, loff_t off) -{ - struct rdtgroup *rdtgrp; - enum rdtgrp_mode mode; - int ret = 0; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - buf[nbytes - 1] = '\0'; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - rdt_last_cmd_clear(); - - mode = rdtgrp->mode; - - if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || - (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || - (!strcmp(buf, "pseudo-locksetup") && - mode == RDT_MODE_PSEUDO_LOCKSETUP) || - (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) - goto out; - - if (mode == RDT_MODE_PSEUDO_LOCKED) { - rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); - ret = -EINVAL; - goto out; - } - - if (!strcmp(buf, "shareable")) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_SHAREABLE; - } else if (!strcmp(buf, "exclusive")) { - if (!rdtgroup_mode_test_exclusive(rdtgrp)) { - ret = -EINVAL; - goto out; - } - if (rdtgrp->mode == 
RDT_MODE_PSEUDO_LOCKSETUP) { - ret = rdtgroup_locksetup_exit(rdtgrp); - if (ret) - goto out; - } - rdtgrp->mode = RDT_MODE_EXCLUSIVE; - } else if (!strcmp(buf, "pseudo-locksetup")) { - ret = rdtgroup_locksetup_enter(rdtgrp); - if (ret) - goto out; - rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; - } else { - rdt_last_cmd_puts("Unknown or unsupported mode\n"); - ret = -EINVAL; - } - -out: - rdtgroup_kn_unlock(of->kn); - return ret ?: nbytes; -} - -/** - * rdtgroup_cbm_to_size - Translate CBM to size in bytes - * @r: RDT resource to which @d belongs. - * @d: RDT domain instance. - * @cbm: bitmask for which the size should be computed. - * - * The bitmask provided associated with the RDT domain instance @d will be - * translated into how many bytes it represents. The size in bytes is - * computed by first dividing the total cache size by the CBM length to - * determine how many bytes each bit in the bitmask represents. The result - * is multiplied with the number of bits set in the bitmask. - * - * @cbm is unsigned long, even if only 32 bits are used to make the - * bitmap functions work correctly. - */ -unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, - struct rdt_domain *d, unsigned long cbm) -{ - struct cpu_cacheinfo *ci; - unsigned int size = 0; - int num_b, i; - - num_b = bitmap_weight(&cbm, r->cache.cbm_len); - ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); - for (i = 0; i < ci->num_leaves; i++) { - if (ci->info_list[i].level == r->cache_level) { - size = ci->info_list[i].size / r->cache.cbm_len * num_b; - break; - } - } - - return size; -} - -/** - * rdtgroup_size_show - Display size in bytes of allocated regions - * - * The "size" file mirrors the layout of the "schemata" file, printing the - * size in bytes of each region instead of the capacity bitmask. 
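A worked instance of the rdtgroup_cbm_to_size() arithmetic above, with invented cache geometry (a 32 MiB domain and a 16 bit CBM):

	#include <stdio.h>

	int main(void)
	{
		unsigned int cache_size = 32u << 20;	/* 32 MiB, illustrative */
		unsigned int cbm_len = 16;		/* illustrative */
		unsigned long cbm = 0x000f;		/* four contiguous bits */
		unsigned int num_b = __builtin_popcountl(cbm);
		/* bytes per CBM bit, times bits set: 2 MiB * 4 = 8 MiB */
		unsigned int size = cache_size / cbm_len * num_b;

		printf("CBM 0x%lx -> %u bytes\n", cbm, size);
		return 0;
	}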
- * - */ -static int rdtgroup_size_show(struct kernfs_open_file *of, - struct seq_file *s, void *v) -{ - struct resctrl_schema *schema; - enum resctrl_conf_type type; - struct rdtgroup *rdtgrp; - struct rdt_resource *r; - struct rdt_domain *d; - unsigned int size; - int ret = 0; - u32 closid; - bool sep; - u32 ctrl; - - rdtgrp = rdtgroup_kn_lock_live(of->kn); - if (!rdtgrp) { - rdtgroup_kn_unlock(of->kn); - return -ENOENT; - } - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - if (!rdtgrp->plr->d) { - rdt_last_cmd_clear(); - rdt_last_cmd_puts("Cache domain offline\n"); - ret = -ENODEV; - } else { - seq_printf(s, "%*s:", max_name_width, - rdtgrp->plr->s->name); - size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, - rdtgrp->plr->d, - rdtgrp->plr->cbm); - seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); - } - goto out; - } - - closid = rdtgrp->closid; - - list_for_each_entry(schema, &resctrl_schema_all, list) { - r = schema->res; - type = schema->conf_type; - sep = false; - seq_printf(s, "%*s:", max_name_width, schema->name); - list_for_each_entry(d, &r->domains, list) { - if (sep) - seq_putc(s, ';'); - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { - size = 0; - } else { - if (is_mba_sc(r)) - ctrl = d->mbps_val[closid]; - else - ctrl = resctrl_arch_get_config(r, d, - closid, - type); - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) - size = ctrl; - else - size = rdtgroup_cbm_to_size(r, d, ctrl); - } - seq_printf(s, "%d=%u", d->id, size); - sep = true; - } - seq_putc(s, '\n'); - } - -out: - rdtgroup_kn_unlock(of->kn); - - return ret; -} - -struct mon_config_info { - u32 evtid; - u32 mon_config; -}; - -#define INVALID_CONFIG_INDEX UINT_MAX - -/** - * mon_event_config_index_get - get the hardware index for the - * configurable event - * @evtid: event id. 
- * - * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID - * 1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID - * INVALID_CONFIG_INDEX for invalid evtid - */ -static inline unsigned int mon_event_config_index_get(u32 evtid) -{ - switch (evtid) { - case QOS_L3_MBM_TOTAL_EVENT_ID: - return 0; - case QOS_L3_MBM_LOCAL_EVENT_ID: - return 1; - default: - /* Should never reach here */ - return INVALID_CONFIG_INDEX; - } -} - -static void mon_event_config_read(void *info) -{ - struct mon_config_info *mon_info = info; - unsigned int index; - u64 msrval; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - return; - } - rdmsrl(MSR_IA32_EVT_CFG_BASE + index, msrval); - - /* Report only the valid event configuration bits */ - mon_info->mon_config = msrval & MAX_EVT_CONFIG_BITS; -} - -static void mondata_config_read(struct rdt_domain *d, struct mon_config_info *mon_info) -{ - smp_call_function_any(&d->cpu_mask, mon_event_config_read, mon_info, 1); -} - -static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) -{ - struct mon_config_info mon_info = {0}; - struct rdt_domain *dom; - bool sep = false; - - mutex_lock(&rdtgroup_mutex); - - list_for_each_entry(dom, &r->domains, list) { - if (sep) - seq_puts(s, ";"); - - memset(&mon_info, 0, sizeof(struct mon_config_info)); - mon_info.evtid = evtid; - mondata_config_read(dom, &mon_info); - - seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); - sep = true; - } - seq_puts(s, "\n"); - - mutex_unlock(&rdtgroup_mutex); - - return 0; -} - -static int mbm_total_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID); - - return 0; -} - -static int mbm_local_bytes_config_show(struct kernfs_open_file *of, - struct seq_file *seq, void *v) -{ - struct rdt_resource *r = of->kn->parent->priv; - - mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID); - - return 0; -} - -static void mon_event_config_write(void *info) -{ - struct mon_config_info *mon_info = info; - unsigned int index; - - index = mon_event_config_index_get(mon_info->evtid); - if (index == INVALID_CONFIG_INDEX) { - pr_warn_once("Invalid event id %d\n", mon_info->evtid); - return; - } - wrmsr(MSR_IA32_EVT_CFG_BASE + index, mon_info->mon_config, 0); -} - -static int mbm_config_write_domain(struct rdt_resource *r, - struct rdt_domain *d, u32 evtid, u32 val) -{ - struct mon_config_info mon_info = {0}; - int ret = 0; - - /* mon_config cannot be more than the supported set of events */ - if (val > MAX_EVT_CONFIG_BITS) { - rdt_last_cmd_puts("Invalid event configuration\n"); - return -EINVAL; - } - - /* - * Read the current config value first. If both are the same then - * no need to write it again. - */ - mon_info.evtid = evtid; - mondata_config_read(d, &mon_info); - if (mon_info.mon_config == val) - goto out; - - mon_info.mon_config = val; - - /* - * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the - * domain. The MSRs offset from MSR MSR_IA32_EVT_CFG_BASE - * are scoped at the domain level. Writing any of these MSRs - * on one CPU is observed by all the CPUs in the domain. - */ - smp_call_function_any(&d->cpu_mask, mon_event_config_write, - &mon_info, 1); - - /* - * When an Event Configuration is changed, the bandwidth counters - * for all RMIDs and Events will be cleared by the hardware. 
The - * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for - * every RMID on the next read to any event for every RMID. - * Subsequent reads will have MSR_IA32_QM_CTR.Unavailable (bit 62) - * cleared while it is tracked by the hardware. Clear the - * mbm_local and mbm_total counts for all the RMIDs. - */ - resctrl_arch_reset_rmid_all(r, d); - -out: - return ret; -} - -static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid) -{ - char *dom_str = NULL, *id_str; - unsigned long dom_id, val; - struct rdt_domain *d; - int ret = 0; - -next: - if (!tok || tok[0] == '\0') - return 0; - - /* Start processing the strings for each domain */ - dom_str = strim(strsep(&tok, ";")); - id_str = strsep(&dom_str, "="); - - if (!id_str || kstrtoul(id_str, 10, &dom_id)) { - rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n"); - return -EINVAL; - } - - if (!dom_str || kstrtoul(dom_str, 16, &val)) { - rdt_last_cmd_puts("Non-numeric event configuration value\n"); - return -EINVAL; - } - - list_for_each_entry(d, &r->domains, list) { - if (d->id == dom_id) { - ret = mbm_config_write_domain(r, d, evtid, val); - if (ret) - return -EINVAL; - goto next; - } - } - - return -EINVAL; -} - -static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - - return ret ?: nbytes; -} - -static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of, - char *buf, size_t nbytes, - loff_t off) -{ - struct rdt_resource *r = of->kn->parent->priv; - int ret; - - /* Valid input requires a trailing newline */ - if (nbytes == 0 || buf[nbytes - 1] != '\n') - return -EINVAL; - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - buf[nbytes - 1] = '\0'; - - ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); - - mutex_unlock(&rdtgroup_mutex); - - return ret ?: nbytes; -} - -/* rdtgroup information files for one cache resource. 
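mon_config_write() above expects semicolon-separated <domain>=<hex value> pairs with a trailing newline, applied per domain via mbm_config_write_domain(). A user-space sketch for a two-domain system; the 0x30 value is illustrative and must stay within MAX_EVT_CONFIG_BITS:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		FILE *f = fopen("/sys/fs/resctrl/info/L3_MON/mbm_total_bytes_config", "w");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		/* domains 0 and 1; illustrative event configuration value */
		if (fprintf(f, "0=0x30;1=0x30\n") < 0 || fclose(f) == EOF) {
			perror("mbm_total_bytes_config");
			return EXIT_FAILURE;
		}
		return EXIT_SUCCESS;
	}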
*/ -static struct rftype res_common_files[] = { - { - .name = "last_cmd_status", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_last_cmd_status_show, - .fflags = RF_TOP_INFO, - }, - { - .name = "num_closids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_closids_show, - .fflags = RF_CTRL_INFO, - }, - { - .name = "mon_features", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_mon_features_show, - .fflags = RF_MON_INFO, - }, - { - .name = "num_rmids", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_num_rmids_show, - .fflags = RF_MON_INFO, - }, - { - .name = "cbm_mask", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_default_ctrl_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_cbm_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_cbm_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "shareable_bits", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_shareable_bits_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "bit_usage", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bit_usage_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "min_bandwidth", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_min_bw_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "bandwidth_gran", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_bw_gran_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - { - .name = "delay_linear", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_delay_linear_show, - .fflags = RF_CTRL_INFO | RFTYPE_RES_MB, - }, - /* - * Platform specific which (if any) capabilities are provided by - * thread_throttle_mode. Defer "fflags" initialization to platform - * discovery. 
- */ - { - .name = "thread_throttle_mode", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdt_thread_throttle_mode_show, - }, - { - .name = "max_threshold_occupancy", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = max_threshold_occ_write, - .seq_show = max_threshold_occ_show, - .fflags = RF_MON_INFO | RFTYPE_RES_CACHE, - }, - { - .name = "mbm_total_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_total_bytes_config_show, - .write = mbm_total_bytes_config_write, - }, - { - .name = "mbm_local_bytes_config", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = mbm_local_bytes_config_show, - .write = mbm_local_bytes_config_write, - }, - { - .name = "cpus", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "cpus_list", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_cpus_write, - .seq_show = rdtgroup_cpus_show, - .flags = RFTYPE_FLAGS_CPUS_LIST, - .fflags = RFTYPE_BASE, - }, - { - .name = "tasks", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_tasks_write, - .seq_show = rdtgroup_tasks_show, - .fflags = RFTYPE_BASE, - }, - { - .name = "schemata", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_schemata_write, - .seq_show = rdtgroup_schemata_show, - .fflags = RF_CTRL_BASE, - }, - { - .name = "mode", - .mode = 0644, - .kf_ops = &rdtgroup_kf_single_ops, - .write = rdtgroup_mode_write, - .seq_show = rdtgroup_mode_show, - .fflags = RF_CTRL_BASE, - }, - { - .name = "size", - .mode = 0444, - .kf_ops = &rdtgroup_kf_single_ops, - .seq_show = rdtgroup_size_show, - .fflags = RF_CTRL_BASE, - }, - -}; - -static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) -{ - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - lockdep_assert_held(&rdtgroup_mutex); - - for (rft = rfts; rft < rfts + len; rft++) { - if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { - ret = rdtgroup_add_file(kn, rft); - if (ret) - goto error; - } - } - - return 0; -error: - pr_warn("Failed to add %s, err=%d\n", rft->name, ret); - while (--rft >= rfts) { - if ((fflags & rft->fflags) == rft->fflags) - kernfs_remove_by_name(kn, rft->name); - } - return ret; -} - -static struct rftype *rdtgroup_get_rftype_by_name(const char *name) -{ - struct rftype *rfts, *rft; - int len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - return rft; - } - - return NULL; -} - -void __init thread_throttle_mode_init(void) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name("thread_throttle_mode"); - if (!rft) - return; - - rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB; -} - -void __init mbm_config_rftype_init(const char *config) -{ - struct rftype *rft; - - rft = rdtgroup_get_rftype_by_name(config); - if (rft) - rft->fflags = RF_MON_INFO | RFTYPE_RES_CACHE; -} - -/** - * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * - * The permissions of named resctrl file, directory, or link are modified - * to not allow read, write, or execute by any user. 
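Once thread_throttle_mode_init() above has filled in its fflags, the file appears under the MB resource's info directory and reads back either max or per-thread (see rdt_thread_throttle_mode_show()). A reader sketch, with the path assumed from a mounted resctrl:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char mode[32];
		FILE *f = fopen("/sys/fs/resctrl/info/MB/thread_throttle_mode", "r");

		if (!f) {
			perror("fopen");	/* absent on platforms without the hook */
			return EXIT_FAILURE;
		}
		if (fgets(mode, sizeof(mode), f))
			fputs(mode, stdout);	/* "max" or "per-thread" */
		fclose(f);
		return EXIT_SUCCESS;
	}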
- * - * WARNING: This function is intended to communicate to the user that the - * resctrl file has been locked down - that it is not relevant to the - * particular state the system finds itself in. It should not be relied - * on to protect from user access because after the file's permissions - * are restricted the user can still change the permissions using chmod - * from the command line. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn; - int ret = 0; - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - iattr.ia_mode = S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode = S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode = S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -/** - * rdtgroup_kn_mode_restore - Restore user access to named resctrl file - * @r: The resource group with which the file is associated. - * @name: Name of the file - * @mask: Mask of permissions that should be restored - * - * Restore the permissions of the named file. If @name is a directory the - * permissions of its parent will be used. - * - * Return: 0 on success, <0 on failure. - */ -int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, - umode_t mask) -{ - struct iattr iattr = {.ia_valid = ATTR_MODE,}; - struct kernfs_node *kn, *parent; - struct rftype *rfts, *rft; - int ret, len; - - rfts = res_common_files; - len = ARRAY_SIZE(res_common_files); - - for (rft = rfts; rft < rfts + len; rft++) { - if (!strcmp(rft->name, name)) - iattr.ia_mode = rft->mode & mask; - } - - kn = kernfs_find_and_get_ns(r->kn, name, NULL); - if (!kn) - return -ENOENT; - - switch (kernfs_type(kn)) { - case KERNFS_DIR: - parent = kernfs_get_parent(kn); - if (parent) { - iattr.ia_mode |= parent->mode; - kernfs_put(parent); - } - iattr.ia_mode |= S_IFDIR; - break; - case KERNFS_FILE: - iattr.ia_mode |= S_IFREG; - break; - case KERNFS_LINK: - iattr.ia_mode |= S_IFLNK; - break; - } - - ret = kernfs_setattr(kn, &iattr); - kernfs_put(kn); - return ret; -} - -static int rdtgroup_mkdir_info_resdir(void *priv, char *name, - unsigned long fflags) -{ - struct kernfs_node *kn_subdir; - int ret; - - kn_subdir = kernfs_create_dir(kn_info, name, - kn_info->mode, priv); - if (IS_ERR(kn_subdir)) - return PTR_ERR(kn_subdir); - - ret = rdtgroup_kn_set_ugid(kn_subdir); - if (ret) - return ret; - - ret = rdtgroup_add_files(kn_subdir, fflags); - if (!ret) - kernfs_activate(kn_subdir); - - return ret; -} - -static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - unsigned long fflags; - char name[32]; - int ret; - - /* create the directory */ - kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); - if (IS_ERR(kn_info)) - return PTR_ERR(kn_info); - - ret = rdtgroup_add_files(kn_info, RF_TOP_INFO); - if (ret) - goto out_destroy; - - /* loop over enabled controls, these are all alloc_capable */ - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - fflags = r->fflags | RF_CTRL_INFO; - ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); - if (ret) - goto out_destroy; - } - - for_each_mon_capable_rdt_resource(r) { - fflags = r->fflags | RF_MON_INFO; - sprintf(name, "%s_MON", r->name); - ret = rdtgroup_mkdir_info_resdir(r, name, fflags); - if (ret) - goto out_destroy; - } 
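The info/last_cmd_status file created below through rdtgroup_add_files(kn_info, RF_TOP_INFO) is the first place to look when a resctrl write fails; it holds the last rdt_last_cmd_puts()/rdt_last_cmd_printf() message, or ok. A reader sketch:

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		char buf[256];
		FILE *f = fopen("/sys/fs/resctrl/info/last_cmd_status", "r");

		if (!f) {
			perror("fopen");
			return EXIT_FAILURE;
		}
		while (fgets(buf, sizeof(buf), f))
			fputs(buf, stdout);	/* "ok" when the last command succeeded */
		fclose(f);
		return EXIT_SUCCESS;
	}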
- - ret = rdtgroup_kn_set_ugid(kn_info); - if (ret) - goto out_destroy; - - kernfs_activate(kn_info); - - return 0; - -out_destroy: - kernfs_remove(kn_info); - return ret; -} - -static int -mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, - char *name, struct kernfs_node **dest_kn) -{ - struct kernfs_node *kn; - int ret; - - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - if (dest_kn) - *dest_kn = kn; - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - kernfs_activate(kn); - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -static void l3_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL); -} - -static void l2_qos_cfg_update(void *arg) -{ - bool *enable = arg; - - wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL); -} - -static inline bool is_mba_linear(void) -{ - return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear; -} - -static int set_cache_qos_cfg(int level, bool enable) -{ - void (*update)(void *arg); - struct rdt_resource *r_l; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int cpu; - - if (level == RDT_RESOURCE_L3) - update = l3_qos_cfg_update; - else if (level == RDT_RESOURCE_L2) - update = l2_qos_cfg_update; - else - return -EINVAL; - - if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL)) - return -ENOMEM; - - r_l = &rdt_resources_all[level].r_resctrl; - list_for_each_entry(d, &r_l->domains, list) { - if (r_l->cache.arch_has_per_cpu_cfg) - /* Pick all the CPUs in the domain instance */ - for_each_cpu(cpu, &d->cpu_mask) - cpumask_set_cpu(cpu, cpu_mask); - else - /* Pick one CPU from each domain instance to update MSR */ - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - } - - /* Update QOS_CFG MSR on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, update, &enable, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* Restore the qos cfg state when a domain comes online */ -void rdt_domain_reconfigure_cdp(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - - if (!r->cdp_capable) - return; - - if (r->rid == RDT_RESOURCE_L2) - l2_qos_cfg_update(&hw_res->cdp_enabled); - - if (r->rid == RDT_RESOURCE_L3) - l3_qos_cfg_update(&hw_res->cdp_enabled); -} - -static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) -{ - u32 num_closid = resctrl_arch_get_num_closid(r); - int cpu = cpumask_any(&d->cpu_mask); - int i; - - d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), - GFP_KERNEL, cpu_to_node(cpu)); - if (!d->mbps_val) - return -ENOMEM; - - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - - return 0; -} - -static void mba_sc_domain_destroy(struct rdt_resource *r, - struct rdt_domain *d) -{ - kfree(d->mbps_val); - d->mbps_val = NULL; -} - -/* - * MBA software controller is supported only if - * MBM is supported and MBA is in linear scale. - */ -static bool supports_mba_mbps(void) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; - - return (is_mbm_local_enabled() && - r->alloc_capable && is_mba_linear()); -} - -/* - * Enable or disable the MBA software controller - * which helps user specify bandwidth in MBps. 
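The software controller toggled by set_mba_sc() below is requested at mount time with the mba_MBps option; MB: schemata values are then megabytes per second instead of percentages. A sketch using mount(2) directly (needs CAP_SYS_ADMIN; fails with EINVAL when supports_mba_mbps() is false):

	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* equivalent to: mount -t resctrl -o mba_MBps resctrl /sys/fs/resctrl */
		if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "mba_MBps")) {
			perror("mount");
			return EXIT_FAILURE;
		}
		/* bandwidth can now be set in MBps, e.g. "MB:0=1024" in schemata */
		return EXIT_SUCCESS;
	}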
- */ -static int set_mba_sc(bool mba_sc) -{ - struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl; - u32 num_closid = resctrl_arch_get_num_closid(r); - struct rdt_domain *d; - int i; - - if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) - return -EINVAL; - - r->membw.mba_sc = mba_sc; - - list_for_each_entry(d, &r->domains, list) { - for (i = 0; i < num_closid; i++) - d->mbps_val[i] = MBA_MAX_MBPS; - } - - return 0; -} - -static int cdp_enable(int level) -{ - struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl; - int ret; - - if (!r_l->alloc_capable) - return -EINVAL; - - ret = set_cache_qos_cfg(level, true); - if (!ret) - rdt_resources_all[level].cdp_enabled = true; - - return ret; -} - -static void cdp_disable(int level) -{ - struct rdt_hw_resource *r_hw = &rdt_resources_all[level]; - - if (r_hw->cdp_enabled) { - set_cache_qos_cfg(level, false); - r_hw->cdp_enabled = false; - } -} - -int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable) -{ - struct rdt_hw_resource *hw_res = &rdt_resources_all[l]; - - if (!hw_res->r_resctrl.cdp_capable) - return -EINVAL; - - if (enable) - return cdp_enable(l); - - cdp_disable(l); - - return 0; -} - -static void cdp_disable_all(void) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); -} - -/* - * We don't allow rdtgroup directories to be created anywhere - * except the root directory. Thus when looking for the rdtgroup - * structure for a kernfs node we are either looking at a directory, - * in which case the rdtgroup structure is pointed at by the "priv" - * field, otherwise we have a file, and need only look to the parent - * to find the rdtgroup. - */ -static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) -{ - if (kernfs_type(kn) == KERNFS_DIR) { - /* - * All the resource directories use "kn->priv" - * to point to the "struct rdtgroup" for the - * resource. "info" and its subdirectories don't - * have rdtgroup structures, so return NULL here. - */ - if (kn == kn_info || kn->parent == kn_info) - return NULL; - else - return kn->priv; - } else { - return kn->parent->priv; - } -} - -static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - atomic_inc(&rdtgrp->waitcount); - kernfs_break_active_protection(kn); -} - -static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) -{ - if (atomic_dec_and_test(&rdtgrp->waitcount) && - (rdtgrp->flags & RDT_DELETED)) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - kernfs_unbreak_active_protection(kn); - rdtgroup_remove(rdtgrp); - } else { - kernfs_unbreak_active_protection(kn); - } -} - -struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return NULL; - - rdtgroup_kn_get(rdtgrp, kn); - - mutex_lock(&rdtgroup_mutex); - - /* Was this group deleted while we waited? 
*/ - if (rdtgrp->flags & RDT_DELETED) - return NULL; - - return rdtgrp; -} - -void rdtgroup_kn_unlock(struct kernfs_node *kn) -{ - struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); - - if (!rdtgrp) - return; - - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); -} - -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **mon_data_kn); - -static int rdt_enable_ctx(struct rdt_fs_context *ctx) -{ - int ret = 0; - - if (ctx->enable_cdpl2) - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); - - if (!ret && ctx->enable_cdpl3) - ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); - - if (!ret && ctx->enable_mba_mbps) - ret = set_mba_sc(true); - - return ret; -} - -static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) -{ - struct resctrl_schema *s; - const char *suffix = ""; - int ret, cl; - - s = kzalloc(sizeof(*s), GFP_KERNEL); - if (!s) - return -ENOMEM; - - s->res = r; - s->num_closid = resctrl_arch_get_num_closid(r); - if (resctrl_arch_get_cdp_enabled(r->rid)) - s->num_closid /= 2; - - s->conf_type = type; - switch (type) { - case CDP_CODE: - suffix = "CODE"; - break; - case CDP_DATA: - suffix = "DATA"; - break; - case CDP_NONE: - suffix = ""; - break; - } - - ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); - if (ret >= sizeof(s->name)) { - kfree(s); - return -EINVAL; - } - - cl = strlen(s->name); - - /* - * If CDP is supported by this resource, but not enabled, - * include the suffix. This ensures the tabular format of the - * schemata file does not change between mounts of the filesystem. - */ - if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) - cl += 4; - - if (cl > max_name_width) - max_name_width = cl; - - INIT_LIST_HEAD(&s->list); - list_add(&s->list, &resctrl_schema_all); - - return 0; -} - -static int schemata_list_create(void) -{ - struct rdt_resource *r; - int ret = 0; - - for_each_alloc_capable_rdt_resource(r) { - if (resctrl_arch_get_cdp_enabled(r->rid)) { - ret = schemata_list_add(r, CDP_CODE); - if (ret) - break; - - ret = schemata_list_add(r, CDP_DATA); - } else { - ret = schemata_list_add(r, CDP_NONE); - } - - if (ret) - break; - } - - return ret; -} - -static void schemata_list_destroy(void) -{ - struct resctrl_schema *s, *tmp; - - list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { - list_del(&s->list); - kfree(s); - } -} - -static int rdt_get_tree(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct rdt_domain *dom; - struct rdt_resource *r; - int ret; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - /* - * resctrl file system can only be mounted once. 
- */ - if (static_branch_unlikely(&rdt_enable_key)) { - ret = -EBUSY; - goto out; - } - - ret = rdt_enable_ctx(ctx); - if (ret < 0) - goto out_cdp; - - ret = schemata_list_create(); - if (ret) { - schemata_list_destroy(); - goto out_mba; - } - - closid_init(); - - ret = rdtgroup_create_info_dir(rdtgroup_default.kn); - if (ret < 0) - goto out_schemata_free; - - if (rdt_mon_capable) { - ret = mongroup_create_dir(rdtgroup_default.kn, - &rdtgroup_default, "mon_groups", - &kn_mongrp); - if (ret < 0) - goto out_info; - - ret = mkdir_mondata_all(rdtgroup_default.kn, - &rdtgroup_default, &kn_mondata); - if (ret < 0) - goto out_mongrp; - rdtgroup_default.mon.mon_data_kn = kn_mondata; - } - - ret = rdt_pseudo_lock_init(); - if (ret) - goto out_mondata; - - ret = kernfs_get_tree(fc); - if (ret < 0) - goto out_psl; - - if (rdt_alloc_capable) - static_branch_enable_cpuslocked(&rdt_alloc_enable_key); - if (rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_mon_enable_key); - - if (rdt_alloc_capable || rdt_mon_capable) - static_branch_enable_cpuslocked(&rdt_enable_key); - - if (is_mbm_enabled()) { - r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl; - list_for_each_entry(dom, &r->domains, list) - mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL); - } - - goto out; - -out_psl: - rdt_pseudo_lock_release(); -out_mondata: - if (rdt_mon_capable) - kernfs_remove(kn_mondata); -out_mongrp: - if (rdt_mon_capable) - kernfs_remove(kn_mongrp); -out_info: - kernfs_remove(kn_info); -out_schemata_free: - schemata_list_destroy(); -out_mba: - if (ctx->enable_mba_mbps) - set_mba_sc(false); -out_cdp: - cdp_disable_all(); -out: - rdt_last_cmd_clear(); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); - return ret; -} - -enum rdt_param { - Opt_cdp, - Opt_cdpl2, - Opt_mba_mbps, - nr__rdt_params -}; - -static const struct fs_parameter_spec rdt_fs_parameters[] = { - fsparam_flag("cdp", Opt_cdp), - fsparam_flag("cdpl2", Opt_cdpl2), - fsparam_flag("mba_MBps", Opt_mba_mbps), - {} -}; - -static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - struct fs_parse_result result; - int opt; - - opt = fs_parse(fc, rdt_fs_parameters, param, &result); - if (opt < 0) - return opt; - - switch (opt) { - case Opt_cdp: - ctx->enable_cdpl3 = true; - return 0; - case Opt_cdpl2: - ctx->enable_cdpl2 = true; - return 0; - case Opt_mba_mbps: - if (!supports_mba_mbps()) - return -EINVAL; - ctx->enable_mba_mbps = true; - return 0; - } - - return -EINVAL; -} - -static void rdt_fs_context_free(struct fs_context *fc) -{ - struct rdt_fs_context *ctx = rdt_fc2context(fc); - - kernfs_free_fs_context(fc); - kfree(ctx); -} - -static const struct fs_context_operations rdt_fs_context_ops = { - .free = rdt_fs_context_free, - .parse_param = rdt_parse_param, - .get_tree = rdt_get_tree, -}; - -static int rdt_init_fs_context(struct fs_context *fc) -{ - struct rdt_fs_context *ctx; - - ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); - if (!ctx) - return -ENOMEM; - - ctx->kfc.root = rdt_root; - ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; - fc->fs_private = &ctx->kfc; - fc->ops = &rdt_fs_context_ops; - put_user_ns(fc->user_ns); - fc->user_ns = get_user_ns(&init_user_ns); - fc->global = true; - return 0; -} - -static int reset_all_ctrls(struct rdt_resource *r) -{ - struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r); - struct rdt_hw_domain *hw_dom; - struct msr_param msr_param; - cpumask_var_t cpu_mask; - struct rdt_domain *d; - int i; - - if (!zalloc_cpumask_var(&cpu_mask, 
GFP_KERNEL)) - return -ENOMEM; - - msr_param.res = r; - msr_param.low = 0; - msr_param.high = hw_res->num_closid; - - /* - * Disable resource control for this resource by setting all - * CBMs in all domains to the maximum mask value. Pick one CPU - * from each domain to update the MSRs below. - */ - list_for_each_entry(d, &r->domains, list) { - hw_dom = resctrl_to_arch_dom(d); - cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask); - - for (i = 0; i < hw_res->num_closid; i++) - hw_dom->ctrl_val[i] = r->default_ctrl; - } - - /* Update CBM on all the CPUs in cpu_mask */ - on_each_cpu_mask(cpu_mask, rdt_ctrl_update, &msr_param, 1); - - free_cpumask_var(cpu_mask); - - return 0; -} - -/* - * Move tasks from one to the other group. If @from is NULL, then all tasks - * in the systems are moved unconditionally (used for teardown). - * - * If @mask is not NULL the cpus on which moved tasks are running are set - * in that mask so the update smp function call is restricted to affected - * cpus. - */ -static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, - struct cpumask *mask) -{ - struct task_struct *p, *t; - - read_lock(&tasklist_lock); - for_each_process_thread(p, t) { - if (!from || is_closid_match(t, from) || - is_rmid_match(t, from)) { - WRITE_ONCE(t->closid, to->closid); - WRITE_ONCE(t->rmid, to->mon.rmid); - - /* - * Order the closid/rmid stores above before the loads - * in task_curr(). This pairs with the full barrier - * between the rq->curr update and resctrl_sched_in() - * during context switch. - */ - smp_mb(); - - /* - * If the task is on a CPU, set the CPU in the mask. - * The detection is inaccurate as tasks might move or - * schedule before the smp function call takes place. - * In such a case the function call is pointless, but - * there is no other side effect. - */ - if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) - cpumask_set_cpu(task_cpu(t), mask); - } - } - read_unlock(&tasklist_lock); -} - -static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) -{ - struct rdtgroup *sentry, *stmp; - struct list_head *head; - - head = &rdtgrp->mon.crdtgrp_list; - list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { - free_rmid(sentry->mon.rmid); - list_del(&sentry->mon.crdtgrp_list); - - if (atomic_read(&sentry->waitcount) != 0) - sentry->flags = RDT_DELETED; - else - rdtgroup_remove(sentry); - } -} - -/* - * Forcibly remove all of subdirectories under root. - */ -static void rmdir_all_sub(void) -{ - struct rdtgroup *rdtgrp, *tmp; - - /* Move all tasks to the default resource group */ - rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); - - list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { - /* Free any child rmids */ - free_all_child_rdtgrp(rdtgrp); - - /* Remove each rdtgroup other than root */ - if (rdtgrp == &rdtgroup_default) - continue; - - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) - rdtgroup_pseudo_lock_remove(rdtgrp); - - /* - * Give any CPUs back to the default group. We cannot copy - * cpu_online_mask because a CPU might have executed the - * offline callback already, but is still marked online. 
- */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - free_rmid(rdtgrp->mon.rmid); - - kernfs_remove(rdtgrp->kn); - list_del(&rdtgrp->rdtgroup_list); - - if (atomic_read(&rdtgrp->waitcount) != 0) - rdtgrp->flags = RDT_DELETED; - else - rdtgroup_remove(rdtgrp); - } - /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ - update_closid_rmid(cpu_online_mask, &rdtgroup_default); - - kernfs_remove(kn_info); - kernfs_remove(kn_mongrp); - kernfs_remove(kn_mondata); -} - -static void rdt_kill_sb(struct super_block *sb) -{ - struct rdt_resource *r; - - cpus_read_lock(); - mutex_lock(&rdtgroup_mutex); - - set_mba_sc(false); - - /*Put everything back to default values. */ - for_each_alloc_capable_rdt_resource(r) - reset_all_ctrls(r); - cdp_disable_all(); - rmdir_all_sub(); - rdt_pseudo_lock_release(); - rdtgroup_default.mode = RDT_MODE_SHAREABLE; - schemata_list_destroy(); - static_branch_disable_cpuslocked(&rdt_alloc_enable_key); - static_branch_disable_cpuslocked(&rdt_mon_enable_key); - static_branch_disable_cpuslocked(&rdt_enable_key); - kernfs_kill_sb(sb); - mutex_unlock(&rdtgroup_mutex); - cpus_read_unlock(); -} - -static struct file_system_type rdt_fs_type = { - .name = "resctrl", - .init_fs_context = rdt_init_fs_context, - .parameters = rdt_fs_parameters, - .kill_sb = rdt_kill_sb, -}; - -static int mon_addfile(struct kernfs_node *parent_kn, const char *name, - void *priv) -{ - struct kernfs_node *kn; - int ret = 0; - - kn = __kernfs_create_file(parent_kn, name, 0444, - GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, - &kf_mondata_ops, priv, NULL, NULL); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - kernfs_remove(kn); - return ret; - } - - return ret; -} - -/* - * Remove all subdirectories of mon_data of ctrl_mon groups - * and monitor groups with given domain id. - */ -static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - unsigned int dom_id) -{ - struct rdtgroup *prgrp, *crgrp; - char name[32]; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - sprintf(name, "mon_%s_%02d", r->name, dom_id); - kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); - - list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) - kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); - } -} - -static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, - struct rdt_domain *d, - struct rdt_resource *r, struct rdtgroup *prgrp) -{ - union mon_data_bits priv; - struct kernfs_node *kn; - struct mon_evt *mevt; - struct rmid_read rr; - char name[32]; - int ret; - - sprintf(name, "mon_%s_%02d", r->name, d->id); - /* create the directory */ - kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); - if (IS_ERR(kn)) - return PTR_ERR(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) - goto out_destroy; - - if (WARN_ON(list_empty(&r->evt_list))) { - ret = -EPERM; - goto out_destroy; - } - - priv.u.rid = r->rid; - priv.u.domid = d->id; - list_for_each_entry(mevt, &r->evt_list, list) { - priv.u.evtid = mevt->evtid; - ret = mon_addfile(kn, mevt->name, priv.priv); - if (ret) - goto out_destroy; - - if (is_mbm_event(mevt->evtid)) - mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); - } - kernfs_activate(kn); - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/* - * Add all subdirectories of mon_data for "ctrl_mon" groups - * and "monitor" groups with given domain id. 
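
rdt_kill_sb() above runs on unmount; for orientation, the userspace pairing looks as below. A sketch only: the mount point follows sysfs_create_mount_point() earlier, and ``mba_MBps`` is one of the options accepted by rdt_parse_param()::

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            if (mount("resctrl", "/sys/fs/resctrl", "resctrl", 0, "mba_MBps"))
                    perror("mount");
            /* ... use the filesystem ... */
            if (umount("/sys/fs/resctrl"))
                    perror("umount");  /* ends up in rdt_kill_sb() */
            return 0;
    }
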
- */ -static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, - struct rdt_domain *d) -{ - struct kernfs_node *parent_kn; - struct rdtgroup *prgrp, *crgrp; - struct list_head *head; - - list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { - parent_kn = prgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, prgrp); - - head = &prgrp->mon.crdtgrp_list; - list_for_each_entry(crgrp, head, mon.crdtgrp_list) { - parent_kn = crgrp->mon.mon_data_kn; - mkdir_mondata_subdir(parent_kn, d, r, crgrp); - } - } -} - -static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, - struct rdt_resource *r, - struct rdtgroup *prgrp) -{ - struct rdt_domain *dom; - int ret; - - list_for_each_entry(dom, &r->domains, list) { - ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); - if (ret) - return ret; - } - - return 0; -} - -/* - * This creates a directory mon_data which contains the monitored data. - * - * mon_data has one directory for each domain which are named - * in the format mon__. For ex: A mon_data - * with L3 domain looks as below: - * ./mon_data: - * mon_L3_00 - * mon_L3_01 - * mon_L3_02 - * ... - * - * Each domain directory has one file per event: - * ./mon_L3_00/: - * llc_occupancy - * - */ -static int mkdir_mondata_all(struct kernfs_node *parent_kn, - struct rdtgroup *prgrp, - struct kernfs_node **dest_kn) -{ - struct rdt_resource *r; - struct kernfs_node *kn; - int ret; - - /* - * Create the mon_data directory first. - */ - ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); - if (ret) - return ret; - - if (dest_kn) - *dest_kn = kn; - - /* - * Create the subdirectories for each domain. Note that all events - * in a domain like L3 are grouped into a resource whose domain is L3 - */ - for_each_mon_capable_rdt_resource(r) { - ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); - if (ret) - goto out_destroy; - } - - return 0; - -out_destroy: - kernfs_remove(kn); - return ret; -} - -/** - * cbm_ensure_valid - Enforce validity on provided CBM - * @_val: Candidate CBM - * @r: RDT resource to which the CBM belongs - * - * The provided CBM represents all cache portions available for use. This - * may be represented by a bitmap that does not consist of contiguous ones - * and thus be an invalid CBM. - * Here the provided CBM is forced to be a valid CBM by only considering - * the first set of contiguous bits as valid and clearing all bits. - * The intention here is to provide a valid default CBM with which a new - * resource group is initialized. The user can follow this with a - * modification to the CBM if the default does not satisfy the - * requirements. - */ -static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) -{ - unsigned int cbm_len = r->cache.cbm_len; - unsigned long first_bit, zero_bit; - unsigned long val = _val; - - if (!val) - return 0; - - first_bit = find_first_bit(&val, cbm_len); - zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); - - /* Clear any remaining bits to ensure contiguous region */ - bitmap_clear(&val, zero_bit, cbm_len - zero_bit); - return (u32)val; -} - -/* - * Initialize cache resources per RDT domain - * - * Set the RDT domain up to start off with all usable allocations. That is, - * all shareable and unused bits. All-zero CBM is invalid. 
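
The fixup in cbm_ensure_valid() above keeps only the first contiguous run of set bits. A standalone re-implementation of the same rule for illustration (plain C, no kernel bitmap helpers; assumes cbm_len <= 32)::

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cbm_fixup(uint32_t val, unsigned int cbm_len)
    {
            unsigned int first_bit = 0, zero_bit;

            if (!val)
                    return 0;
            /* find the first set bit, then the first zero above it */
            while (!(val & (1u << first_bit)))
                    first_bit++;
            zero_bit = first_bit;
            while (zero_bit < cbm_len && (val & (1u << zero_bit)))
                    zero_bit++;
            /* clear every bit from the first zero upwards */
            return val & (uint32_t)((1ull << zero_bit) - 1);
    }

    int main(void)
    {
            /* 0b1011 with a 4-bit CBM becomes 0b0011 */
            printf("0x%x\n", (unsigned)cbm_fixup(0xb, 4));
            return 0;
    }
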
- */ -static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, - u32 closid) -{ - enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); - enum resctrl_conf_type t = s->conf_type; - struct resctrl_staged_config *cfg; - struct rdt_resource *r = s->res; - u32 used_b = 0, unused_b = 0; - unsigned long tmp_cbm; - enum rdtgrp_mode mode; - u32 peer_ctl, ctrl_val; - int i; - - cfg = &d->staged_config[t]; - cfg->have_new_ctrl = false; - cfg->new_ctrl = r->cache.shareable_bits; - used_b = r->cache.shareable_bits; - for (i = 0; i < closids_supported(); i++) { - if (closid_allocated(i) && i != closid) { - mode = rdtgroup_mode_by_closid(i); - if (mode == RDT_MODE_PSEUDO_LOCKSETUP) - /* - * ctrl values for locksetup aren't relevant - * until the schemata is written, and the mode - * becomes RDT_MODE_PSEUDO_LOCKED. - */ - continue; - /* - * If CDP is active include peer domain's - * usage to ensure there is no overlap - * with an exclusive group. - */ - if (resctrl_arch_get_cdp_enabled(r->rid)) - peer_ctl = resctrl_arch_get_config(r, d, i, - peer_type); - else - peer_ctl = 0; - ctrl_val = resctrl_arch_get_config(r, d, i, - s->conf_type); - used_b |= ctrl_val | peer_ctl; - if (mode == RDT_MODE_SHAREABLE) - cfg->new_ctrl |= ctrl_val | peer_ctl; - } - } - if (d->plr && d->plr->cbm > 0) - used_b |= d->plr->cbm; - unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); - unused_b &= BIT_MASK(r->cache.cbm_len) - 1; - cfg->new_ctrl |= unused_b; - /* - * Force the initial CBM to be valid, user can - * modify the CBM based on system availability. - */ - cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); - /* - * Assign the u32 CBM to an unsigned long to ensure that - * bitmap_weight() does not access out-of-bound memory. - */ - tmp_cbm = cfg->new_ctrl; - if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { - rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); - return -ENOSPC; - } - cfg->have_new_ctrl = true; - - return 0; -} - -/* - * Initialize cache resources with default values. - * - * A new RDT group is being created on an allocation capable (CAT) - * supporting system. Set this group up to start off with all usable - * allocations. - * - * If there are no more shareable bits available on any domain then - * the entire allocation will fail. - */ -static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) -{ - struct rdt_domain *d; - int ret; - - list_for_each_entry(d, &s->res->domains, list) { - ret = __init_one_rdt_domain(d, s, closid); - if (ret < 0) - return ret; - } - - return 0; -} - -/* Initialize MBA resource with default values. */ -static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) -{ - struct resctrl_staged_config *cfg; - struct rdt_domain *d; - - list_for_each_entry(d, &r->domains, list) { - if (is_mba_sc(r)) { - d->mbps_val[closid] = MBA_MAX_MBPS; - continue; - } - - cfg = &d->staged_config[CDP_NONE]; - cfg->new_ctrl = r->default_ctrl; - cfg->have_new_ctrl = true; - } -} - -/* Initialize the RDT group's allocations. 
*/ -static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) -{ - struct resctrl_schema *s; - struct rdt_resource *r; - int ret = 0; - - rdt_staged_configs_clear(); - - list_for_each_entry(s, &resctrl_schema_all, list) { - r = s->res; - if (r->rid == RDT_RESOURCE_MBA || - r->rid == RDT_RESOURCE_SMBA) { - rdtgroup_init_mba(r, rdtgrp->closid); - if (is_mba_sc(r)) - continue; - } else { - ret = rdtgroup_init_cat(s, rdtgrp->closid); - if (ret < 0) - goto out; - } - - ret = resctrl_arch_update_domains(r, rdtgrp->closid); - if (ret < 0) { - rdt_last_cmd_puts("Failed to initialize allocations\n"); - goto out; - } - - } - - rdtgrp->mode = RDT_MODE_SHAREABLE; - -out: - rdt_staged_configs_clear(); - return ret; -} - -static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, - const char *name, umode_t mode, - enum rdt_group_type rtype, struct rdtgroup **r) -{ - struct rdtgroup *prdtgrp, *rdtgrp; - struct kernfs_node *kn; - uint files = 0; - int ret; - - prdtgrp = rdtgroup_kn_lock_live(parent_kn); - if (!prdtgrp) { - ret = -ENODEV; - goto out_unlock; - } - - if (rtype == RDTMON_GROUP && - (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { - ret = -EINVAL; - rdt_last_cmd_puts("Pseudo-locking in progress\n"); - goto out_unlock; - } - - /* allocate the rdtgroup. */ - rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); - if (!rdtgrp) { - ret = -ENOSPC; - rdt_last_cmd_puts("Kernel out of memory\n"); - goto out_unlock; - } - *r = rdtgrp; - rdtgrp->mon.parent = prdtgrp; - rdtgrp->type = rtype; - INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); - - /* kernfs creates the directory for rdtgrp */ - kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); - if (IS_ERR(kn)) { - ret = PTR_ERR(kn); - rdt_last_cmd_puts("kernfs create error\n"); - goto out_free_rgrp; - } - rdtgrp->kn = kn; - - /* - * kernfs_remove() will drop the reference count on "kn" which - * will free it. But we still need it to stick around for the - * rdtgroup_kn_unlock(kn) call. Take one extra reference here, - * which will be dropped by kernfs_put() in rdtgroup_remove(). - */ - kernfs_get(kn); - - ret = rdtgroup_kn_set_ugid(kn); - if (ret) { - rdt_last_cmd_puts("kernfs perm error\n"); - goto out_destroy; - } - - files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype); - ret = rdtgroup_add_files(kn, files); - if (ret) { - rdt_last_cmd_puts("kernfs fill error\n"); - goto out_destroy; - } - - if (rdt_mon_capable) { - ret = alloc_rmid(); - if (ret < 0) { - rdt_last_cmd_puts("Out of RMIDs\n"); - goto out_destroy; - } - rdtgrp->mon.rmid = ret; - - ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_idfree; - } - } - kernfs_activate(kn); - - /* - * The caller unlocks the parent_kn upon success. - */ - return 0; - -out_idfree: - free_rmid(rdtgrp->mon.rmid); -out_destroy: - kernfs_put(rdtgrp->kn); - kernfs_remove(rdtgrp->kn); -out_free_rgrp: - kfree(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) -{ - kernfs_remove(rgrp->kn); - free_rmid(rgrp->mon.rmid); - rdtgroup_remove(rgrp); -} - -/* - * Create a monitor group under "mon_groups" directory of a control - * and monitor group(ctrl_mon). This is a resource group - * to monitor a subset of tasks and cpus in its parent ctrl_mon group. 
- */ -static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp, *prgrp; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); - if (ret) - return ret; - - prgrp = rdtgrp->mon.parent; - rdtgrp->closid = prgrp->closid; - - /* - * Add the rdtgrp to the list of rdtgrps the parent - * ctrl_mon group has to track. - */ - list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); - - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * These are rdtgroups created under the root directory. Can be used - * to allocate and monitor resources. - */ -static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, - const char *name, umode_t mode) -{ - struct rdtgroup *rdtgrp; - struct kernfs_node *kn; - u32 closid; - int ret; - - ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); - if (ret) - return ret; - - kn = rdtgrp->kn; - ret = closid_alloc(); - if (ret < 0) { - rdt_last_cmd_puts("Out of CLOSIDs\n"); - goto out_common_fail; - } - closid = ret; - ret = 0; - - rdtgrp->closid = closid; - ret = rdtgroup_init_alloc(rdtgrp); - if (ret < 0) - goto out_id_free; - - list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); - - if (rdt_mon_capable) { - /* - * Create an empty mon_groups directory to hold the subset - * of tasks and cpus to monitor. - */ - ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); - if (ret) { - rdt_last_cmd_puts("kernfs subdir error\n"); - goto out_del_list; - } - } - - goto out_unlock; - -out_del_list: - list_del(&rdtgrp->rdtgroup_list); -out_id_free: - closid_free(closid); -out_common_fail: - mkdir_rdt_prepare_clean(rdtgrp); -out_unlock: - rdtgroup_kn_unlock(parent_kn); - return ret; -} - -/* - * We allow creating mon groups only with in a directory called "mon_groups" - * which is present in every ctrl_mon group. Check if this is a valid - * "mon_groups" directory. - * - * 1. The directory should be named "mon_groups". - * 2. The mon group itself should "not" be named "mon_groups". - * This makes sure "mon_groups" directory always has a ctrl_mon group - * as parent. - */ -static bool is_mon_groups(struct kernfs_node *kn, const char *name) -{ - return (!strcmp(kn->name, "mon_groups") && - strcmp(name, "mon_groups")); -} - -static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, - umode_t mode) -{ - /* Do not accept '\n' to avoid unparsable situation. */ - if (strchr(name, '\n')) - return -EINVAL; - - /* - * If the parent directory is the root directory and RDT - * allocation is supported, add a control and monitoring - * subdirectory - */ - if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn) - return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); - - /* - * If RDT monitoring is supported and the parent directory is a valid - * "mon_groups" directory, add a monitoring subdirectory. - */ - if (rdt_mon_capable && is_mon_groups(parent_kn, name)) - return rdtgroup_mkdir_mon(parent_kn, name, mode); - - return -EPERM; -} - -static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - int cpu; - - /* Give any tasks back to the parent group */ - rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); - - /* Update per cpu rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) - per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid; - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. 
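
rdtgroup_mkdir() above is driven by ordinary mkdir(2) calls on the mounted filesystem. A sketch of the two directory shapes it accepts (group names ``g1``/``m1`` are assumptions, not from the patch)::

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            /* ctrl_mon group: parent is the resctrl root */
            if (mkdir("/sys/fs/resctrl/g1", 0755))
                    perror("mkdir g1");
            /* mon group: parent must be a mon_groups directory */
            if (mkdir("/sys/fs/resctrl/g1/mon_groups/m1", 0755))
                    perror("mkdir m1");
            return 0;
    }
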
- */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - rdtgrp->flags = RDT_DELETED; - free_rmid(rdtgrp->mon.rmid); - - /* - * Remove the rdtgrp from the parent ctrl_mon group's list - */ - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_del(&rdtgrp->mon.crdtgrp_list); - - kernfs_remove(rdtgrp->kn); - - return 0; -} - -static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) -{ - rdtgrp->flags = RDT_DELETED; - list_del(&rdtgrp->rdtgroup_list); - - kernfs_remove(rdtgrp->kn); - return 0; -} - -static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) -{ - int cpu; - - /* Give any tasks back to the default group */ - rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); - - /* Give any CPUs back to the default group */ - cpumask_or(&rdtgroup_default.cpu_mask, - &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); - - /* Update per cpu closid and rmid of the moved CPUs first */ - for_each_cpu(cpu, &rdtgrp->cpu_mask) { - per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid; - per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid; - } - - /* - * Update the MSR on moved CPUs and CPUs which have moved - * task running on them. - */ - cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); - update_closid_rmid(tmpmask, NULL); - - closid_free(rdtgrp->closid); - free_rmid(rdtgrp->mon.rmid); - - rdtgroup_ctrl_remove(rdtgrp); - - /* - * Free all the child monitor group rmids. - */ - free_all_child_rdtgrp(rdtgrp); - - return 0; -} - -static int rdtgroup_rmdir(struct kernfs_node *kn) -{ - struct kernfs_node *parent_kn = kn->parent; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret = 0; - - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) - return -ENOMEM; - - rdtgrp = rdtgroup_kn_lock_live(kn); - if (!rdtgrp) { - ret = -EPERM; - goto out; - } - - /* - * If the rdtgroup is a ctrl_mon group and parent directory - * is the root directory, remove the ctrl_mon group. - * - * If the rdtgroup is a mon group and parent directory - * is a valid "mon_groups" directory, remove the mon group. - */ - if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && - rdtgrp != &rdtgroup_default) { - if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || - rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { - ret = rdtgroup_ctrl_remove(rdtgrp); - } else { - ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); - } - } else if (rdtgrp->type == RDTMON_GROUP && - is_mon_groups(parent_kn, kn->name)) { - ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); - } else { - ret = -EPERM; - } - -out: - rdtgroup_kn_unlock(kn); - free_cpumask_var(tmpmask); - return ret; -} - -/** - * mongrp_reparent() - replace parent CTRL_MON group of a MON group - * @rdtgrp: the MON group whose parent should be replaced - * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp - * @cpus: cpumask provided by the caller for use during this call - * - * Replaces the parent CTRL_MON group for a MON group, resulting in all member - * tasks' CLOSID immediately changing to that of the new parent group. - * Monitoring data for the group is unaffected by this operation. - */ -static void mongrp_reparent(struct rdtgroup *rdtgrp, - struct rdtgroup *new_prdtgrp, - cpumask_var_t cpus) -{ - struct rdtgroup *prdtgrp = rdtgrp->mon.parent; - - WARN_ON(rdtgrp->type != RDTMON_GROUP); - WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); - - /* Nothing to do when simply renaming a MON group. 
*/ - if (prdtgrp == new_prdtgrp) - return; - - WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); - list_move_tail(&rdtgrp->mon.crdtgrp_list, - &new_prdtgrp->mon.crdtgrp_list); - - rdtgrp->mon.parent = new_prdtgrp; - rdtgrp->closid = new_prdtgrp->closid; - - /* Propagate updated closid to all tasks in this group. */ - rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); - - update_closid_rmid(cpus, NULL); -} - -static int rdtgroup_rename(struct kernfs_node *kn, - struct kernfs_node *new_parent, const char *new_name) -{ - struct rdtgroup *new_prdtgrp; - struct rdtgroup *rdtgrp; - cpumask_var_t tmpmask; - int ret; - - rdtgrp = kernfs_to_rdtgroup(kn); - new_prdtgrp = kernfs_to_rdtgroup(new_parent); - if (!rdtgrp || !new_prdtgrp) - return -ENOENT; - - /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ - rdtgroup_kn_get(rdtgrp, kn); - rdtgroup_kn_get(new_prdtgrp, new_parent); - - mutex_lock(&rdtgroup_mutex); - - rdt_last_cmd_clear(); - - /* - * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if - * either kernfs_node is a file. - */ - if (kernfs_type(kn) != KERNFS_DIR || - kernfs_type(new_parent) != KERNFS_DIR) { - rdt_last_cmd_puts("Source and destination must be directories"); - ret = -EPERM; - goto out; - } - - if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { - ret = -ENOENT; - goto out; - } - - if (rdtgrp->type != RDTMON_GROUP || !kn->parent || - !is_mon_groups(kn->parent, kn->name)) { - rdt_last_cmd_puts("Source must be a MON group\n"); - ret = -EPERM; - goto out; - } - - if (!is_mon_groups(new_parent, new_name)) { - rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); - ret = -EPERM; - goto out; - } - - /* - * If the MON group is monitoring CPUs, the CPUs must be assigned to the - * current parent CTRL_MON group and therefore cannot be assigned to - * the new parent, making the move illegal. - */ - if (!cpumask_empty(&rdtgrp->cpu_mask) && - rdtgrp->mon.parent != new_prdtgrp) { - rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); - ret = -EPERM; - goto out; - } - - /* - * Allocate the cpumask for use in mongrp_reparent() to avoid the - * possibility of failing to allocate it after kernfs_rename() has - * succeeded. - */ - if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { - ret = -ENOMEM; - goto out; - } - - /* - * Perform all input validation and allocations needed to ensure - * mongrp_reparent() will succeed before calling kernfs_rename(), - * otherwise it would be necessary to revert this call if - * mongrp_reparent() failed. 
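
The checks above gate the only rename resctrl permits: moving a MON group from one ctrl_mon group's ``mon_groups`` directory to another's. An illustrative userspace sketch, assuming the groups from the previous examples plus a second control group ``g2``::

    #include <stdio.h>

    int main(void)
    {
            /* reparents m1 from g1 to g2; its monitoring data is kept */
            if (rename("/sys/fs/resctrl/g1/mon_groups/m1",
                       "/sys/fs/resctrl/g2/mon_groups/m1"))
                    perror("rename");
            return 0;
    }
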
- */ - ret = kernfs_rename(kn, new_parent, new_name); - if (!ret) - mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); - - free_cpumask_var(tmpmask); - -out: - mutex_unlock(&rdtgroup_mutex); - rdtgroup_kn_put(rdtgrp, kn); - rdtgroup_kn_put(new_prdtgrp, new_parent); - return ret; -} - -static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) -{ - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) - seq_puts(seq, ",cdp"); - - if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) - seq_puts(seq, ",cdpl2"); - - if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl)) - seq_puts(seq, ",mba_MBps"); - - return 0; -} - -static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { - .mkdir = rdtgroup_mkdir, - .rmdir = rdtgroup_rmdir, - .rename = rdtgroup_rename, - .show_options = rdtgroup_show_options, -}; - -static int __init rdtgroup_setup_root(void) -{ - int ret; - - rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, - KERNFS_ROOT_CREATE_DEACTIVATED | - KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, - &rdtgroup_default); - if (IS_ERR(rdt_root)) - return PTR_ERR(rdt_root); - - mutex_lock(&rdtgroup_mutex); - - rdtgroup_default.closid = 0; - rdtgroup_default.mon.rmid = 0; - rdtgroup_default.type = RDTCTRL_GROUP; - INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); - - list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); - - ret = rdtgroup_add_files(kernfs_root_to_node(rdt_root), RF_CTRL_BASE); - if (ret) { - kernfs_destroy_root(rdt_root); - goto out; - } - - rdtgroup_default.kn = kernfs_root_to_node(rdt_root); - kernfs_activate(rdtgroup_default.kn); - -out: - mutex_unlock(&rdtgroup_mutex); - - return ret; -} - -static void domain_destroy_mon_state(struct rdt_domain *d) -{ - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - kfree(d->mbm_local); -} - -void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - lockdep_assert_held(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - mba_sc_domain_destroy(r, d); - - if (!r->mon_capable) - return; - - /* - * If resctrl is mounted, remove all the - * per domain monitor data directories. - */ - if (static_branch_unlikely(&rdt_mon_enable_key)) - rmdir_mondata_subdir_allrdtgrp(r, d->id); - - if (is_mbm_enabled()) - cancel_delayed_work(&d->mbm_over); - if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) { - /* - * When a package is going down, forcefully - * decrement rmid->ebusy. There is no way to know - * that the L3 was flushed and hence may lead to - * incorrect counts in rare scenarios, but leaving - * the RMID as busy creates RMID leaks if the - * package never comes back. 
- */ - __check_limbo(d, true); - cancel_delayed_work(&d->cqm_limbo); - } - - domain_destroy_mon_state(d); -} - -static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) -{ - size_t tsize; - - if (is_llc_occupancy_enabled()) { - d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL); - if (!d->rmid_busy_llc) - return -ENOMEM; - } - if (is_mbm_total_enabled()) { - tsize = sizeof(*d->mbm_total); - d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL); - if (!d->mbm_total) { - bitmap_free(d->rmid_busy_llc); - return -ENOMEM; - } - } - if (is_mbm_local_enabled()) { - tsize = sizeof(*d->mbm_local); - d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL); - if (!d->mbm_local) { - bitmap_free(d->rmid_busy_llc); - kfree(d->mbm_total); - return -ENOMEM; - } - } - - return 0; -} - -int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) -{ - int err; - - lockdep_assert_held(&rdtgroup_mutex); - - if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) - /* RDT_RESOURCE_MBA is never mon_capable */ - return mba_sc_domain_allocate(r, d); - - if (!r->mon_capable) - return 0; - - err = domain_setup_mon_state(r, d); - if (err) - return err; - - if (is_mbm_enabled()) { - INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); - mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL); - } - - if (is_llc_occupancy_enabled()) - INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); - - /* If resctrl is mounted, add per domain monitor data directories. */ - if (static_branch_unlikely(&rdt_mon_enable_key)) - mkdir_mondata_subdir_allrdtgrp(r, d); - - return 0; -} - -/* - * rdtgroup_init - rdtgroup initialization - * - * Setup resctrl file system including set up root, create mount point, - * register rdtgroup filesystem, and initialize files under root directory. - * - * Return: 0 on success or -errno - */ -int __init rdtgroup_init(void) -{ - int ret = 0; - - seq_buf_init(&last_cmd_status, last_cmd_status_buf, - sizeof(last_cmd_status_buf)); - - ret = rdtgroup_setup_root(); - if (ret) - return ret; - - ret = sysfs_create_mount_point(fs_kobj, "resctrl"); - if (ret) - goto cleanup_root; - - ret = register_filesystem(&rdt_fs_type); - if (ret) - goto cleanup_mountpoint; - - /* - * Adding the resctrl debugfs directory here may not be ideal since - * it would let the resctrl debugfs directory appear on the debugfs - * filesystem before the resctrl filesystem is mounted. - * It may also be ok since that would enable debugging of RDT before - * resctrl is mounted. - * The reason why the debugfs directory is created here and not in - * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and - * during the debugfs directory creation also &sb->s_type->i_mutex_key - * (the lockdep class of inode->i_rwsem). Other filesystem - * interactions (eg. SyS_getdents) have the lock ordering: - * &sb->s_type->i_mutex_key --> &mm->mmap_lock - * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex - * is taken, thus creating dependency: - * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause - * issues considering the other two lock dependencies. - * By creating the debugfs directory here we avoid a dependency - * that may cause deadlock (even though file operations cannot - * occur until the filesystem is mounted, but I do not know how to - * tell lockdep that). 
- */ - debugfs_resctrl = debugfs_create_dir("resctrl", NULL); - - return 0; - -cleanup_mountpoint: - sysfs_remove_mount_point(fs_kobj, "resctrl"); -cleanup_root: - kernfs_destroy_root(rdt_root); - - return ret; -} - -void __exit rdtgroup_exit(void) -{ - debugfs_remove_recursive(debugfs_resctrl); - unregister_filesystem(&rdt_fs_type); - sysfs_remove_mount_point(fs_kobj, "resctrl"); - kernfs_destroy_root(rdt_root); + for_each_capable_rdt_resource(r) + reset_all_ctrls(r); } diff --git a/arch/x86/kernel/cpu/zhaoxin.c b/arch/x86/kernel/cpu/zhaoxin.c index 05fa4ef634902293e3286705134168b40d812932..8e4201ad1d23115bd589b259dee3534e6d224ae6 100644 --- a/arch/x86/kernel/cpu/zhaoxin.c +++ b/arch/x86/kernel/cpu/zhaoxin.c @@ -79,16 +79,31 @@ static void early_init_zhaoxin(struct cpuinfo_x86 *c) c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); } + /* + * These CPUs declare support SSE4.2 instruction sets but + * having low performance CRC32C instruction implementation. + */ + if (c->x86 == 0x6 || (c->x86 == 0x7 && c->x86_model <= 0x3b)) + set_cpu_cap(c, X86_FEATURE_CRC32C_LOW_PERF); + + if (cpuid_eax(0xC0000000) >= 0xC0000006) + c->x86_capability[CPUID_C000_0006_EAX] = cpuid_eax(0xC0000006); + + if (detect_extended_topology_early(c) < 0) + detect_ht_early(c); } static void init_zhaoxin(struct cpuinfo_x86 *c) { early_init_zhaoxin(c); + detect_extended_topology(c); init_intel_cacheinfo(c); - detect_num_cpu_cores(c); + if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { + detect_num_cpu_cores(c); #ifdef CONFIG_X86_32 detect_ht(c); #endif + } if (c->cpuid_level > 9) { unsigned int eax = cpuid_eax(10); diff --git a/arch/x86/kernel/cpu/zxpause.c b/arch/x86/kernel/cpu/zxpause.c new file mode 100644 index 0000000000000000000000000000000000000000..7f55f5d9e8c0cbb70c30c260705040901eee2a53 --- /dev/null +++ b/arch/x86/kernel/cpu/zxpause.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +#define ZXPAUSE_C02_ENABLE 0 + +#define ZXPAUSE_CTRL_VAL(max_time, c02_disable) \ + (((max_time) & MSR_ZX_PAUSE_CONTROL_TIME_MASK) | \ + ((c02_disable) & MSR_ZX_PAUSE_CONTROL_C02_DISABLE)) + +/* + * Cache ZX_PAUSE_CONTROL MSR. This is a systemwide control. By default, + * zxpause max time is 100000 in TSC-quanta and C0.2 is enabled + */ +static u32 zxpause_control_cached = ZXPAUSE_CTRL_VAL(100000, ZXPAUSE_C02_ENABLE); + +/* + * Cache the original ZX_PAUSE_CONTROL MSR value which is configured by + * hardware or BIOS before kernel boot. + */ +static u32 orig_zxpause_control_cached __ro_after_init; + +/* + * Serialize access to zxpause_control_cached and ZX_PAUSE_CONTROL MSR in + * the sysfs write functions. + */ +static DEFINE_MUTEX(zxpause_lock); + +static void zxpause_update_control_msr(void *unused) +{ + lockdep_assert_irqs_disabled(); + wrmsr(MSR_ZX_PAUSE_CONTROL, READ_ONCE(zxpause_control_cached), 0); +} + +/* + * The CPU hotplug callback sets the control MSR to the global control + * value. + * + * Disable interrupts so the read of zxpause_control_cached and the WRMSR + * are protected against a concurrent sysfs write. Otherwise the sysfs + * write could update the cached value after it had been read on this CPU + * and issue the IPI before the old value had been written. The IPI would + * interrupt, write the new value and after return from IPI the previous + * value would be written by this CPU. 
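
The cached control value above feeds the sysfs attributes registered later in this file (``zxpause_control/enable_c02`` and ``zxpause_control/max_time`` under the CPU subsystem root). A userspace sketch of programming ``max_time``; the path is an assumption derived from the attribute group name::

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/devices/system/cpu/zxpause_control/max_time", "w");

            if (!f)
                    return 1;
            /* bits[1:0] must be zero, as enforced by max_time_store() */
            fprintf(f, "%u\n", 100000u);
            return fclose(f) ? 1 : 0;
    }
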
+ * + * With interrupts disabled the upcoming CPU either sees the new control + * value or the IPI is updating this CPU to the new control value after + * interrupts have been reenabled. + */ +static int zxpause_cpu_online(unsigned int cpu) +{ + local_irq_disable(); + zxpause_update_control_msr(NULL); + local_irq_enable(); + return 0; +} + +/* + * The CPU hotplug callback sets the control MSR to the original control + * value. + */ +static int zxpause_cpu_offline(unsigned int cpu) +{ + /* + * This code is protected by the CPU hotplug already and + * orig_zxpause_control_cached is never changed after it caches + * the original control MSR value in zxpause_init(). So there + * is no race condition here. + */ + wrmsr(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached, 0); + + return 0; +} + +/* + * On resume, restore ZX_PAUSE_CONTROL MSR on the boot processor which + * is the only active CPU at this time. The MSR is set up on the APs via the + * CPU hotplug callback. + * + * This function is invoked on resume from suspend and hibernation. On + * resume from suspend the restore should be not required, but we neither + * trust the firmware nor does it matter if the same value is written + * again. + */ +static void zxpause_syscore_resume(void) +{ + zxpause_update_control_msr(NULL); +} + +static struct syscore_ops zxpause_syscore_ops = { + .resume = zxpause_syscore_resume, +}; + +/* sysfs interface */ + +/* + * When bit 0 in ZX_PAUSE_CONTROL MSR is 1, C0.2 is disabled. + * Otherwise, C0.2 is enabled. + */ +static inline bool zxpause_ctrl_c02_enabled(u32 ctrl) +{ + return !(ctrl & MSR_ZX_PAUSE_CONTROL_C02_DISABLE); +} + +static inline u32 zxpause_ctrl_max_time(u32 ctrl) +{ + return ctrl & MSR_ZX_PAUSE_CONTROL_TIME_MASK; +} + +static inline void zxpause_update_control(u32 maxtime, bool c02_enable) +{ + u32 ctrl = maxtime & MSR_ZX_PAUSE_CONTROL_TIME_MASK; + + if (!c02_enable) + ctrl |= MSR_ZX_PAUSE_CONTROL_C02_DISABLE; + + WRITE_ONCE(zxpause_control_cached, ctrl); + /* Propagate to all CPUs */ + on_each_cpu(zxpause_update_control_msr, NULL, 1); +} + +static ssize_t +enable_c02_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%d\n", zxpause_ctrl_c02_enabled(ctrl)); +} + +static ssize_t enable_c02_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + bool c02_enable; + u32 ctrl; + int ret; + + ret = kstrtobool(buf, &c02_enable); + if (ret) + return ret; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (c02_enable != zxpause_ctrl_c02_enabled(ctrl)) + zxpause_update_control(ctrl, c02_enable); + + mutex_unlock(&zxpause_lock); + + return count; +} +static DEVICE_ATTR_RW(enable_c02); + +static ssize_t +max_time_show(struct device *kobj, struct device_attribute *attr, char *buf) +{ + u32 ctrl = READ_ONCE(zxpause_control_cached); + + return sprintf(buf, "%u\n", zxpause_ctrl_max_time(ctrl)); +} + +static ssize_t max_time_store(struct device *kobj, + struct device_attribute *attr, + const char *buf, size_t count) +{ + u32 max_time, ctrl; + int ret; + + ret = kstrtou32(buf, 0, &max_time); + if (ret) + return ret; + + /* bits[1:0] must be zero */ + if (max_time & ~MSR_ZX_PAUSE_CONTROL_TIME_MASK) + return -EINVAL; + + mutex_lock(&zxpause_lock); + + ctrl = READ_ONCE(zxpause_control_cached); + if (max_time != zxpause_ctrl_max_time(ctrl)) + zxpause_update_control(max_time, zxpause_ctrl_c02_enabled(ctrl)); + + mutex_unlock(&zxpause_lock); + + return 
count;
+}
+static DEVICE_ATTR_RW(max_time);
+
+static struct attribute *zxpause_attrs[] = {
+	&dev_attr_enable_c02.attr,
+	&dev_attr_max_time.attr,
+	NULL
+};
+
+static struct attribute_group zxpause_attr_group = {
+	.attrs = zxpause_attrs,
+	.name = "zxpause_control",
+};
+
+static int __init zxpause_init(void)
+{
+	struct device *dev;
+	int ret;
+
+	if (!boot_cpu_has(X86_FEATURE_ZXPAUSE))
+		return -ENODEV;
+
+	/*
+	 * Cache the original control MSR value before the control MSR is
+	 * changed. This is the only place where orig_zxpause_control_cached
+	 * is modified.
+	 */
+	rdmsrl(MSR_ZX_PAUSE_CONTROL, orig_zxpause_control_cached);
+
+	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "zxpause:online",
+				zxpause_cpu_online, zxpause_cpu_offline);
+	if (ret < 0) {
+		/*
+		 * On failure, the control MSR on all CPUs has the
+		 * original control value.
+		 */
+		return ret;
+	}
+
+	register_syscore_ops(&zxpause_syscore_ops);
+
+	/*
+	 * Add the zxpause control interface. Ignore failure, so at least the
+	 * default values are set up in case the machine manages to boot.
+	 */
+	dev = bus_get_dev_root(&cpu_subsys);
+	return sysfs_create_group(&dev->kobj, &zxpause_attr_group);
+}
+device_initcall(zxpause_init);
diff --git a/arch/x86/kernel/csv-shared.c b/arch/x86/kernel/csv-shared.c
new file mode 100644
index 0000000000000000000000000000000000000000..fd55e570bbbbde43092c40bcfb09fc337fef8645
--- /dev/null
+++ b/arch/x86/kernel/csv-shared.c
@@ -0,0 +1,215 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Hygon CSV support
+ *
+ * This file is shared between decompression boot code and the running
+ * Linux kernel.
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#include
+
+#define CPUID_VENDOR_HygonGenuine_ebx	0x6f677948
+#define CPUID_VENDOR_HygonGenuine_ecx	0x656e6975
+#define CPUID_VENDOR_HygonGenuine_edx	0x6e65476e
+
+#define MSR_CSV3_ENABLED_BIT	30
+#define MSR_CSV3_ENABLED	BIT_ULL(MSR_CSV3_ENABLED_BIT)
+
+/*
+ ****************************** CSV3 secure call *******************************
+ *
+ * A CSV3 guest is based on the Hygon secure isolated virtualization feature.
+ * A secure processor, which resides in the Hygon SoC, manages the guest's
+ * private memory. The secure processor allocates or frees private memory for
+ * the CSV3 guest and manages the CSV3 guest's nested page table.
+ *
+ * As the secure processor is considered a PCI device in the host, the CSV3
+ * guest cannot communicate with it directly. However, the CSV3 guest must
+ * request the secure processor to change its physical memory between private
+ * memory and shared memory. The CSV3 secure call command is a method used to
+ * communicate with the secure processor such that the host cannot tamper with
+ * the data in the CSV3 guest. The host can only perform an external command to
+ * notify the secure processor to handle the pending guest command.
+ *
+ * CSV3 secure call pages:
+ * Secure call pages are two dedicated pages reserved by the BIOS. We define
+ * the secure call pages as page A and page B. During the guest launch stage,
+ * the secure processor parses the address of the secure call pages. The secure
+ * processor maps the two pages to the same private memory page in the NPT, and
+ * always sets one page as present and the other as non-present in the NPT.
+ *
+ * CSV3 secure call main work flow:
+ * If we write the guest's commands to one page, then read them from the other
+ * page, a nested page fault happens and the guest exits to the host. The host
+ * then performs an external command with the gpa (page A or page B) to the
+ * secure processor.
+ * The secure processor checks that the gpa in the NPF belongs to the secure
+ * call pages, reads the guest's command and handles it, then switches the
+ * present bit between the two pages.
+ *
+ *     guest page A              guest page B
+ *          |                         |
+ *      ____|_________________________|____
+ *     |                                   |
+ *     |         nested page table         |
+ *     |___________________________________|
+ *         \                           /
+ *          \                         /
+ *           \                       /
+ *            \                     /
+ *             \                   /
+ *              secure memory page
+ *
+ * CSV3_SECURE_CMD_ENC:
+ *   CSV3 guest declares a specified memory range as secure. By default, all
+ *   of the CSV3 guest's memory is mapped as secure.
+ *   The secure processor allocates a block of secure memory and maps the
+ *   memory in the CSV3 guest's NPT with the specified guest physical memory
+ *   range in the CSV3 secure call.
+ *
+ * CSV3_SECURE_CMD_DEC:
+ *   CSV3 guest declares a specified memory range as shared.
+ *   The secure processor saves the guest physical memory range in its own ram
+ *   and frees the range in the CSV3 guest's NPT. When the CSV3 guest accesses
+ *   the memory, a new nested page fault happens.
+ *
+ * CSV3_SECURE_CMD_RESET:
+ *   CSV3 guest switches all of the shared memory to secure.
+ *   The secure processor resets all the shared memory in the CSV3 guest's NPT
+ *   and clears the saved shared memory ranges. Then the secure processor
+ *   allocates secure memory to map in the CSV3 guest's NPT.
+ *
+ * CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE:
+ *   CSV3 guest wants to change the secure call pages.
+ *   The secure processor re-initializes the secure call context.
+ */
+enum csv3_secure_command_type {
+	CSV3_SECURE_CMD_ENC = 1,
+	CSV3_SECURE_CMD_DEC,
+	CSV3_SECURE_CMD_RESET,
+	CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE,
+};
+
+/*
+ * Secure call page fields.
+ * The secure call page size is always 4KB. The CSV3 secure call page structure
+ * is defined as below.
+ *
+ * guid:         Must be in the first 128 bytes of the page. Its value must
+ *               always be (0xceba2fa59a5d926ful, 0xa556555d276b21abul).
+ * cmd_type:     Command to be issued to the secure processor.
+ * nums:         Number of entries in the command.
+ * base_address: Start address of the memory range.
+ * size:         Size of the memory range.
+ */
+#define SECURE_CALL_ENTRY_MAX		(254)
+
+/* size of secure call cmd is 4KB. */
+struct csv3_secure_call_cmd {
+	union {
+		u8	guid[16];
+		u64	guid_64[2];
+	};
+	u32	cmd_type;
+	u32	nums;
+	u64	unused;
+	struct {
+		u64	base_address;
+		u64	size;
+	} entry[SECURE_CALL_ENTRY_MAX];
+};
+
+/* csv3 secure call guid, do not change the value. */
+#define CSV3_SECURE_CALL_GUID_LOW	0xceba2fa59a5d926ful
+#define CSV3_SECURE_CALL_GUID_HIGH	0xa556555d276b21abul
+
+static u64 csv3_boot_sc_page_a __initdata = -1ul;
+static u64 csv3_boot_sc_page_b __initdata = -1ul;
+static u32 early_page_idx __initdata;
+
+/**
+ * csv3_scan_secure_call_pages - try to find the secure call pages.
+ * @boot_params: boot parameters where e820_table resides.
+ *
+ * The secure call pages are reserved by BIOS. We scan all the reserved pages
+ * to check the CSV3 secure call guid bytes.
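
The field layout documented above implies the command structure exactly fills the 4KB page: 16 bytes of guid, 16 bytes of header, and 254 entries of 16 bytes each. A compile-time check of that arithmetic, using a userspace mirror of the struct with stdint types (not the kernel definition itself)::

    #include <assert.h>
    #include <stdint.h>

    #define SECURE_CALL_ENTRY_MAX 254

    struct csv3_secure_call_cmd {
            union {
                    uint8_t  guid[16];
                    uint64_t guid_64[2];
            };
            uint32_t cmd_type;
            uint32_t nums;
            uint64_t unused;
            struct {
                    uint64_t base_address;
                    uint64_t size;
            } entry[SECURE_CALL_ENTRY_MAX];
    };

    /* 16 + 4 + 4 + 8 + 254 * 16 = 4096, with no padding needed */
    static_assert(sizeof(struct csv3_secure_call_cmd) == 4096,
                  "secure call page must be exactly 4KB");
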
+ */ +void __init csv3_scan_secure_call_pages(struct boot_params *boot_params) +{ + struct boot_e820_entry *entry; + struct csv3_secure_call_cmd *sc_page; + u64 offset; + u64 addr; + u8 i; + u8 table_num; + int count = 0; + + if (!boot_params) + return; + + if (csv3_boot_sc_page_a != -1ul && csv3_boot_sc_page_b != -1ul) + return; + + table_num = min_t(u8, boot_params->e820_entries, + E820_MAX_ENTRIES_ZEROPAGE); + entry = &boot_params->e820_table[0]; + for (i = 0; i < table_num; i++) { + if (entry[i].type != E820_TYPE_RESERVED) + continue; + + addr = entry[i].addr & PAGE_MASK; + for (offset = 0; offset < entry[i].size; offset += PAGE_SIZE) { + sc_page = (void *)(addr + offset); + if (sc_page->guid_64[0] == CSV3_SECURE_CALL_GUID_LOW && + sc_page->guid_64[1] == CSV3_SECURE_CALL_GUID_HIGH) { + if (count == 0) + csv3_boot_sc_page_a = addr + offset; + else if (count == 1) + csv3_boot_sc_page_b = addr + offset; + count++; + } + if (count >= 2) + return; + } + } +} + +/** + * csv3_early_secure_call - issue early secure call command at the stage where + * identity page table is created. + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. + */ +void __init csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + /* identity mapping at the stage. */ + page_rd = (void *)(early_page_idx ? csv3_boot_sc_page_a : csv3_boot_sc_page_b); + page_wr = (void *)(early_page_idx ? csv3_boot_sc_page_b : csv3_boot_sc_page_a); + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + early_page_idx ^= 1; +} diff --git a/arch/x86/kernel/csv.c b/arch/x86/kernel/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..c0ad12aa94f3e5965656b14c67d51c9052998cb5 --- /dev/null +++ b/arch/x86/kernel/csv.c @@ -0,0 +1,306 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
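
The write-command/poll-ack loop in csv3_early_secure_call() above, which recurs throughout csv.c below, can be modeled in isolation. A compilable sketch of just the handshake; the two page pointers stand in for the real aliased guest pages, and __sync_synchronize() is the GCC/Clang barrier builtin::

    #include <stdint.h>

    struct sc_page {
            uint32_t cmd_type;
    };

    /*
     * wr and rd alias one private page via the secure processor's NPT
     * mapping. The command is acknowledged once the value read back from
     * rd no longer echoes the command just written to wr.
     */
    static void secure_call_model(volatile struct sc_page *wr,
                                  volatile struct sc_page *rd, uint32_t cmd)
    {
            for (;;) {
                    wr->cmd_type = cmd;
                    __sync_synchronize(); /* order the write before the ack read */
                    if (rd->cmd_type != cmd)
                            break;
            }
    }

    int main(void)
    {
            struct sc_page a = { 0 }, b = { 0 };

            secure_call_model(&a, &b, 1); /* returns at once: no device here */
            return 0;
    }
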
+ */ + +#include +#include +#include +#include +#include + +#include "../mm/mm_internal.h" +#include "csv-shared.c" + +u32 vendor_ebx __section(".data") = 0; +u32 vendor_ecx __section(".data") = 0; +u32 vendor_edx __section(".data") = 0; + +struct secure_call_pages { + struct csv3_secure_call_cmd page_a; + struct csv3_secure_call_cmd page_b; +}; + +static u32 csv3_percpu_secure_call_init __initdata; +static u32 early_secure_call_page_idx __initdata; + +static DEFINE_PER_CPU(struct secure_call_pages*, secure_call_data); +static DEFINE_PER_CPU(int, secure_call_page_idx); + +typedef void (*csv3_secure_call_func)(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type); + +bool noinstr csv3_active(void) +{ + if (vendor_ebx == 0 || vendor_ecx == 0 || vendor_edx == 0) { + u32 eax = 0; + + native_cpuid(&eax, &vendor_ebx, &vendor_ecx, &vendor_edx); + } + + /* HygonGenuine */ + if (vendor_ebx == CPUID_VENDOR_HygonGenuine_ebx && + vendor_ecx == CPUID_VENDOR_HygonGenuine_ecx && + vendor_edx == CPUID_VENDOR_HygonGenuine_edx) + return !!(sev_status & MSR_CSV3_ENABLED); + else + return false; +} + +void __init csv_early_reset_memory(struct boot_params *bp) +{ + if (!csv3_active()) + return; + + csv3_scan_secure_call_pages(bp); + csv3_early_secure_call(0, 0, CSV3_SECURE_CMD_RESET); +} + +void __init csv_early_update_memory_dec(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_DEC); +} + +void __init csv_early_update_memory_enc(u64 vaddr, u64 pages) +{ + if (!csv3_active()) + return; + + if (pages) + csv3_early_secure_call(__pa(vaddr), pages, CSV3_SECURE_CMD_ENC); +} + +static void __init csv3_alloc_secure_call_data(int cpu) +{ + struct secure_call_pages *data; + + data = memblock_alloc(sizeof(*data), PAGE_SIZE); + if (!data) + panic("Can't allocate CSV3 secure all data"); + + per_cpu(secure_call_data, cpu) = data; +} + +static void __init csv3_secure_call_update_table(void) +{ + int cpu; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (!csv3_active()) + return; + + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, PAGE_SIZE); + + while (1) { + page_wr->cmd_type = CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE; + page_wr->nums = 0; + + /* initialize per-cpu secure call pages */ + for_each_possible_cpu(cpu) { + if (cpu >= SECURE_CALL_ENTRY_MAX) + panic("csv does not support cpus > %d\n", + SECURE_CALL_ENTRY_MAX); + csv3_alloc_secure_call_data(cpu); + data = per_cpu(secure_call_data, cpu); + per_cpu(secure_call_page_idx, cpu) = 0; + page_wr->entry[cpu].base_address = __pa(data); + page_wr->entry[cpu].size = PAGE_SIZE * 2; + page_wr->nums++; + } + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != CSV3_SECURE_CMD_UPDATE_SECURE_CALL_TABLE) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); +} + +/** + * __csv3_early_secure_call - issue secure call command at the stage where new + * kernel page table is created and early identity page + * table is deprecated . + * @base_address: Start address of the specified memory range. + * @num_pages: number of the specific pages. + * @cmd_type: Secure call cmd type. 
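
The three CPUID_VENDOR_HygonGenuine constants consumed by csv3_active() above are just the vendor string in register order. A quick userspace check that they spell "HygonGenuine" (little-endian x86 assumed)::

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void)
    {
            /* ebx, edx, ecx: the order CPUID leaf 0 returns the string */
            uint32_t regs[3] = { 0x6f677948, 0x6e65476e, 0x656e6975 };
            char vendor[13] = { 0 };

            memcpy(vendor, regs, 12);
            printf("%s\n", vendor); /* prints HygonGenuine */
            return 0;
    }
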
+ */ +static void __init __csv3_early_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + u32 cmd_ack; + + if (csv3_boot_sc_page_a == -1ul || csv3_boot_sc_page_b == -1ul) + return; + + if (!csv3_percpu_secure_call_init) { + csv3_secure_call_update_table(); + csv3_percpu_secure_call_init = 1; + } + + if (early_secure_call_page_idx == 0) { + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } else { + page_wr = (void *)early_memremap_encrypted(csv3_boot_sc_page_a, + PAGE_SIZE); + page_rd = (void *)early_memremap_encrypted(csv3_boot_sc_page_b, + PAGE_SIZE); + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the mb below. + */ + mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + early_memunmap(page_rd, PAGE_SIZE); + early_memunmap(page_wr, PAGE_SIZE); + + early_secure_call_page_idx ^= 1; +} + +static void csv3_secure_call(u64 base_address, u64 num_pages, + enum csv3_secure_command_type cmd_type) +{ + u32 cmd_ack; + struct secure_call_pages *data; + struct csv3_secure_call_cmd *page_rd; + struct csv3_secure_call_cmd *page_wr; + int page_idx; + int cpu; + + preempt_disable(); + + cpu = smp_processor_id(); + data = per_cpu(secure_call_data, cpu); + page_idx = per_cpu(secure_call_page_idx, cpu); + + if (page_idx == 0) { + page_rd = &data->page_a; + page_wr = &data->page_b; + } else { + page_rd = &data->page_b; + page_wr = &data->page_a; + } + + while (1) { + page_wr->cmd_type = (u32)cmd_type; + page_wr->nums = 1; + page_wr->entry[0].base_address = base_address; + page_wr->entry[0].size = num_pages << PAGE_SHIFT; + + /* + * Write command in page_wr must be done before retrieve cmd + * ack from page_rd, and it is ensured by the smp_mb below. + */ + smp_mb(); + + cmd_ack = page_rd->cmd_type; + if (cmd_ack != cmd_type) + break; + } + + per_cpu(secure_call_page_idx, cpu) ^= 1; + preempt_enable(); +} + +static void __csv3_memory_enc_dec(csv3_secure_call_func secure_call, u64 vaddr, + u64 pages, bool enc) +{ + u64 vaddr_end, vaddr_next; + u64 psize, pmask; + u64 last_paddr, paddr; + u64 last_psize = 0; + pte_t *kpte; + int level; + enum csv3_secure_command_type cmd_type; + + cmd_type = enc ? CSV3_SECURE_CMD_ENC : CSV3_SECURE_CMD_DEC; + vaddr_next = vaddr; + vaddr_end = vaddr + (pages << PAGE_SHIFT); + for (; vaddr < vaddr_end; vaddr = vaddr_next) { + kpte = lookup_address(vaddr, &level); + if (!kpte || pte_none(*kpte)) { + panic("invalid pte, vaddr 0x%llx\n", vaddr); + goto out; + } + + psize = page_level_size(level); + pmask = page_level_mask(level); + + vaddr_next = (vaddr & pmask) + psize; + paddr = ((pte_pfn(*kpte) << PAGE_SHIFT) & pmask) + + (vaddr & ~pmask); + psize -= (vaddr & ~pmask); + + if (vaddr_end - vaddr < psize) + psize = vaddr_end - vaddr; + if (last_psize == 0 || (last_paddr + last_psize) == paddr) { + last_paddr = (last_psize == 0 ? 
paddr : last_paddr); + last_psize += psize; + } else { + secure_call(last_paddr, last_psize >> PAGE_SHIFT, + cmd_type); + last_paddr = paddr; + last_psize = psize; + } + } + + if (last_psize) + secure_call(last_paddr, last_psize >> PAGE_SHIFT, cmd_type); + +out: + return; +} + +void __init csv_early_memory_enc_dec(u64 vaddr, u64 size, bool enc) +{ + u64 npages; + + if (!csv3_active()) + return; + + npages = (size + (vaddr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT; + __csv3_memory_enc_dec(__csv3_early_secure_call, vaddr & PAGE_MASK, + npages, enc); +} + +void csv_memory_enc_dec(u64 vaddr, u64 pages, bool enc) +{ + if (!csv3_active()) + return; + + __csv3_memory_enc_dec(csv3_secure_call, vaddr & PAGE_MASK, pages, enc); +} diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index a6c1867fc7aa3ea0eb9b24fc1992c90119d2febb..b5f5e0916894406261efd7312efb607ec7ab29cb 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,7 @@ #include #include #include +#include static void __init fix_hypertransport_config(int num, int slot, int func) { @@ -685,6 +687,20 @@ static void __init apple_airport_reset(int bus, int slot, int func) early_iounmap(mmio, BCM4331_MMIO_SIZE); } +bool is_zhaoxin_kh40000; + +static void quirk_zhaoxin_dma_patch(int num, int slot, int func) +{ + u8 revision; + + revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); + if (revision == 0x10) { + is_zhaoxin_kh40000 = true; + dma_ops = &kh40000_dma_direct_ops; + pr_info("zhaoxin direct dma patch enabled\n"); + } +} + #define QFLAG_APPLY_ONCE 0x1 #define QFLAG_APPLIED 0x2 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) @@ -728,6 +744,10 @@ static struct chipset early_qrk[] __initdata = { PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet}, { PCI_VENDOR_ID_BROADCOM, 0x4331, PCI_CLASS_NETWORK_OTHER, PCI_ANY_ID, 0, apple_airport_reset}, + { PCI_VENDOR_ID_ZHAOXIN, 0x1001, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, + { PCI_VENDOR_ID_ZHAOXIN, 0x345B, PCI_CLASS_BRIDGE_HOST, + PCI_BASE_CLASS_BRIDGE, QFLAG_APPLY_ONCE, quirk_zhaoxin_dma_patch }, {} }; diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 246a609f889b20986fe9b66d2449a81db125aec1..de001b2146abf3bef00b4632ada3e7a483421411 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -29,11 +30,33 @@ static void __init i386_default_early_setup(void) x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc; } +#ifdef CONFIG_MICROCODE_INITRD32 +unsigned long __initdata initrd_start_early; +static pte_t __initdata *initrd_pl2p_start, *initrd_pl2p_end; + +static void zap_early_initrd_mapping(void) +{ + pte_t *pl2p = initrd_pl2p_start; + + for (; pl2p < initrd_pl2p_end; pl2p++) { + *pl2p = (pte_t){ .pte = 0 }; + + if (!IS_ENABLED(CONFIG_X86_PAE)) + *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = (pte_t) {.pte = 0}; + } +} +#else +static inline void zap_early_initrd_mapping(void) { } +#endif + asmlinkage __visible void __init __noreturn i386_start_kernel(void) { /* Make sure IDT is set up before any exception happens */ idt_setup_early_handler(); + load_ucode_bsp(); + zap_early_initrd_mapping(); + cr4_init_shadow(); sanitize_boot_params(&boot_params); @@ -69,52 +92,83 @@ asmlinkage __visible void __init __noreturn i386_start_kernel(void) * to the first kernel PMD. 
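
The walk in __csv3_memory_enc_dec() above batches physically contiguous stretches so that one secure call covers a maximal run. The same coalescing idea in standalone form, with fixed sample ranges; illustrative only::

    #include <stdint.h>
    #include <stdio.h>

    struct range { uint64_t addr, size; };

    /* Emit one "call" per maximal physically contiguous run. */
    static void coalesce(const struct range *r, int n)
    {
            uint64_t start = 0, len = 0;

            for (int i = 0; i < n; i++) {
                    if (len && start + len == r[i].addr) {
                            len += r[i].size; /* extends the current run */
                            continue;
                    }
                    if (len)
                            printf("call [%#llx, +%#llx)\n",
                                   (unsigned long long)start,
                                   (unsigned long long)len);
                    start = r[i].addr;
                    len = r[i].size;
            }
            if (len)
                    printf("call [%#llx, +%#llx)\n",
                           (unsigned long long)start, (unsigned long long)len);
    }

    int main(void)
    {
            struct range r[] = {
                    { 0x1000, 0x1000 }, { 0x2000, 0x1000 }, { 0x5000, 0x1000 },
            };

            /* two calls: [0x1000, +0x2000) and [0x5000, +0x1000) */
            coalesce(r, 3);
            return 0;
    }
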
Note the upper half of each PMD or PTE are * always zero at this stage. */ -void __init mk_early_pgtbl_32(void); -void __init mk_early_pgtbl_32(void) -{ -#ifdef __pa -#undef __pa -#endif -#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET) - pte_t pte, *ptep; - int i; - unsigned long *ptr; - /* Enough space to fit pagetables for the low memory linear map */ - const unsigned long limit = __pa(_end) + - (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); #ifdef CONFIG_X86_PAE - pmd_t pl2, *pl2p = (pmd_t *)__pa(initial_pg_pmd); -#define SET_PL2(pl2, val) { (pl2).pmd = (val); } +typedef pmd_t pl2_t; +#define pl2_base initial_pg_pmd +#define SET_PL2(val) { .pmd = (val), } #else - pgd_t pl2, *pl2p = (pgd_t *)__pa(initial_page_table); -#define SET_PL2(pl2, val) { (pl2).pgd = (val); } +typedef pgd_t pl2_t; +#define pl2_base initial_page_table +#define SET_PL2(val) { .pgd = (val), } #endif - ptep = (pte_t *)__pa(__brk_base); - pte.pte = PTE_IDENT_ATTR; - +static __init __no_stack_protector pte_t init_map(pte_t pte, pte_t **ptep, pl2_t **pl2p, + const unsigned long limit) +{ while ((pte.pte & PTE_PFN_MASK) < limit) { + pl2_t pl2 = SET_PL2((unsigned long)*ptep | PDE_IDENT_ATTR); + int i; + + **pl2p = pl2; + if (!IS_ENABLED(CONFIG_X86_PAE)) { + /* Kernel PDE entry */ + *(*pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; + } - SET_PL2(pl2, (unsigned long)ptep | PDE_IDENT_ATTR); - *pl2p = pl2; -#ifndef CONFIG_X86_PAE - /* Kernel PDE entry */ - *(pl2p + ((PAGE_OFFSET >> PGDIR_SHIFT))) = pl2; -#endif for (i = 0; i < PTRS_PER_PTE; i++) { - *ptep = pte; + **ptep = pte; pte.pte += PAGE_SIZE; - ptep++; + (*ptep)++; } - - pl2p++; + (*pl2p)++; } + return pte; +} + +void __init __no_stack_protector mk_early_pgtbl_32(void) +{ + /* Enough space to fit pagetables for the low memory linear map */ + unsigned long limit = __pa_nodebug(_end) + (PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT); + pte_t pte, *ptep = (pte_t *)__pa_nodebug(__brk_base); + struct boot_params __maybe_unused *params; + pl2_t *pl2p = (pl2_t *)__pa_nodebug(pl2_base); + unsigned long *ptr; + + pte.pte = PTE_IDENT_ATTR; + pte = init_map(pte, &ptep, &pl2p, limit); - ptr = (unsigned long *)__pa(&max_pfn_mapped); + ptr = (unsigned long *)__pa_nodebug(&max_pfn_mapped); /* Can't use pte_pfn() since it's a call with CONFIG_PARAVIRT */ *ptr = (pte.pte & PTE_PFN_MASK) >> PAGE_SHIFT; - ptr = (unsigned long *)__pa(&_brk_end); + ptr = (unsigned long *)__pa_nodebug(&_brk_end); *ptr = (unsigned long)ptep + PAGE_OFFSET; -} +#ifdef CONFIG_MICROCODE_INITRD32 + /* Running on a hypervisor? 
*/ + if (native_cpuid_ecx(1) & BIT(31)) + return; + + params = (struct boot_params *)__pa_nodebug(&boot_params); + if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image) + return; + + /* Save the virtual start address */ + ptr = (unsigned long *)__pa_nodebug(&initrd_start_early); + *ptr = (pte.pte & PTE_PFN_MASK) + PAGE_OFFSET; + *ptr += ((unsigned long)params->hdr.ramdisk_image) & ~PAGE_MASK; + + /* Save PLP2 for cleanup */ + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_start); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; + + limit = (unsigned long)params->hdr.ramdisk_image; + pte.pte = PTE_IDENT_ATTR | PFN_ALIGN(limit); + limit = (unsigned long)params->hdr.ramdisk_image + params->hdr.ramdisk_size; + + init_map(pte, &ptep, &pl2p, limit); + + ptr = (unsigned long *)__pa_nodebug(&initrd_pl2p_end); + *ptr = (unsigned long)pl2p + PAGE_OFFSET; +#endif +} diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index bbc21798df10ec79fa7267b80d6ee4e0a4f987a0..612c649743b01661ebc2e7b196f38366691678f8 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -41,6 +41,7 @@ #include #include #include +#include /* * Manage page tables very early on. @@ -161,6 +162,14 @@ static unsigned long __head sme_postprocess_startup(struct boot_params *bp, pmdv i = pmd_index(vaddr); pmd[i] -= sme_get_me_mask(); } + + /* On CSV3, move the shared pages out of the isolated memory region. */ + if (csv3_active()) { + vaddr = (unsigned long)__start_bss_decrypted; + csv_early_reset_memory(bp); + csv_early_update_memory_dec((unsigned long)vaddr, + (vaddr_end - vaddr) >> PAGE_SHIFT); + } } /* @@ -319,6 +328,54 @@ unsigned long __head __startup_64(unsigned long physaddr, return sme_postprocess_startup(bp, pmd); } +#ifdef CONFIG_AMD_MEM_ENCRYPT +extern bool bsp_flush_bss_decrypted_section_done; + +void __ref early_clflush_bss_decrypted_section(void) +{ + /* Only the BSP is allowed to flush these caches, and only at the early boot stage */ + if (bsp_flush_bss_decrypted_section_done) + return; + + if (read_cr3_pa() != __pa_nodebug(early_top_pgt)) + return; + + if (sme_get_me_mask()) { + unsigned long vaddr, vaddr_end; + char *cl, *start, *end; + + /* + * The memory region of the .bss..decrypted section may be mapped + * with encryption in an earlier boot stage. If the corresponding + * stale cache lines created in that earlier stage are not flushed + * before we access the memory region, Linux will crash later + * because the stale cache lines will pollute the memory. So the + * caches must be flushed through the encrypted mapping before the + * .bss..decrypted section is accessed. + * + * The function __startup_64() has already filled in the + * encrypted mapping for the .bss..decrypted section, use that + * mapping here. + */ + vaddr = (unsigned long)__start_bss_decrypted - + __START_KERNEL_map + phys_base; + vaddr_end = (unsigned long)__end_bss_decrypted - + __START_KERNEL_map + phys_base; + + /* Hardcode cl-size to 64 at this stage.
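+	 * The real CLFLUSH line size has not been read from CPUID this + * early; 64 bytes is correct for the SME-capable processors this + * path runs on.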
*/ + start = (char *)(vaddr & ~63); + end = (char *)((vaddr_end + 63) & ~63); + + asm volatile("mfence" : : : "memory"); + for (cl = start; cl != end; cl += 64) + clflush(cl); + asm volatile("mfence" : : : "memory"); + } + + bsp_flush_bss_decrypted_section_done = true; +} +#endif + /* Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) { diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index c9318993f9594562f8518d60d7182d9e1f483f1e..63f6ff4b28eb17bdea42085476ed6dff47f1ed2b 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S @@ -118,11 +118,6 @@ SYM_CODE_START(startup_32) movl %eax, pa(olpc_ofw_pgd) #endif -#ifdef CONFIG_MICROCODE - /* Early load ucode on BSP. */ - call load_ucode_bsp -#endif - /* Create early pagetables. */ call mk_early_pgtbl_32 @@ -157,11 +152,6 @@ SYM_FUNC_START(startup_32_smp) movl %eax,%ss leal -__PAGE_OFFSET(%ecx),%esp -#ifdef CONFIG_MICROCODE - /* Early load ucode on AP. */ - call load_ucode_ap -#endif - .Ldefault_entry: movl $(CR0_STATE & ~X86_CR0_PG),%eax movl %eax,%cr0 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index ea6995920b7aa920f7a604d22f750123491fba4b..a60eecb3cc048b886e1b42af78b08e295c028696 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -359,6 +359,14 @@ SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL) shrq $32, %rdx wrmsr +#ifdef CONFIG_AMD_MEM_ENCRYPT + /* + * Ensure that stale cache lines of the .bss..decrypted section, left + * over from an earlier boot stage, are flushed. + */ + call early_clflush_bss_decrypted_section +#endif + /* Setup and Load IDT */ call early_setup_idt @@ -495,6 +503,8 @@ SYM_CODE_END(vc_boot_ghcb) SYM_DATA(initial_code, .quad x86_64_start_kernel) #ifdef CONFIG_AMD_MEM_ENCRYPT SYM_DATA(initial_vc_handler, .quad handle_vc_boot_ghcb) +SYM_DATA(bsp_flush_bss_decrypted_section_done, .byte 0x0) + .balign 8 #endif SYM_DATA(trampoline_lock, .quad 0); diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 4766b6bed4439330a8cb10bd53abc25331bc3230..17e955ab69feda933cca3708822f6f9f598e31bf 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -343,6 +344,9 @@ static noinstr void default_do_nmi(struct pt_regs *regs) instrumentation_begin(); + if (microcode_nmi_handler_enabled() && microcode_nmi_handler()) + goto out; + handled = nmi_handle(NMI_LOCAL, regs); __this_cpu_add(nmi_stats.normal, handled); if (handled) { @@ -498,8 +502,11 @@ DEFINE_IDTENTRY_RAW(exc_nmi) if (IS_ENABLED(CONFIG_NMI_CHECK_CPU)) raw_atomic_long_inc(&nsp->idt_calls); - if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) + if (IS_ENABLED(CONFIG_SMP) && arch_cpu_is_offline(smp_processor_id())) { + if (microcode_nmi_handler_enabled()) + microcode_offline_nmi_handler(); return; + } if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { this_cpu_write(nmi_state, NMI_LATCHED); diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 708c87b88cc150ee64145de90de938e0482afa3f..619560eb9f94c049fdcbb1a2e4dcb48938fff674 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -212,7 +212,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) switch_fpu_finish(); /* Load the Intel cache allocation PQR MSR.
*/ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 33b268747bb7bbce00e1b12b8e00928ce7305acc..7bdbe94344a23b166f9f04fb781c279172e7edbd 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -659,7 +659,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) } /* Load the Intel cache allocation PQR MSR. */ - resctrl_sched_in(next_p); + resctrl_arch_sched_in(next_p); return prev_p; } diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b098b1fa2470816a35d7784d93c0e7ae0fc83f6d..ad50d8e23328121531307ee593f70a6a255cec0b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -55,6 +55,7 @@ #include #include #include +#include /* * max_low_pfn_mapped: highest directly mapped pfn < 4 GB @@ -226,8 +227,6 @@ static void __init reserve_brk(void) _brk_start = 0; } -u64 relocated_ramdisk; - #ifdef CONFIG_BLK_DEV_INITRD static u64 __init get_ramdisk_image(void) @@ -253,6 +252,34 @@ static u64 __init get_ramdisk_size(void) return ramdisk_size; } +#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) + +static void __init copy_early_initrd(void *dest, phys_addr_t src, + unsigned long size) +{ + unsigned long slop, clen; + char *p; + + while (size) { + slop = offset_in_page(src); + clen = size; + if (clen > MAX_MAP_CHUNK - slop) + clen = MAX_MAP_CHUNK - slop; + /* + * The _ENC flag should be preserved so that, when SME is enabled, + * the initrd can be mapped as encrypted, as it was encrypted earlier. + * The flag has no effect on other platforms such as TDX or SEV. + */ + p = early_memremap_prot(src & PAGE_MASK, clen + slop, + pgprot_val(FIXMAP_PAGE_NORMAL)); + memcpy(dest, p + slop, clen); + early_memunmap(p, clen + slop); + dest += clen; + src += clen; + size -= clen; + } +} + static void __init relocate_initrd(void) { /* Assume only end is not page aligned */ @@ -261,7 +288,7 @@ static void __init relocate_initrd(void) u64 area_size = PAGE_ALIGN(ramdisk_size); /* We need to move the initrd down into directly mapped mem */ - relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, + u64 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0, PFN_PHYS(max_pfn_mapped)); if (!relocated_ramdisk) panic("Cannot find place for new RAMDISK of size %lld\n", @@ -272,7 +299,7 @@ static void __init relocate_initrd(void) printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n", relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1); - copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size); + copy_early_initrd((void *)initrd_start, ramdisk_image, ramdisk_size); printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to" " [mem %#010llx-%#010llx]\n", @@ -1218,6 +1245,10 @@ void __init setup_arch(char **cmdline_p) early_acpi_boot_init(); initmem_init(); + + /* Try to reserve contiguous memory to support CSV3 */ + early_csv_reserve_mem(); + dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT); if (boot_cpu_has(X86_FEATURE_GBPAGES)) diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c index d87c6ff1f5136e23ec5162223619a4f2cafb0672..f334b81b0853604a83af14a94b22fb8975b528f3 100644 --- a/arch/x86/kernel/sev.c +++ b/arch/x86/kernel/sev.c @@ -1864,6 +1864,15 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co struct ghcb *ghcb; bool ret = true; + /* + * Make sure the code between __sev_get_ghcb() and __sev_put_ghcb() + * runs in atomic context.
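+	 * (Raising the preempt count by HARDIRQ_OFFSET below is what provides + * that: it makes in_interrupt() true and prevents preemption while the + * per-CPU GHCB is in use.)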
If #VC comes from kernel mode, then the + * code here is already in atomic context. If #VC comes from user mode, then + * it is necessary to switch to atomic context manually. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_add(HARDIRQ_OFFSET); + ghcb = __sev_get_ghcb(&state); vc_ghcb_invalidate(ghcb); @@ -1874,6 +1883,9 @@ static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_co __sev_put_ghcb(&state); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && !in_nmi()) + __preempt_count_sub(HARDIRQ_OFFSET); + /* Done - now check the result */ switch (result) { case ES_OK: diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 2a187c0cbd5b11ac52f8a0f32d299624b6de0e17..fac3b8a523ac857b4a9e8fc80cca15703a8e8bd2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -258,12 +258,9 @@ static void notrace start_secondary(void *unused) cpu_init_exception_handling(); /* - * 32-bit systems load the microcode from the ASM startup code for - * historical reasons. - * - * On 64-bit systems load it before reaching the AP alive - * synchronization point below so it is not part of the full per - * CPU serialized bringup part when "parallel" bringup is enabled. + * Load the microcode before reaching the AP alive synchronization + * point below so it is not part of the full per CPU serialized + * bringup part when "parallel" bringup is enabled. * * That's even safe when hyperthreading is enabled in the CPU as * the core code starts the primary threads first and leaves the @@ -276,8 +273,7 @@ static void notrace start_secondary(void *unused) * CPUID, MSRs etc. must be strictly serialized to maintain * software state correctness. */ - if (IS_ENABLED(CONFIG_X86_64)) - load_ucode_ap(); + load_ucode_ap(); /* * Synchronization point with the hotplug core. Sets this CPUs @@ -747,6 +743,7 @@ const struct cpumask *cpu_clustergroup_mask(int cpu) { return cpu_l2c_shared_mask(cpu); } +EXPORT_SYMBOL_GPL(cpu_clustergroup_mask); static void impress_friends(void) { diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index e42faa792c07931083988931a5061b5e21429499..4b790925b3e6d255cb26e65d19e4dd4c68d4576b 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -102,6 +102,8 @@ static __init void x86_late_time_init(void) if (static_cpu_has(X86_FEATURE_WAITPKG)) use_tpause_delay(); + else if (static_cpu_has(X86_FEATURE_ZXPAUSE)) + use_zxpause_delay(); } /* diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index 1123ef3ccf9011f55f60d72fb78de75b947b0474..d0369e9d6b8c4eaa86469e7cd58c3a72c81083cd 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -232,6 +232,11 @@ bool tsc_store_and_check_tsc_adjust(bool bootcpu) if (bootval != ref->adjusted) { cur->adjusted = ref->adjusted; wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted); + } else if (cur->adjusted != bootval) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + cur->adjusted = bootval; + } } /* * We have the TSCs forced to be in sync on this package.
Skip sync diff --git a/arch/x86/kernel/zhaoxin_kh40000.c b/arch/x86/kernel/zhaoxin_kh40000.c new file mode 100644 index 0000000000000000000000000000000000000000..e8dd3bd43e72f39c722d378bdcff44c7062832d4 --- /dev/null +++ b/arch/x86/kernel/zhaoxin_kh40000.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "../../../kernel/dma/direct.h" + +/*** + * usage: + * set "zhaoxin_patch_bitmask=<value>" in cmdline + * value description: + * bit 0: enable (1) or disable (0) the node check. default 1 + */ +enum { + ZHAOXIN_P2CW_NODE_CHECK = BIT(0), + ZHAOXIN_PATCH_CODE_MAX = ZHAOXIN_P2CW_NODE_CHECK, +}; + +#define ZHAOXIN_PATCH_CODE_DEFAULT ZHAOXIN_P2CW_NODE_CHECK + +unsigned long zhaoxin_patch_code = ZHAOXIN_PATCH_CODE_DEFAULT; + +static int __init zhaoxin_patch_code_setup(char *str) +{ + int err = kstrtoul(str, 0, &zhaoxin_patch_code); + + if (err || (zhaoxin_patch_code > ZHAOXIN_PATCH_CODE_MAX)) { + pr_err("invalid cmdline 'zhaoxin_patch_bitmask=%s'\n", + str); + return err; + } + + if (ZHAOXIN_P2CW_NODE_CHECK & zhaoxin_patch_code) + pr_info("zhaoxin dma patch node check is enabled\n"); + + return 0; +} +__setup("zhaoxin_patch_bitmask=", zhaoxin_patch_code_setup); + +static struct pci_dev *kh40000_get_pci_dev(struct device *dev) +{ + if (dev_is_pci(dev)) + return to_pci_dev(dev); + + if (dev->parent) + return kh40000_get_pci_dev(dev->parent); + + return NULL; +} + +static void kh40000_sync_single_dma_for_cpu(struct device *dev, dma_addr_t paddr, + enum dma_data_direction dir, bool is_iommu) +{ + u8 vid; + struct pci_dev *pci; + u64 dma_mask = *dev->dma_mask; + + /* check direction */ + if ((dir != DMA_FROM_DEVICE) && (dir != DMA_BIDIRECTIONAL)) + return; + + /* check dma capability */ + if (dma_mask <= DMA_BIT_MASK(32)) + return; + + /* check device type */ + pci = kh40000_get_pci_dev(dev); + if (pci == NULL) + return; + + /* get real physical address */ + if (is_iommu) { + struct iommu_domain *domain = iommu_get_dma_domain(dev); + + paddr = iommu_iova_to_phys(domain, paddr); + if (!paddr) + return; + } + + /* check node or not */ + if ((zhaoxin_patch_code & ZHAOXIN_P2CW_NODE_CHECK) + && pfn_to_nid(PFN_DOWN(paddr)) == dev_to_node(dev)) + return; + + /* flush data by one pci read cycle */ + pci_read_config_byte(pci, PCI_VENDOR_ID, &vid); +} + +/* zhaoxin kh-40000 direct dma ops */ +static void *kh40000_dma_direct_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + if (dev->coherent_dma_mask > DMA_BIT_MASK(32)) + gfp |= __GFP_THISNODE; + + return dma_direct_alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_dma_direct_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_unmap_page(dev, addr, size, dir, attrs); +} + +static void kh40000_dma_direct_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nents, enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_sync_sg_for_cpu(dev, sgl, nents, dir); +} + +static void kh40000_dma_direct_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); + dma_direct_sync_single_for_cpu(dev, addr,
size, dir); +} + +static void kh40000_dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nents, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 0); + + dma_direct_unmap_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_dma_direct_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 0); +} + +const struct dma_map_ops kh40000_dma_direct_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_dma_direct_alloc, + .sync_sg_for_cpu = kh40000_dma_direct_sync_sg_for_cpu, + .unmap_page = kh40000_dma_direct_unmap_page, + .sync_single_for_cpu = kh40000_dma_direct_sync_single_for_cpu, + .unmap_sg = kh40000_dma_direct_unmap_sg, + .unmap_resource = kh40000_dma_direct_unmap_resource, + .dma_supported = dma_direct_supported, + .free = dma_direct_free, + .alloc_pages = dma_direct_alloc_pages, + .free_pages = dma_direct_free_pages, + .sync_single_for_device = dma_direct_sync_single_for_device, + .sync_sg_for_device = dma_direct_sync_sg_for_device, + .get_required_mask = dma_direct_get_required_mask, + .max_mapping_size = dma_direct_max_mapping_size, + .mmap = dma_direct_mmap, + .get_sgtable = dma_direct_get_sgtable, + .map_page = dma_direct_map_page, + .map_sg = dma_direct_map_sg, + .map_resource = dma_direct_map_resource, +}; + +/* zhaoxin kh-40000 iommu dma ops */ +static const struct dma_map_ops *iommu_dma_ops; + +static void *kh40000_iommu_dma_alloc(struct device *dev, size_t size, + dma_addr_t *addr, gfp_t gfp, unsigned long attrs) +{ + gfp |= __GFP_THISNODE; + + return iommu_dma_ops->alloc(dev, size, addr, gfp, attrs); +} + +static void kh40000_iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs) +{ + iommu_dma_ops->free(dev, size, cpu_addr, handle, attrs); +} + +static struct page *kh40000_dma_common_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp) +{ + return iommu_dma_ops->alloc_pages(dev, size, dma_handle, dir, gfp); +} + +static void kh40000_dma_common_free_pages(struct device *dev, size_t size, struct page *page, + dma_addr_t dma_handle, enum dma_data_direction dir) +{ + iommu_dma_ops->free_pages(dev, size, page, dma_handle, dir); +} + +static struct sg_table *kh40000_iommu_dma_alloc_noncontiguous(struct device *dev, + size_t size, enum dma_data_direction dir, gfp_t gfp, + unsigned long attrs) +{ + return iommu_dma_ops->alloc_noncontiguous(dev, size, dir, gfp, attrs); +} + +static void kh40000_iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir) +{ + return iommu_dma_ops->free_noncontiguous(dev, size, sgt, dir); +} + +static int kh40000_iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs); +} + +static void kh40000_iommu_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_page(dev, addr, size, dir, attrs); +} + +static int kh40000_iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, 
size_t size, + unsigned long attrs) +{ + return iommu_dma_ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs); +} + +static dma_addr_t kh40000_iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + return iommu_dma_ops->map_page(dev, page, offset, size, dir, attrs); +} + +static int kh40000_iommu_dma_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_sg(dev, sgl, nents, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir, unsigned long attrs) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->unmap_sg(dev, sgl, nelems, dir, attrs); +} + +static void kh40000_iommu_dma_sync_single_for_cpu(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->sync_single_for_cpu(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_single_for_device(struct device *dev, + dma_addr_t addr, size_t size, enum dma_data_direction dir) +{ + iommu_dma_ops->sync_single_for_device(dev, addr, size, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_cpu(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + struct scatterlist *sg; + int i; + + for_each_sg(sgl, sg, nelems, i) + kh40000_sync_single_dma_for_cpu(dev, sg_dma_address(sg), dir, 1); + iommu_dma_ops->sync_sg_for_cpu(dev, sgl, nelems, dir); +} + +static void kh40000_iommu_dma_sync_sg_for_device(struct device *dev, + struct scatterlist *sgl, int nelems, + enum dma_data_direction dir) +{ + iommu_dma_ops->sync_sg_for_device(dev, sgl, nelems, dir); +} + +static dma_addr_t kh40000_iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + return iommu_dma_ops->map_resource(dev, phys, size, dir, attrs); +} + +static void kh40000_iommu_dma_unmap_resource(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + kh40000_sync_single_dma_for_cpu(dev, addr, dir, 1); + iommu_dma_ops->unmap_resource(dev, addr, size, dir, attrs); +} + +static unsigned long kh40000_iommu_dma_get_merge_boundary(struct device *dev) +{ + return iommu_dma_ops->get_merge_boundary(dev); +} + +static size_t kh40000_iommu_dma_opt_mapping_size(void) +{ + return iommu_dma_ops->opt_mapping_size(); +} + +const struct dma_map_ops kh40000_dma_iommu_ops = { + .flags = DMA_F_PCI_P2PDMA_SUPPORTED, + .alloc = kh40000_iommu_dma_alloc, + .free = kh40000_iommu_dma_free, + .unmap_page = kh40000_iommu_dma_unmap_page, + .alloc_pages = kh40000_dma_common_alloc_pages, + .free_pages = kh40000_dma_common_free_pages, + .alloc_noncontiguous = kh40000_iommu_dma_alloc_noncontiguous, + .free_noncontiguous = kh40000_iommu_dma_free_noncontiguous, + .mmap = kh40000_iommu_dma_mmap, + .get_sgtable = kh40000_iommu_dma_get_sgtable, + .map_page = kh40000_iommu_dma_map_page, + .map_sg = kh40000_iommu_dma_map_sg, + .unmap_sg = kh40000_iommu_dma_unmap_sg, + .sync_single_for_cpu = kh40000_iommu_dma_sync_single_for_cpu, + .sync_single_for_device = kh40000_iommu_dma_sync_single_for_device, + .sync_sg_for_cpu = kh40000_iommu_dma_sync_sg_for_cpu, + .sync_sg_for_device = 
kh40000_iommu_dma_sync_sg_for_device, + .map_resource = kh40000_iommu_dma_map_resource, + .unmap_resource = kh40000_iommu_dma_unmap_resource, + .get_merge_boundary = kh40000_iommu_dma_get_merge_boundary, + .opt_mapping_size = kh40000_iommu_dma_opt_mapping_size, +}; + +void kh40000_set_iommu_dma_ops(struct device *dev) +{ + if (dev->dma_ops) { + iommu_dma_ops = dev->dma_ops; + set_dma_ops(dev, &kh40000_dma_iommu_ops); + pr_info_once("zhaoxin iommu dma patch enabled\n"); + } +} diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig index ed90f148140dfe093bed15a033b6e322a3cb5f2a..463732963a15f8c6e34cae258a503a159b17c402 100644 --- a/arch/x86/kvm/Kconfig +++ b/arch/x86/kvm/Kconfig @@ -154,4 +154,14 @@ config KVM_PROVE_MMU config KVM_EXTERNAL_WRITE_TRACKING bool +config KVM_SUPPORTS_CSV_REUSE_ASID + def_bool y + bool "Reuse the same ASID for different HYGON CSV guests" + depends on KVM_AMD_SEV && CPU_SUP_HYGON + depends on !CGROUP_MISC + help + Provide support for reusing the same ASID for different HYGON + CSV guests. This allows the user to create more CSV guests on + HYGON CPUs with a limited number of ASIDs. + endif # VIRTUALIZATION diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile index 80e3fe184d17e64984d2f2d4adba00ec9d5ad57e..16b4637033879487ebcd670a67cd78187ca3ddab 100644 --- a/arch/x86/kvm/Makefile +++ b/arch/x86/kvm/Makefile @@ -12,7 +12,7 @@ include $(srctree)/virt/kvm/Makefile.kvm kvm-y += x86.o emulate.o i8259.o irq.o lapic.o \ i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \ hyperv.o debugfs.o mmu/mmu.o mmu/page_track.o \ - mmu/spte.o + mmu/spte.o hygon/psp.o ifdef CONFIG_HYPERV kvm-y += kvm_onhyperv.o @@ -37,6 +37,8 @@ obj-$(CONFIG_KVM) += kvm.o obj-$(CONFIG_KVM_INTEL) += kvm-intel.o obj-$(CONFIG_KVM_AMD) += kvm-amd.o +kvm-amd-$(CONFIG_HYGON_CSV) += svm/csv.o + AFLAGS_svm/vmenter.o := -iquote $(obj) $(obj)/svm/vmenter.o: $(obj)/kvm-asm-offsets.h diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 773132c3bf5af760827f8dd0ab9601de702744a5..7b70ebf4678f85da32c593fc0c449c469d5238a8 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -669,7 +669,7 @@ void kvm_set_cpu_caps(void) kvm_cpu_cap_mask(CPUID_7_1_EAX, F(AVX_VNNI) | F(AVX512_BF16) | F(CMPCCXADD) | F(FZRM) | F(FSRS) | F(FSRC) | - F(AMX_FP16) | F(AVX_IFMA) + F(AMX_FP16) | F(AVX_IFMA) | F(LAM) ); kvm_cpu_cap_init_kvm_defined(CPUID_7_1_EDX, @@ -785,6 +785,9 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); + /* Zhaoxin 0xC0000006 leaf */ + kvm_cpu_cap_mask(CPUID_C000_0006_EAX, 0 /* bit0: zxpause */ | 0 /* bit1 HMAC */); + /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed.
This is purely a sanity check and @@ -1289,17 +1292,22 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) } /*Add support for Centaur's CPUID instruction*/ case 0xC0000000: - /*Just support up to 0xC0000004 now*/ - entry->eax = min(entry->eax, 0xC0000004); + /* Extended to 0xC0000006 */ + entry->eax = min(entry->eax, 0xC0000006); break; case 0xC0000001: cpuid_entry_override(entry, CPUID_C000_0001_EDX); break; + case 0xC0000006: + cpuid_entry_override(entry, CPUID_C000_0006_EAX); + break; + case 3: /* Processor serial number */ case 5: /* MONITOR/MWAIT */ case 0xC0000002: case 0xC0000003: case 0xC0000004: + case 0xC0000005: default: entry->eax = entry->ebx = entry->ecx = entry->edx = 0; break; diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index 284fa4704553da1345731927225084e7daeeba4b..93c63ba2933711100f5e4a674ea1eaca5e5daff4 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h @@ -47,11 +47,6 @@ static inline bool kvm_vcpu_is_legal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) return !(gpa & vcpu->arch.reserved_gpa_bits); } -static inline bool kvm_vcpu_is_illegal_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) -{ - return !kvm_vcpu_is_legal_gpa(vcpu, gpa); -} - static inline bool kvm_vcpu_is_legal_aligned_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, gpa_t alignment) { @@ -278,4 +273,12 @@ static __always_inline bool guest_can_use(struct kvm_vcpu *vcpu, vcpu->arch.governed_features.enabled); } +static inline bool kvm_vcpu_is_legal_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) +{ + if (guest_can_use(vcpu, X86_FEATURE_LAM)) + cr3 &= ~(X86_CR3_LAM_U48 | X86_CR3_LAM_U57); + + return kvm_vcpu_is_legal_gpa(vcpu, cr3); +} + #endif diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 2673cd5c46cb486b9e78a20c589462552f0b4747..e223043ef5b26f23be5b2f0606641f66c5cd18aa 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c @@ -687,8 +687,8 @@ static unsigned insn_alignment(struct x86_emulate_ctxt *ctxt, unsigned size) static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, struct segmented_address addr, unsigned *max_size, unsigned size, - bool write, bool fetch, - enum x86emul_mode mode, ulong *linear) + enum x86emul_mode mode, ulong *linear, + unsigned int flags) { struct desc_struct desc; bool usable; @@ -701,7 +701,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, *max_size = 0; switch (mode) { case X86EMUL_MODE_PROT64: - *linear = la; + *linear = la = ctxt->ops->get_untagged_addr(ctxt, la, flags); va_bits = ctxt_virt_addr_bits(ctxt); if (!__is_canonical_address(la, va_bits)) goto bad; @@ -717,11 +717,11 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt, if (!usable) goto bad; /* code segment in protected mode or read-only data segment */ - if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) - || !(desc.type & 2)) && write) + if ((((ctxt->mode != X86EMUL_MODE_REAL) && (desc.type & 8)) || !(desc.type & 2)) && + (flags & X86EMUL_F_WRITE)) goto bad; /* unreadable code segment */ - if (!fetch && (desc.type & 8) && !(desc.type & 2)) + if (!(flags & X86EMUL_F_FETCH) && (desc.type & 8) && !(desc.type & 2)) goto bad; lim = desc_limit_scaled(&desc); if (!(desc.type & 8) && (desc.type & 4)) { @@ -757,8 +757,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt, ulong *linear) { unsigned max_size; - return __linearize(ctxt, addr, &max_size, size, write, false, - ctxt->mode, linear); + return __linearize(ctxt, addr, &max_size, size, ctxt->mode, linear, + write ? 
X86EMUL_F_WRITE : 0); } static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) @@ -771,7 +771,8 @@ static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst) if (ctxt->op_bytes != sizeof(unsigned long)) addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1); - rc = __linearize(ctxt, addr, &max_size, 1, false, true, ctxt->mode, &linear); + rc = __linearize(ctxt, addr, &max_size, 1, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (rc == X86EMUL_CONTINUE) ctxt->_eip = addr.ea; return rc; @@ -907,8 +908,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) * boundary check itself. Instead, we use max_size to check * against op_size. */ - rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode, - &linear); + rc = __linearize(ctxt, addr, &max_size, 0, ctxt->mode, &linear, + X86EMUL_F_FETCH); if (unlikely(rc != X86EMUL_CONTINUE)) return rc; @@ -3439,8 +3440,10 @@ static int em_invlpg(struct x86_emulate_ctxt *ctxt) { int rc; ulong linear; + unsigned int max_size; - rc = linearize(ctxt, ctxt->src.addr.mem, 1, false, &linear); + rc = __linearize(ctxt, ctxt->src.addr.mem, &max_size, 1, ctxt->mode, + &linear, X86EMUL_F_INVLPG); if (rc == X86EMUL_CONTINUE) ctxt->ops->invlpg(ctxt, linear); /* Disable writeback. */ diff --git a/arch/x86/kvm/governed_features.h b/arch/x86/kvm/governed_features.h index 423a73395c102ca908453016e416dbdfb7fc2b5b..ad463b1ed4e4a87c29aa9d5af3842fbd4e039e41 100644 --- a/arch/x86/kvm/governed_features.h +++ b/arch/x86/kvm/governed_features.h @@ -16,6 +16,7 @@ KVM_GOVERNED_X86_FEATURE(PAUSEFILTER) KVM_GOVERNED_X86_FEATURE(PFTHRESHOLD) KVM_GOVERNED_X86_FEATURE(VGIF) KVM_GOVERNED_X86_FEATURE(VNMI) +KVM_GOVERNED_X86_FEATURE(LAM) #undef KVM_GOVERNED_X86_FEATURE #undef KVM_GOVERNED_FEATURE diff --git a/arch/x86/kvm/hygon/psp.c b/arch/x86/kvm/hygon/psp.c new file mode 100644 index 0000000000000000000000000000000000000000..9181ec2406ecfdcc21942fc7f57732d7a8b81093 --- /dev/null +++ b/arch/x86/kvm/hygon/psp.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSP virtualization + * + * Copyright (c) 2023, HYGON CORPORATION. All rights reserved. 
+ * Author: Ge Yang + * + */ + +#include +#include +#include +#include +#include + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "vpsp: " fmt + +/* + * This file implements the core execution + * logic of the virtual PSP in kernel mode, which mainly includes: + * (1) Obtaining the VM command and preprocessing the pointer + * mapping table information in the command buffer + * (2) Passing the converted command to the PSP channel + * through the driver and + * trying to obtain the execution result + * (3) Recovering the executed command data according to + * the multilevel pointers of the mapping table, and returning it to the VM + * + * The primary implementation logic of the virtual PSP in kernel mode + * call trace: + * guest command(vmmcall) + * | + * | |-> kvm_pv_psp_cmd_pre_op + * | | + * | | -> guest_addr_map_table_op + * | | + * | | -> guest_multiple_level_gpa_replace + * | + * kvm_pv_psp_op->|-> vpsp_try_do_cmd/vpsp_try_get_result <====> psp device driver + * | + * | + * |-> kvm_pv_psp_cmd_post_op + * | + * | -> guest_addr_map_table_op + * | + * | -> guest_multiple_level_gpa_restore + */ + +#define TKM_CMD_ID_MIN 0x120 +#define TKM_CMD_ID_MAX 0x12f + +struct psp_cmdresp_head { + uint32_t buf_size; + uint32_t cmdresp_size; + uint32_t cmdresp_code; +} __packed; + +/** + * struct map_tbl - multilevel pointer address mapping table + * + * @parent_pa: parent address block's physical address + * @offset: offset in parent address block + * @size: submemory size + * @align: submemory alignment; the hva must keep this size alignment in the kernel + * @hva: submemory copy block in kernel virtual address + */ +struct map_tbl { + uint64_t parent_pa; + uint32_t offset; + uint32_t size; + uint32_t align; + uint64_t hva; +} __packed; + +struct addr_map_tbls { + uint32_t tbl_nums; + struct map_tbl tbl[]; +} __packed; + +/* gpa and hva conversion maintenance table for internal use */ +struct gpa2hva_t { + void *hva; + gpa_t gpa; +}; + +struct gpa2hva_tbls { + uint32_t max_nums; + uint32_t tbl_nums; + struct gpa2hva_t tbl[]; +}; + +/* save command data for restoring later */ +struct vpsp_hbuf_wrapper { + void *data; + uint32_t data_size; + struct addr_map_tbls *map_tbls; + struct gpa2hva_tbls *g2h_tbls; +}; + +/* + * Virtual PSP host memory information maintenance, used in ringbuffer mode + */ +struct vpsp_hbuf_wrapper +g_hbuf_wrap[CSV_COMMAND_PRIORITY_NUM][CSV_RING_BUFFER_SIZE / CSV_RING_BUFFER_ESIZE] = {0}; + +void __maybe_unused map_tbl_dump(const char *title, struct addr_map_tbls *tbls) +{ + int i; + + pr_info("[%s]-> map_tbl_nums: %d", title, tbls->tbl_nums); + for (i = 0; i < tbls->tbl_nums; i++) { + pr_info("\t[%d]: parent_pa: 0x%llx, offset: 0x%x, size: 0x%x, align: 0x%x hva: 0x%llx", + i, tbls->tbl[i].parent_pa, tbls->tbl[i].offset, + tbls->tbl[i].size, tbls->tbl[i].align, tbls->tbl[i].hva); + } + pr_info("\n"); +} + +void __maybe_unused g2h_tbl_dump(const char *title, struct gpa2hva_tbls *tbls) +{ + int i; + + pr_info("[%s]-> g2h_tbl_nums: %d, max_nums: %d", title, tbls->tbl_nums, + tbls->max_nums); + for (i = 0; i < tbls->tbl_nums; i++) + pr_info("\t[%d]: hva: 0x%llx, gpa: 0x%llx", i, + (uint64_t)tbls->tbl[i].hva, tbls->tbl[i].gpa); + pr_info("\n"); +} + +static int gpa2hva_tbl_fill(struct gpa2hva_tbls *tbls, void *hva, gpa_t gpa) +{ + uint32_t fill_idx = tbls->tbl_nums; + + if (fill_idx >= tbls->max_nums) + return -EFAULT; + + tbls->tbl[fill_idx].hva = hva; + tbls->tbl[fill_idx].gpa = gpa; + tbls->tbl_nums = fill_idx + 1; + + return 0; +} + +static void
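+/* Forget a freed hva in the gpa2hva table so the final bulk free in kvm_pv_psp_mem_free() does not touch it again. */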
clear_hva_in_g2h_tbls(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + g2h->tbl[i].hva = NULL; + } +} + +static void *get_hva_from_gpa(struct gpa2hva_tbls *g2h, gpa_t gpa) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].gpa == gpa) + return (void *)g2h->tbl[i].hva; + } + + return NULL; +} + +static gpa_t get_gpa_from_hva(struct gpa2hva_tbls *g2h, void *hva) +{ + int i; + + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva == hva) + return g2h->tbl[i].gpa; + } + + return 0; +} + +/* + * Execution helper for the guest's multilevel pointer command buffer: + * synchronize the data at the original gpa into the newly allocated hva + * (host virtual address) and update the mapping relationship in the + * parent memory block + */ +static int guest_multiple_level_gpa_replace(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + uint32_t sub_block_size; + uint64_t sub_paddr; + void *parent_kva = NULL; + + /* kmalloc memory for child block */ + sub_block_size = max(tbl->size, tbl->align); + tbl->hva = (uint64_t)kzalloc(sub_block_size, GFP_KERNEL); + if (!tbl->hva) + return -ENOMEM; + + /* get child gpa from parent gpa */ + if (unlikely(kvm_read_guest(kvm, tbl->parent_pa + tbl->offset, + &sub_paddr, sizeof(sub_paddr)))) { + pr_err("[%s]: kvm_read_guest for parent gpa failed\n", + __func__); + ret = -EFAULT; + goto e_free; + } + + /* copy child block data from gpa to hva */ + if (unlikely(kvm_read_guest(kvm, sub_paddr, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_read_guest for sub_data failed\n", + __func__); + ret = -EFAULT; + goto e_free; + } + + /* get hva from gpa */ + parent_kva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_kva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; + goto e_free; + } + + /* replace pa of hva from gpa */ + *(uint64_t *)((uint8_t *)parent_kva + tbl->offset) = __psp_pa(tbl->hva); + + /* fill in gpa and hva to map table for restoring later */ + if (unlikely(gpa2hva_tbl_fill(g2h, (void *)tbl->hva, sub_paddr))) { + pr_err("[%s]: gpa2hva_tbl_fill for sub_addr failed\n", + __func__); + ret = -EFAULT; + goto e_free; + } + + return ret; + +e_free: + kfree((const void *)tbl->hva); + return ret; +} + +/* + * Execution helper for the guest's multilevel pointer command memory: + * synchronize the data in the hva (host virtual address) back to the + * memory corresponding to the gpa and restore the mapping relationship + * in the original parent memory block + */ +static int guest_multiple_level_gpa_restore(struct kvm *kvm, + struct map_tbl *tbl, struct gpa2hva_tbls *g2h) +{ + int ret = 0; + gpa_t sub_gpa; + void *parent_hva = NULL; + + /* get gpa from hva */ + sub_gpa = get_gpa_from_hva(g2h, (void *)tbl->hva); + if (unlikely(!sub_gpa)) { + pr_err("[%s]: get_gpa_from_hva for sub_gpa failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* copy child block data from hva to gpa */ + if (unlikely(kvm_write_guest(kvm, sub_gpa, (void *)tbl->hva, + tbl->size))) { + pr_err("[%s]: kvm_write_guest for sub_gpa failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* get parent hva from parent gpa */ + parent_hva = get_hva_from_gpa(g2h, tbl->parent_pa); + if (unlikely(!parent_hva)) { + pr_err("[%s]: get_hva_from_gpa for parent_pa failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* restore gpa from pa of hva in parent block */ + *(uint64_t *)((uint8_t *)parent_hva + tbl->offset) = sub_gpa; + + /* free child block memory */ + clear_hva_in_g2h_tbls(g2h, (void *)tbl->hva); + kfree((const void *)tbl->hva); + tbl->hva = 0; + +end: + return ret; +} + +/* + * Top-level handler for the guest's multilevel pointer command memory: + * dispatches to the two helpers above, either replacing (op == 0) or + * restoring (op != 0) the mappings + */ +static int guest_addr_map_table_op(struct kvm *kvm, struct gpa2hva_tbls *g2h, + struct addr_map_tbls *map_tbls, int op) +{ + int ret = 0; + int i; + uint64_t *sub_paddr_ptr; + + if (op) { + for (i = map_tbls->tbl_nums - 1; i >= 0; i--) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if ((uint64_t)g2h->tbl[0].hva == map_tbls->tbl[i].hva) { + *sub_paddr_ptr = g2h->tbl[0].gpa; + continue; + } + } + + /* restore new pa of kva with the gpa from guest */ + if (unlikely(guest_multiple_level_gpa_restore(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_restore failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } else { + for (i = 0; i < map_tbls->tbl_nums; i++) { + /* check if the gpa of root points to itself */ + if (map_tbls->tbl[i].parent_pa == g2h->tbl[0].gpa) { + sub_paddr_ptr = (uint64_t *)((uint8_t *)g2h->tbl[0].hva + + map_tbls->tbl[i].offset); + /* if the child paddr is equal to the parent paddr */ + if (*sub_paddr_ptr == map_tbls->tbl[i].parent_pa) { + *sub_paddr_ptr = __psp_pa(g2h->tbl[0].hva); + map_tbls->tbl[i].hva = (uint64_t)g2h->tbl[0].hva; + continue; + } + } + + /* check if parent_pa is valid */ + if (unlikely(!get_hva_from_gpa(g2h, map_tbls->tbl[i].parent_pa))) { + pr_err("[%s]: g2h->tbl[%d].parent_pa: 0x%llx is invalid\n", + __func__, i, map_tbls->tbl[i].parent_pa); + ret = -EFAULT; + goto end; + } + + /* replace the gpa from guest with the new pa of kva */ + if (unlikely(guest_multiple_level_gpa_replace(kvm, + &map_tbls->tbl[i], g2h))) { + pr_err("[%s]: guest_multiple_level_gpa_replace failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + } + +end: + return ret; +} + +static void kvm_pv_psp_mem_free(struct gpa2hva_tbls *g2h, struct addr_map_tbls + *map_tbl, void *data) +{ + int i; + + if (g2h) { + for (i = 0; i < g2h->tbl_nums; i++) { + if (g2h->tbl[i].hva && (g2h->tbl[i].hva != data)) { + kfree(g2h->tbl[i].hva); + g2h->tbl[i].hva = NULL; + } + } + kfree(g2h); + } + + kfree(map_tbl); + kfree(data); +} + +/* + * Obtain the VM command and preprocess the pointer mapping table + * information in the command buffer; the processed data will be + * used to interact with the psp device + */ +static int kvm_pv_psp_cmd_pre_op(struct kvm *kvm, gpa_t data_gpa, + gpa_t table_gpa, struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + void *data = NULL; + struct psp_cmdresp_head psp_head; + uint32_t data_size; + struct addr_map_tbls map_head, *map_tbls = NULL; + uint32_t map_tbl_size; + struct gpa2hva_tbls *g2h = NULL; + uint32_t g2h_tbl_size; + + if (unlikely(kvm_read_guest(kvm, data_gpa, &psp_head, + sizeof(struct psp_cmdresp_head)))) + return -EFAULT; + + data_size = psp_head.buf_size; + data = kzalloc(data_size, GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (unlikely(kvm_read_guest(kvm, data_gpa, data, data_size))) { + ret = -EFAULT; + goto end; + } + + if (table_gpa) { + /* parse address map table from guest */ + if
(unlikely(kvm_read_guest(kvm, table_gpa, &map_head, + sizeof(struct addr_map_tbls)))) { + pr_err("[%s]: kvm_read_guest for map_head failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + map_tbl_size = sizeof(struct addr_map_tbls) + map_head.tbl_nums + * sizeof(struct map_tbl); + map_tbls = kzalloc(map_tbl_size, GFP_KERNEL); + if (!map_tbls) { + ret = -ENOMEM; + goto end; + } + + if (unlikely(kvm_read_guest(kvm, table_gpa, map_tbls, + map_tbl_size))) { + pr_err("[%s]: kvm_read_guest for map_tbls failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* init the gpa2hva table */ + g2h_tbl_size = sizeof(struct gpa2hva_tbls) + (map_head.tbl_nums + + 1) * sizeof(struct gpa2hva_t); + g2h = kzalloc(g2h_tbl_size, GFP_KERNEL); + if (!g2h) { + ret = -ENOMEM; + goto end; + } + g2h->max_nums = map_head.tbl_nums + 1; + + /* fill the root parent address */ + if (gpa2hva_tbl_fill(g2h, data, data_gpa)) { + pr_err("[%s]: gpa2hva_tbl_fill for root data address failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + if (guest_addr_map_table_op(kvm, g2h, map_tbls, 0)) { + pr_err("[%s]: guest_addr_map_table_op for replacing failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + hbuf->data = data; + hbuf->data_size = data_size; + hbuf->map_tbls = map_tbls; + hbuf->g2h_tbls = g2h; + +end: + if (ret && data) + kfree(data); + return ret; +} + +/* + * The executed command data is recovered according to the multilevel + * pointers of the mapping table when the command has finished + * interacting with the psp device + */ +static int kvm_pv_psp_cmd_post_op(struct kvm *kvm, gpa_t data_gpa, + struct vpsp_hbuf_wrapper *hbuf) +{ + int ret = 0; + + if (hbuf->map_tbls) { + if (guest_addr_map_table_op(kvm, hbuf->g2h_tbls, + hbuf->map_tbls, 1)) { + pr_err("[%s]: guest_addr_map_table_op for restoring failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + + /* restore cmdresp's buffer from context */ + if (unlikely(kvm_write_guest(kvm, data_gpa, hbuf->data, + hbuf->data_size))) { + pr_err("[%s]: kvm_write_guest for cmdresp data failed\n", + __func__); + ret = -EFAULT; + goto end; + } + +end: + /* release memory and clear hbuf */ + kvm_pv_psp_mem_free(hbuf->g2h_tbls, hbuf->map_tbls, hbuf->data); + memset(hbuf, 0, sizeof(*hbuf)); + + return ret; +} + +static int cmd_type_is_tkm(int cmd) +{ + if (cmd >= TKM_CMD_ID_MIN && cmd <= TKM_CMD_ID_MAX) + return 1; + return 0; +} + +/* + * The primary implementation interface of the virtual PSP in kernel mode + */ +int kvm_pv_psp_op(struct kvm *kvm, int cmd, gpa_t data_gpa, gpa_t psp_ret_gpa, + gpa_t table_gpa) +{ + int ret = 0; + struct vpsp_ret psp_ret = {0}; + struct vpsp_hbuf_wrapper hbuf = {0}; + struct vpsp_cmd *vcmd = (struct vpsp_cmd *)&cmd; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + uint32_t index = 0; + uint32_t vid = 0; + + // only TKM commands need a vid + if (cmd_type_is_tkm(vcmd->cmd_id)) { + // check the permission to use the default vid when no vid is set + ret = vpsp_get_vid(&vid, kvm->userspace_pid); + if (ret && !vpsp_get_default_vid_permission()) { + pr_err("[%s]: not allowed tkm command without vid\n", __func__); + return -EFAULT; + } + } + + if (unlikely(kvm_read_guest(kvm, psp_ret_gpa, &psp_ret, + sizeof(psp_ret)))) + return -EFAULT; + + switch (psp_ret.status) { + case VPSP_INIT: + /* multilevel pointer replace */ + ret = kvm_pv_psp_cmd_pre_op(kvm, data_gpa, table_gpa, &hbuf); + if (unlikely(ret)) { + psp_ret.status = VPSP_FINISH; + pr_err("[%s]: kvm_pv_psp_cmd_pre_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + + /* try to send the command to the device for execution */ + ret = vpsp_try_do_cmd(vid, cmd, (void *)hbuf.data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_cmd failed\n", __func__); + ret = -EFAULT; + goto end; + } + + ret = -EFAULT; + if (psp_ret.status == VPSP_RUNNING) { + /* back up the host memory state for restoring later */ + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + g_hbuf_wrap[prio][psp_ret.index] = hbuf; + ret = 0; + } else if (psp_ret.status == VPSP_FINISH) { + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, &hbuf); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + break; + + case VPSP_RUNNING: + prio = vcmd->is_high_rb ? CSV_COMMAND_PRIORITY_HIGH : + CSV_COMMAND_PRIORITY_LOW; + index = psp_ret.index; + /* try to get the execution result from the ring buffer */ + ret = vpsp_try_get_result(vid, prio, index, g_hbuf_wrap[prio][index].data, + (struct vpsp_ret *)&psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed\n", __func__); + ret = -EFAULT; + goto end; + } + + ret = -EFAULT; + if (psp_ret.status == VPSP_RUNNING) { + ret = 0; + } else if (psp_ret.status == VPSP_FINISH) { + /* restore multilevel pointer data */ + ret = kvm_pv_psp_cmd_post_op(kvm, data_gpa, + &g_hbuf_wrap[prio][index]); + if (unlikely(ret)) { + pr_err("[%s]: kvm_pv_psp_cmd_post_op failed\n", + __func__); + ret = -EFAULT; + goto end; + } + } + break; + + default: + pr_err("[%s]: invalid command status\n", __func__); + ret = -EFAULT; + break; + } +end: + /* return psp_ret to guest */ + kvm_write_guest(kvm, psp_ret_gpa, &psp_ret, sizeof(psp_ret)); + return ret; +} diff --git a/arch/x86/kvm/kvm_emulate.h b/arch/x86/kvm/kvm_emulate.h index be7aeb9b8ea3b152b269870e5a737642a492e192..e6d149825169dda3ace396ca979923c4a2d108e8 100644 --- a/arch/x86/kvm/kvm_emulate.h +++ b/arch/x86/kvm/kvm_emulate.h @@ -88,6 +88,12 @@ struct x86_instruction_info { #define X86EMUL_IO_NEEDED 5 /* IO is needed to complete emulation */ #define X86EMUL_INTERCEPTED 6 /* Intercepted by nested VMCB/VMCS */ +/* x86-specific emulation flags */ +#define X86EMUL_F_WRITE BIT(0) +#define X86EMUL_F_FETCH BIT(1) +#define X86EMUL_F_IMPLICIT BIT(2) +#define X86EMUL_F_INVLPG BIT(3) + struct x86_emulate_ops { void (*vm_bugged)(struct x86_emulate_ctxt *ctxt); /* @@ -224,6 +230,9 @@ struct x86_emulate_ops { int (*leave_smm)(struct x86_emulate_ctxt *ctxt); void (*triple_fault)(struct x86_emulate_ctxt *ctxt); int (*set_xcr)(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr); + + gva_t (*get_untagged_addr)(struct x86_emulate_ctxt *ctxt, gva_t addr, + unsigned int flags); }; /* Type, address-of, and value of an instruction's operand.
*/ diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h index 253fb2093d5dadcbe7994a004c26e2ad54a054a7..f04cc5ade1cd3c4732e19f555773b107e6cd5984 100644 --- a/arch/x86/kvm/mmu.h +++ b/arch/x86/kvm/mmu.h @@ -146,6 +146,14 @@ static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu) return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu)); } +static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu) +{ + if (!guest_can_use(vcpu, X86_FEATURE_LAM)) + return 0; + + return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57); +} + static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu) { u64 root_hpa = vcpu->arch.mmu->root.hpa; diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index f7901cb4d2fa4b67a4dae93648a169fd2931585b..1551657ff77d1d09405f03cb6875248406f80950 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3774,7 +3774,7 @@ static int mmu_alloc_shadow_roots(struct kvm_vcpu *vcpu) hpa_t root; root_pgd = kvm_mmu_get_guest_pgd(vcpu, mmu); - root_gfn = root_pgd >> PAGE_SHIFT; + root_gfn = (root_pgd & __PT_BASE_ADDR_MASK) >> PAGE_SHIFT; if (!kvm_vcpu_is_visible_gfn(vcpu, root_gfn)) { mmu->root.hpa = kvm_mmu_get_dummy_root(); diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h index decc1f1536694f31529f3d8c2c7cf67d1185a8c0..68f8564d85a99b6577a41a03a4dc33284bea1073 100644 --- a/arch/x86/kvm/mmu/mmu_internal.h +++ b/arch/x86/kvm/mmu/mmu_internal.h @@ -13,6 +13,7 @@ #endif /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */ +#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12) #define __PT_LEVEL_SHIFT(level, bits_per_level) \ (PAGE_SHIFT + ((level) - 1) * (bits_per_level)) #define __PT_INDEX(address, level, bits_per_level) \ diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h index c85255073f67231f0d25417232c99c9eeceb8f7b..4d4e98fe4f3548baf9156f3a9e0fd67003df7fdf 100644 --- a/arch/x86/kvm/mmu/paging_tmpl.h +++ b/arch/x86/kvm/mmu/paging_tmpl.h @@ -62,7 +62,7 @@ #endif /* Common logic, but per-type values. These also need to be undefined. */ -#define PT_BASE_ADDR_MASK ((pt_element_t)(((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))) +#define PT_BASE_ADDR_MASK ((pt_element_t)__PT_BASE_ADDR_MASK) #define PT_LVL_ADDR_MASK(lvl) __PT_LVL_ADDR_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_LVL_OFFSET_MASK(lvl) __PT_LVL_OFFSET_MASK(PT_BASE_ADDR_MASK, lvl, PT_LEVEL_BITS) #define PT_INDEX(addr, lvl) __PT_INDEX(addr, lvl, PT_LEVEL_BITS) diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index b816506783755a1ce87d511014c9903fe3e25798..e059976a64b360a2a7a9d3add09df09f71f81b98 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -80,6 +80,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_8000_0007_EDX] = {0x80000007, 0, CPUID_EDX}, [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, [CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX}, + [CPUID_C000_0006_EAX] = {0xc0000006, 0, CPUID_EAX}, }; /* diff --git a/arch/x86/kvm/svm/csv.c b/arch/x86/kvm/svm/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..9b9d861695372225e56f50d5b44ed38683116579 --- /dev/null +++ b/arch/x86/kvm/svm/csv.c @@ -0,0 +1,1507 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "kvm_cache_regs.h" +#include "svm.h" +#include "csv.h" +#include "x86.h" + +#undef pr_fmt +#define pr_fmt(fmt) "CSV: " fmt + +struct encrypt_data_block { + struct { + u64 npages: 12; + u64 pfn: 52; + } entry[512]; +}; + +union csv3_page_attr { + struct { + u64 reserved: 1; + u64 rw: 1; + u64 reserved1: 49; + u64 mmio: 1; + u64 reserved2: 12; + }; + u64 val; +}; + +struct guest_paddr_block { + struct { + u64 share: 1; + u64 reserved: 11; + u64 gfn: 52; + } entry[512]; +}; + +struct trans_paddr_block { + u64 trans_paddr[512]; +}; + +struct vmcb_paddr_block { + u64 vmcb_paddr[512]; +}; + +enum csv3_pg_level { + CSV3_PG_LEVEL_NONE, + CSV3_PG_LEVEL_4K, + CSV3_PG_LEVEL_2M, + CSV3_PG_LEVEL_NUM +}; + +struct shared_page_block { + struct list_head list; + struct page **pages; + u64 count; +}; + +struct kvm_csv_info { + struct kvm_sev_info *sev; + + bool csv3_active; /* CSV3 enabled guest */ + + /* List of shared pages */ + u64 total_shared_page_count; + struct list_head shared_pages_list; + void *cached_shared_page_block; + struct mutex shared_page_block_lock; + + struct list_head smr_list; /* List of guest secure memory regions */ + unsigned long nodemask; /* Nodemask where CSV3 guest's memory resides */ +}; + +struct kvm_svm_csv { + struct kvm_svm kvm_svm; + struct kvm_csv_info csv_info; +}; + +struct secure_memory_region { + struct list_head list; + u64 npages; + u64 hpa; +}; + +static struct kvm_x86_ops csv_x86_ops; + +static inline struct kvm_svm_csv *to_kvm_svm_csv(struct kvm *kvm) +{ + return (struct kvm_svm_csv *)container_of(kvm, struct kvm_svm, kvm); +} + +static int to_csv3_pg_level(int level) +{ + int ret; + + switch (level) { + case PG_LEVEL_4K: + ret = CSV3_PG_LEVEL_4K; + break; + case PG_LEVEL_2M: + ret = CSV3_PG_LEVEL_2M; + break; + default: + ret = CSV3_PG_LEVEL_NONE; + } + + return ret; +} + +static bool csv3_guest(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + + return sev_es_guest(kvm) && csv->csv3_active; +} + +static int csv_sync_vmsa(struct vcpu_svm *svm) +{ + struct sev_es_save_area *save = svm->sev_es.vmsa; + + /* Check some debug related fields before encrypting the VMSA */ + if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1)) + return -EINVAL; + + memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save)); + + /* Sync registers per spec.
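+	 * The VMCB save area was copied above; additionally sync the GPRs + * that KVM caches outside of it (RAX, RDX, RIP) and the XCR0/XSS + * state.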
*/ + save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX]; + save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX]; + save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP]; + save->xcr0 = svm->vcpu.arch.xcr0; + save->xss = svm->vcpu.arch.ia32_xss; + + return 0; +} + +static int __csv_issue_cmd(int fd, int id, void *data, int *error) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = sev_issue_cmd_external_user(f.file, id, data, error); + + fdput(f); + return ret; +} + +static int csv_issue_cmd(struct kvm *kvm, int id, void *data, int *error) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_cmd(sev->fd, id, data, error); +} + +static inline void csv3_init_update_npt(struct csv3_data_update_npt *update_npt, + gpa_t gpa, u32 error, u32 handle) +{ + memset(update_npt, 0x00, sizeof(*update_npt)); + + update_npt->gpa = gpa & PAGE_MASK; + update_npt->error_code = error; + update_npt->handle = handle; +} + +static int csv3_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct kvm_csv3_init_data params; + + if (unlikely(csv->csv3_active)) + return -EINVAL; + + if (unlikely(!sev->es_active)) + return -EINVAL; + + if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + csv->csv3_active = true; + csv->sev = sev; + csv->nodemask = (unsigned long)params.nodemask; + + INIT_LIST_HEAD(&csv->shared_pages_list); + INIT_LIST_HEAD(&csv->smr_list); + mutex_init(&csv->shared_page_block_lock); + + return 0; +} + +static bool csv3_is_mmio_pfn(kvm_pfn_t pfn) +{ + return !e820__mapped_raw_any(pfn_to_hpa(pfn), + pfn_to_hpa(pfn + 1) - 1, + E820_TYPE_RAM); +} + +static int csv3_set_guest_private_memory(struct kvm *kvm) +{ + struct kvm_memslots *slots = kvm_memslots(kvm); + struct kvm_memory_slot *memslot; + struct secure_memory_region *smr; + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_set_guest_private_memory *set_guest_private_memory; + struct csv3_data_memory_region *regions; + nodemask_t nodemask; + nodemask_t *nodemask_ptr; + + LIST_HEAD(tmp_list); + struct list_head *pos, *q; + u32 i = 0, count = 0, remainder; + int ret = 0, error; + u64 size = 0, nr_smr = 0, nr_pages = 0; + u32 smr_entry_shift; + int bkt; + + unsigned int flags = FOLL_HWPOISON; + int npages; + struct page *page; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + nodes_clear(nodemask); + for_each_set_bit(i, &csv->nodemask, BITS_PER_LONG) + if (i < MAX_NUMNODES) + node_set(i, nodemask); + + nodemask_ptr = csv->nodemask ?
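+ /* user-specified NUMA nodes if any, otherwise any online node */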
+		&nodemask : &node_online_map;
+
+	set_guest_private_memory = kzalloc(sizeof(*set_guest_private_memory),
+					   GFP_KERNEL_ACCOUNT);
+	if (!set_guest_private_memory)
+		return -ENOMEM;
+
+	regions = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT);
+	if (!regions) {
+		kfree(set_guest_private_memory);
+		return -ENOMEM;
+	}
+
+	/* Get guest secure memory size */
+	kvm_for_each_memslot(memslot, bkt, slots) {
+		npages = get_user_pages_unlocked(memslot->userspace_addr, 1,
+						 &page, flags);
+		if (npages != 1)
+			continue;
+
+		nr_pages += memslot->npages;
+
+		put_page(page);
+	}
+
+	/*
+	 * NPT secure memory size
+	 *
+	 *   PTEs_entries = nr_pages
+	 *   PDEs_entries = nr_pages / 512
+	 *   PDPEs_entries = nr_pages / (512 * 512)
+	 *   PML4Es_entries = nr_pages / (512 * 512 * 512)
+	 *
+	 * Totals_entries = nr_pages + nr_pages / 512 + nr_pages / (512 * 512) +
+	 *   nr_pages / (512 * 512 * 512) <= nr_pages + nr_pages / 256
+	 *
+	 * Total_NPT_size = (Totals_entries / 512) * PAGE_SIZE = ((nr_pages +
+	 *   nr_pages / 256) / 512) * PAGE_SIZE = nr_pages * 8 + nr_pages / 32
+	 *   <= nr_pages * 9
+	 */
+	smr_entry_shift = csv_get_smr_entry_shift();
+	size = ALIGN((nr_pages << PAGE_SHIFT), 1UL << smr_entry_shift) +
+	       ALIGN(nr_pages * 9, 1UL << smr_entry_shift);
+	nr_smr = size >> smr_entry_shift;
+	remainder = nr_smr;
+	for (i = 0; i < nr_smr; i++) {
+		smr = kzalloc(sizeof(*smr), GFP_KERNEL_ACCOUNT);
+		if (!smr) {
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		smr->hpa = csv_alloc_from_contiguous((1UL << smr_entry_shift),
+						     nodemask_ptr,
+						     get_order(1 << smr_entry_shift));
+		if (!smr->hpa) {
+			kfree(smr);
+			ret = -ENOMEM;
+			goto e_free_smr;
+		}
+
+		smr->npages = ((1UL << smr_entry_shift) >> PAGE_SHIFT);
+		list_add_tail(&smr->list, &tmp_list);
+
+		regions[count].size = (1UL << smr_entry_shift);
+		regions[count].base_address = smr->hpa;
+		count++;
+
+		if (count >= (PAGE_SIZE / sizeof(regions[0])) || (remainder == count)) {
+			set_guest_private_memory->nregions = count;
+			set_guest_private_memory->handle = sev->handle;
+			set_guest_private_memory->regions_paddr = __sme_pa(regions);
+
+			/* set secure memory region for launch encrypt data */
+			ret = csv_issue_cmd(kvm, CSV3_CMD_SET_GUEST_PRIVATE_MEMORY,
+					    set_guest_private_memory, &error);
+			if (ret)
+				goto e_free_smr;
+
+			memset(regions, 0, PAGE_SIZE);
+			remainder -= count;
+			count = 0;
+		}
+	}
+
+	list_splice(&tmp_list, &csv->smr_list);
+
+	goto done;
+
+e_free_smr:
+	if (!list_empty(&tmp_list)) {
+		list_for_each_safe(pos, q, &tmp_list) {
+			smr = list_entry(pos, struct secure_memory_region, list);
+			if (smr) {
+				csv_release_to_contiguous(smr->hpa,
+							  smr->npages << PAGE_SHIFT);
+				list_del(&smr->list);
+				kfree(smr);
+			}
+		}
+	}
+done:
+	kfree(set_guest_private_memory);
+	kfree(regions);
+	return ret;
+}
+
+static int csv3_launch_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info;
+	struct kvm_csv3_launch_encrypt_data params;
+	struct csv3_data_launch_encrypt_data *encrypt_data = NULL;
+	struct encrypt_data_block *blocks = NULL;
+	u8 *data = NULL;
+	u32 offset;
+	u32 num_entries, num_entries_in_block;
+	u32 num_blocks, num_blocks_max;
+	u32 i, n;
+	unsigned long pfn, pfn_sme_mask;
+	int ret = 0;
+
+	if (!csv3_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(params))) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	if ((params.len & ~PAGE_MASK) || !params.len || !params.uaddr) {
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* Allocate all the guest memory from CMA */
+	ret =
csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + + num_entries = params.len / PAGE_SIZE; + num_entries_in_block = ARRAY_SIZE(blocks->entry); + num_blocks = (num_entries + num_entries_in_block - 1) / num_entries_in_block; + num_blocks_max = ARRAY_SIZE(encrypt_data->data_blocks); + + if (num_blocks >= num_blocks_max) { + ret = -EINVAL; + goto exit; + } + + data = vzalloc(params.len); + if (!data) { + ret = -ENOMEM; + goto exit; + } + if (copy_from_user(data, (void __user *)params.uaddr, params.len)) { + ret = -EFAULT; + goto data_free; + } + + blocks = vzalloc(num_blocks * sizeof(*blocks)); + if (!blocks) { + ret = -ENOMEM; + goto data_free; + } + + for (offset = 0, i = 0, n = 0; offset < params.len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + data); + pfn_sme_mask = __sme_set(pfn << PAGE_SHIFT) >> PAGE_SHIFT; + if (offset && ((blocks[n].entry[i].pfn + 1) == pfn_sme_mask)) + blocks[n].entry[i].npages += 1; + else { + if (offset) { + i = (i + 1) % num_entries_in_block; + n = (i == 0) ? (n + 1) : n; + } + blocks[n].entry[i].pfn = pfn_sme_mask; + blocks[n].entry[i].npages = 1; + } + } + + encrypt_data = kzalloc(sizeof(*encrypt_data), GFP_KERNEL); + if (!encrypt_data) { + ret = -ENOMEM; + goto block_free; + } + + encrypt_data->handle = csv->sev->handle; + encrypt_data->length = params.len; + encrypt_data->gpa = params.gpa; + for (i = 0; i <= n; i++) { + encrypt_data->data_blocks[i] = + __sme_set(vmalloc_to_pfn((void *)blocks + i * sizeof(*blocks)) << PAGE_SHIFT); + } + + clflush_cache_range(data, params.len); + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_DATA, + encrypt_data, &argp->error); + + kfree(encrypt_data); +block_free: + vfree(blocks); +data_free: + vfree(data); +exit: + return ret; +} + +static int csv3_launch_encrypt_vmcb(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_launch_encrypt_vmcb *encrypt_vmcb = NULL; + struct kvm_vcpu *vcpu; + int ret = 0; + unsigned long i = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + encrypt_vmcb = kzalloc(sizeof(*encrypt_vmcb), GFP_KERNEL); + if (!encrypt_vmcb) { + ret = -ENOMEM; + goto exit; + } + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = csv_sync_vmsa(svm); + if (ret) + goto e_free; + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + clflush_cache_range(svm->vmcb, PAGE_SIZE); + encrypt_vmcb->handle = csv->sev->handle; + encrypt_vmcb->vcpu_id = i; + encrypt_vmcb->vmsa_addr = __sme_pa(svm->sev_es.vmsa); + encrypt_vmcb->vmsa_len = PAGE_SIZE; + encrypt_vmcb->shadow_vmcb_addr = __sme_pa(svm->vmcb); + encrypt_vmcb->shadow_vmcb_len = PAGE_SIZE; + ret = csv_issue_cmd(kvm, CSV3_CMD_LAUNCH_ENCRYPT_VMCB, + encrypt_vmcb, &argp->error); + if (ret) + goto e_free; + + svm->current_vmcb->pa = encrypt_vmcb->secure_vmcb_addr; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free: + kfree(encrypt_vmcb); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. 
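+ *
+ * Calling the command with hdr_len or trans_len set to zero is a pure
+ * length query: the firmware fills in the sizes it needs and no guest
+ * data moves.  A migration tool therefore issues the ioctl twice; a
+ * rough userspace-side sketch (vm_fd and the buffer bookkeeping are
+ * assumed, not part of this patch):
+ *
+ *	struct kvm_csv3_send_encrypt_data d = { 0 };
+ *	struct kvm_sev_cmd cmd = {
+ *		.id = KVM_CSV3_SEND_ENCRYPT_DATA, .data = (__u64)&d,
+ *	};
+ *
+ *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);	// query lengths
+ *	d.hdr_uaddr = (__u64)malloc(d.hdr_len);
+ *	d.trans_uaddr = (__u64)malloc(d.trans_len);
+ *	// fill d.guest_addr_data / d.guest_addr_len, then:
+ *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);	// transfer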
*/ +static int +csv3_send_encrypt_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_data *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +#define CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE 0x00000000 +#define CSV3_SEND_ENCRYPT_DATA_SET_READONLY 0x00000001 +static int csv3_send_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_data data; + struct kvm_csv3_send_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + u32 offset; + int ret = 0; + int i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_data_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.guest_addr_data || + !params.guest_addr_len || !params.hdr_uaddr) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + if ((params.trans_len & PAGE_MASK) == 0 || + (params.trans_len & ~PAGE_MASK) != 0) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + + data.flag = CSV3_SEND_ENCRYPT_DATA_SET_READONLY; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + goto e_free_trans_data; + + kvm_flush_remote_tlbs(kvm); + + data.flag = CSV3_SEND_ENCRYPT_DATA_MIGRATE_PAGE; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_DATA, &data, &argp->error); + if (ret) + 
goto e_free_trans_data; + + ret = -EFAULT; + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) + goto e_free_trans_data; + + /* copy guest address block to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.guest_addr_data, + guest_block, params.guest_addr_len)) + goto e_free_trans_data; + + /* copy packet header to userspace. */ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) + goto e_free_trans_data; + + ret = 0; +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +/* Userspace wants to query either header or trans length. */ +static int +csv3_send_encrypt_context_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_csv3_send_encrypt_context *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + int ret; + + memset(&data, 0, sizeof(data)); + data.handle = sev->handle; + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + params->hdr_len = data.hdr_len; + params->trans_len = data.trans_len; + + if (copy_to_user((void __user *)(uintptr_t)argp->data, params, sizeof(*params))) + ret = -EFAULT; + + return ret; +} + +static int csv3_send_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_send_encrypt_context data; + struct kvm_csv3_send_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + unsigned long pfn; + unsigned long i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return csv3_send_encrypt_context_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.handle = sev->handle; + + /* flush hdr, trans data, trans block, secure VMSAs */ + wbinvd_on_all_cpus(); + + ret = csv_issue_cmd(kvm, CSV3_CMD_SEND_ENCRYPT_CONTEXT, &data, &argp->error); + + if (ret) + goto e_free_trans_data; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + /* copy packet header to userspace. 
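+	 * The vmalloc_to_pfn() walk that filled trans_block above recurs in
+	 * the send/receive data and context paths; were it ever factored
+	 * out, a shared helper could look like this (a sketch only, not
+	 * part of this patch):
+	 *
+	 *	static void fill_trans_block(struct trans_paddr_block *tb,
+	 *				     void *buf, u32 len)
+	 *	{
+	 *		u32 off, i = 0;
+	 *
+	 *		for (off = 0; off < len; off += PAGE_SIZE)
+	 *			tb->trans_paddr[i++] = __sme_set(
+	 *				pfn_to_hpa(vmalloc_to_pfn(buf + off)));
+	 *	}
+	 *
+	 * With 512 entries per block, trans_len is capped at 2 MiB, which
+	 * the validation at the top of this function enforces.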
*/ + if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_data(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct csv3_data_receive_encrypt_data data; + struct kvm_csv3_receive_encrypt_data params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct guest_paddr_block *guest_block; + unsigned long pfn; + int i; + u32 offset; + int ret = 0; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (unlikely(list_empty(&csv->smr_list))) { + /* Allocate all the guest memory from CMA */ + ret = csv3_set_guest_private_memory(kvm); + if (ret) + goto exit; + } + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.hdr_uaddr || !params.hdr_len || + !params.guest_addr_data || !params.guest_addr_len || + !params.trans_uaddr || !params.trans_len) + return -EINVAL; + + if (params.guest_addr_len > sizeof(*guest_block)) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + guest_block = kzalloc(sizeof(*guest_block), GFP_KERNEL_ACCOUNT); + if (!guest_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + + if (copy_from_user(guest_block, + (void __user *)(uintptr_t)params.guest_addr_data, + params.guest_addr_len)) { + ret = -EFAULT; + goto e_free_guest_block; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_guest_block; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + memset(&data, 0, sizeof(data)); + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.guest_block = __psp_pa(guest_block); + data.guest_len = params.guest_addr_len; + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(guest_block, PAGE_SIZE); + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_DATA, &data, + &argp->error); + +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_guest_block: + kfree(guest_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static int csv3_receive_encrypt_context(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct csv3_data_receive_encrypt_context data; + struct 
kvm_csv3_receive_encrypt_context params; + void *hdr; + void *trans_data; + struct trans_paddr_block *trans_block; + struct vmcb_paddr_block *shadow_vmcb_block; + struct vmcb_paddr_block *secure_vmcb_block; + unsigned long pfn; + u32 offset; + int ret = 0; + struct kvm_vcpu *vcpu; + unsigned long i; + + if (!csv3_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(params))) + return -EFAULT; + + if (!params.trans_uaddr || !params.trans_len || + !params.hdr_uaddr || !params.hdr_len) + return -EINVAL; + + if (params.trans_len > ARRAY_SIZE(trans_block->trans_paddr) * PAGE_SIZE) + return -EINVAL; + + /* allocate memory for header and transport buffer */ + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) { + ret = -ENOMEM; + goto exit; + } + + if (copy_from_user(hdr, + (void __user *)(uintptr_t)params.hdr_uaddr, + params.hdr_len)) { + ret = -EFAULT; + goto e_free_hdr; + } + + trans_block = kzalloc(sizeof(*trans_block), GFP_KERNEL_ACCOUNT); + if (!trans_block) { + ret = -ENOMEM; + goto e_free_hdr; + } + trans_data = vzalloc(params.trans_len); + if (!trans_data) { + ret = -ENOMEM; + goto e_free_trans_block; + } + + if (copy_from_user(trans_data, + (void __user *)(uintptr_t)params.trans_uaddr, + params.trans_len)) { + ret = -EFAULT; + goto e_free_trans_data; + } + + for (offset = 0, i = 0; offset < params.trans_len; offset += PAGE_SIZE) { + pfn = vmalloc_to_pfn(offset + trans_data); + trans_block->trans_paddr[i] = __sme_set(pfn_to_hpa(pfn)); + i++; + } + + secure_vmcb_block = kzalloc(sizeof(*secure_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!secure_vmcb_block) { + ret = -ENOMEM; + goto e_free_trans_data; + } + + shadow_vmcb_block = kzalloc(sizeof(*shadow_vmcb_block), + GFP_KERNEL_ACCOUNT); + if (!shadow_vmcb_block) { + ret = -ENOMEM; + goto e_free_secure_vmcb_block; + } + + memset(&data, 0, sizeof(data)); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(shadow_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + shadow_vmcb_block->vmcb_paddr[i] = __sme_pa(svm->vmcb); + data.vmcb_block_len += sizeof(shadow_vmcb_block->vmcb_paddr[0]); + } + + data.hdr_address = __psp_pa(hdr); + data.hdr_len = params.hdr_len; + data.trans_block = __psp_pa(trans_block); + data.trans_len = params.trans_len; + data.shadow_vmcb_block = __psp_pa(shadow_vmcb_block); + data.secure_vmcb_block = __psp_pa(secure_vmcb_block); + data.handle = sev->handle; + + clflush_cache_range(hdr, params.hdr_len); + clflush_cache_range(trans_data, params.trans_len); + clflush_cache_range(trans_block, PAGE_SIZE); + clflush_cache_range(shadow_vmcb_block, PAGE_SIZE); + clflush_cache_range(secure_vmcb_block, PAGE_SIZE); + + ret = csv_issue_cmd(kvm, CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT, &data, + &argp->error); + if (ret) + goto e_free_shadow_vmcb_block; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + if (i >= ARRAY_SIZE(secure_vmcb_block->vmcb_paddr)) { + ret = -EINVAL; + goto e_free_shadow_vmcb_block; + } + + svm->current_vmcb->pa = secure_vmcb_block->vmcb_paddr[i]; + svm->vcpu.arch.guest_state_protected = true; + } + +e_free_shadow_vmcb_block: + kfree(shadow_vmcb_block); +e_free_secure_vmcb_block: + kfree(secure_vmcb_block); +e_free_trans_data: + vfree(trans_data); +e_free_trans_block: + kfree(trans_block); +e_free_hdr: + kfree(hdr); +exit: + return ret; +} + +static void csv3_mark_page_dirty(struct kvm_vcpu *vcpu, gva_t gpa, + unsigned long npages) +{ + gfn_t gfn; + 
gfn_t gfn_end; + + gfn = gpa >> PAGE_SHIFT; + gfn_end = gfn + npages; +#ifdef KVM_HAVE_MMU_RWLOCK + write_lock(&vcpu->kvm->mmu_lock); +#else + spin_lock(&vcpu->kvm->mmu_lock); +#endif + for (; gfn < gfn_end; gfn++) + kvm_vcpu_mark_page_dirty(vcpu, gfn); +#ifdef KVM_HAVE_MMU_RWLOCK + write_unlock(&vcpu->kvm->mmu_lock); +#else + spin_unlock(&vcpu->kvm->mmu_lock); +#endif +} + +static int csv3_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code) +{ + int r = 0; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + union csv3_page_attr page_attr = {.mmio = 1}; + union csv3_page_attr page_attr_mask = {.mmio = 1}; + struct csv3_data_update_npt *update_npt; + int psp_ret; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + update_npt->page_attr = page_attr.val; + update_npt->page_attr_mask = page_attr_mask.val; + update_npt->level = CSV3_PG_LEVEL_4K; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + if (psp_ret != SEV_RET_SUCCESS) + r = -EFAULT; + + kfree(update_npt); +exit: + return r; +} + +static int __csv3_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, + u32 error_code, struct kvm_memory_slot *slot, + int *psp_ret_ptr, kvm_pfn_t pfn, u32 level) +{ + int r = 0; + struct csv3_data_update_npt *update_npt; + struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); + int psp_ret = 0; + + update_npt = kzalloc(sizeof(*update_npt), GFP_KERNEL); + if (!update_npt) { + r = -ENOMEM; + goto exit; + } + + csv3_init_update_npt(update_npt, gpa, error_code, + kvm_svm->sev_info.handle); + + update_npt->spa = pfn << PAGE_SHIFT; + update_npt->level = level; + + if (!csv3_is_mmio_pfn(pfn)) + update_npt->spa |= sme_me_mask; + + r = csv_issue_cmd(vcpu->kvm, CSV3_CMD_UPDATE_NPT, update_npt, &psp_ret); + + kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); + kvm_flush_remote_tlbs(vcpu->kvm); + + csv3_mark_page_dirty(vcpu, update_npt->gpa, update_npt->npages); + + if (psp_ret_ptr) + *psp_ret_ptr = psp_ret; + + kfree(update_npt); +exit: + return r; +} + +static int csv3_pin_shared_memory(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *slot, gfn_t gfn, + kvm_pfn_t *pfn) +{ + struct page **pages, *page; + u64 hva; + int npinned; + kvm_pfn_t tmp_pfn; + struct kvm *kvm = vcpu->kvm; + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct shared_page_block *shared_page_block = NULL; + u64 npages = PAGE_SIZE / sizeof(struct page *); + bool write = !(slot->flags & KVM_MEM_READONLY); + + tmp_pfn = __gfn_to_pfn_memslot(slot, gfn, false, false, NULL, write, + NULL, NULL); + if (unlikely(is_error_pfn(tmp_pfn))) + return -ENOMEM; + + if (csv3_is_mmio_pfn(tmp_pfn)) { + *pfn = tmp_pfn; + return 0; + } + + if (!page_maybe_dma_pinned(pfn_to_page(tmp_pfn))) { + kvm_release_pfn_clean(tmp_pfn); + if (csv->total_shared_page_count % npages == 0) { + shared_page_block = kzalloc(sizeof(*shared_page_block), + GFP_KERNEL_ACCOUNT); + if (!shared_page_block) + return -ENOMEM; + + pages = kzalloc(PAGE_SIZE, GFP_KERNEL_ACCOUNT); + if (!pages) { + kfree(shared_page_block); + return -ENOMEM; + } + + shared_page_block->pages = pages; + list_add_tail(&shared_page_block->list, + &csv->shared_pages_list); + csv->cached_shared_page_block = shared_page_block; + } else { + shared_page_block = csv->cached_shared_page_block; + pages = shared_page_block->pages; + } + + hva = __gfn_to_hva_memslot(slot, gfn); + npinned = pin_user_pages_fast(hva, 1, FOLL_WRITE | FOLL_LONGTERM, + 
&page); + if (npinned != 1) { + if (shared_page_block->count == 0) { + list_del(&shared_page_block->list); + kfree(pages); + kfree(shared_page_block); + } + return -ENOMEM; + } + + pages[csv->total_shared_page_count % npages] = page; + shared_page_block->count++; + csv->total_shared_page_count++; + *pfn = page_to_pfn(page); + } else { + kvm_release_pfn_clean(tmp_pfn); + *pfn = tmp_pfn; + } + + return 0; +} + +static int __pfn_mapping_level(struct kvm *kvm, gfn_t gfn, + const struct kvm_memory_slot *slot) +{ + int level = PG_LEVEL_4K; + unsigned long hva; + unsigned long flags; + pgd_t pgd; + p4d_t p4d; + pud_t pud; + pmd_t pmd; + + /* + * Note, using the already-retrieved memslot and __gfn_to_hva_memslot() + * is not solely for performance, it's also necessary to avoid the + * "writable" check in __gfn_to_hva_many(), which will always fail on + * read-only memslots due to gfn_to_hva() assuming writes. Earlier + * page fault steps have already verified the guest isn't writing a + * read-only memslot. + */ + hva = __gfn_to_hva_memslot(slot, gfn); + + /* + * Disable IRQs to prevent concurrent tear down of host page tables, + * e.g. if the primary MMU promotes a P*D to a huge page and then frees + * the original page table. + */ + local_irq_save(flags); + + /* + * Read each entry once. As above, a non-leaf entry can be promoted to + * a huge page _during_ this walk. Re-reading the entry could send the + * walk into the weeks, e.g. p*d_large() returns false (sees the old + * value) and then p*d_offset() walks into the target huge page instead + * of the old page table (sees the new value). + */ + pgd = READ_ONCE(*pgd_offset(kvm->mm, hva)); + if (pgd_none(pgd)) + goto out; + + p4d = READ_ONCE(*p4d_offset(&pgd, hva)); + if (p4d_none(p4d) || !p4d_present(p4d)) + goto out; + + pud = READ_ONCE(*pud_offset(&p4d, hva)); + if (pud_none(pud) || !pud_present(pud)) + goto out; + + if (pud_large(pud)) { + level = PG_LEVEL_1G; + goto out; + } + + pmd = READ_ONCE(*pmd_offset(&pud, hva)); + if (pmd_none(pmd) || !pmd_present(pmd)) + goto out; + + if (pmd_large(pmd)) + level = PG_LEVEL_2M; + +out: + local_irq_restore(flags); + return level; +} + +static int csv3_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn, + struct kvm_memory_slot *slot) +{ + int level; + int page_num; + gfn_t gfn_base; + + if (csv3_is_mmio_pfn(pfn)) { + level = PG_LEVEL_4K; + goto end; + } + + if (!PageCompound(pfn_to_page(pfn))) { + level = PG_LEVEL_4K; + goto end; + } + + level = PG_LEVEL_2M; + page_num = KVM_PAGES_PER_HPAGE(level); + gfn_base = gfn & ~(page_num - 1); + + /* + * 2M aligned guest address in memslot. + */ + if ((gfn_base < slot->base_gfn) || + (gfn_base + page_num > slot->base_gfn + slot->npages)) { + level = PG_LEVEL_4K; + goto end; + } + + /* + * hva in memslot is 2M aligned. + */ + if (__gfn_to_hva_memslot(slot, gfn_base) & ~PMD_MASK) { + level = PG_LEVEL_4K; + goto end; + } + + level = __pfn_mapping_level(vcpu->kvm, gfn, slot); + + /* + * Firmware supports 2M/4K level. + */ + level = level > PG_LEVEL_2M ? 
PG_LEVEL_2M : level; + +end: + return to_csv3_pg_level(level); +} + +static int csv3_page_fault(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot, + gfn_t gfn, u32 error_code) +{ + int ret = 0; + int psp_ret = 0; + int level; + kvm_pfn_t pfn; + struct kvm_csv_info *csv = &to_kvm_svm_csv(vcpu->kvm)->csv_info; + + if (error_code & PFERR_PRESENT_MASK) + level = CSV3_PG_LEVEL_4K; + else { + mutex_lock(&csv->shared_page_block_lock); + ret = csv3_pin_shared_memory(vcpu, slot, gfn, &pfn); + mutex_unlock(&csv->shared_page_block_lock); + if (ret) + goto exit; + + level = csv3_mapping_level(vcpu, gfn, pfn, slot); + } + + ret = __csv3_page_fault(vcpu, gfn << PAGE_SHIFT, error_code, slot, + &psp_ret, pfn, level); + + if (psp_ret != SEV_RET_SUCCESS) + ret = -EFAULT; +exit: + return ret; +} + +static void csv_vm_destroy(struct kvm *kvm) +{ + struct kvm_csv_info *csv = &to_kvm_svm_csv(kvm)->csv_info; + struct list_head *head = &csv->shared_pages_list; + struct list_head *pos, *q; + struct shared_page_block *shared_page_block; + struct kvm_vcpu *vcpu; + unsigned long i = 0; + + struct list_head *smr_head = &csv->smr_list; + struct secure_memory_region *smr; + + if (csv3_guest(kvm)) { + mutex_lock(&csv->shared_page_block_lock); + if (!list_empty(head)) { + list_for_each_safe(pos, q, head) { + shared_page_block = list_entry(pos, + struct shared_page_block, list); + unpin_user_pages(shared_page_block->pages, + shared_page_block->count); + kfree(shared_page_block->pages); + csv->total_shared_page_count -= + shared_page_block->count; + list_del(&shared_page_block->list); + kfree(shared_page_block); + } + } + mutex_unlock(&csv->shared_page_block_lock); + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + svm->current_vmcb->pa = __sme_pa(svm->vmcb); + } + } + + if (likely(csv_x86_ops.vm_destroy)) + csv_x86_ops.vm_destroy(kvm); + + if (!csv3_guest(kvm)) + return; + + /* free secure memory region */ + if (!list_empty(smr_head)) { + list_for_each_safe(pos, q, smr_head) { + smr = list_entry(pos, struct secure_memory_region, list); + if (smr) { + csv_release_to_contiguous(smr->hpa, smr->npages << PAGE_SHIFT); + list_del(&smr->list); + kfree(smr); + } + } + } +} + +static int csv3_handle_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, + u32 error_code) +{ + gfn_t gfn = gpa_to_gfn(gpa); + struct kvm_memory_slot *slot = gfn_to_memslot(vcpu->kvm, gfn); + int ret; + int r = -EIO; + + if (kvm_is_visible_memslot(slot)) + ret = csv3_page_fault(vcpu, slot, gfn, error_code); + else + ret = csv3_mmio_page_fault(vcpu, gpa, error_code); + + if (!ret) + r = 1; + + return r; +} + +static int csv_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath) +{ + struct vcpu_svm *svm = to_svm(vcpu); + u32 exit_code = svm->vmcb->control.exit_code; + int ret = -EIO; + + /* + * NPF for csv3 is dedicated. 
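+	 * For #NPF, exit_info_1 carries the page-fault error code and
+	 * exit_info_2 the faulting guest physical address (the C-bit is
+	 * masked off below before it is used as a GPA).  Downstream,
+	 * csv3_page_fault() keys off the error code roughly as follows
+	 * (condensed from the code above, shown here for orientation):
+	 *
+	 *	if (error_code & PFERR_PRESENT_MASK)
+	 *		level = CSV3_PG_LEVEL_4K;   // permission/attr update
+	 *	else
+	 *		// pin the shared page, then pick a 4K or 2M level
+	 *		level = csv3_mapping_level(vcpu, gfn, pfn, slot);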
+ */ + if (csv3_guest(vcpu->kvm) && exit_code == SVM_EXIT_NPF) { + gpa_t gpa = __sme_clr(svm->vmcb->control.exit_info_2); + u64 error_code = svm->vmcb->control.exit_info_1; + + ret = csv3_handle_page_fault(vcpu, gpa, error_code); + } else { + if (likely(csv_x86_ops.handle_exit)) + ret = csv_x86_ops.handle_exit(vcpu, exit_fastpath); + } + + return ret; +} + +static void csv_guest_memory_reclaimed(struct kvm *kvm) +{ + if (!csv3_guest(kvm)) { + if (likely(csv_x86_ops.guest_memory_reclaimed)) + csv_x86_ops.guest_memory_reclaimed(kvm); + } +} + +static int csv_mem_enc_op(struct kvm *kvm, void __user *argp) +{ + struct kvm_sev_cmd sev_cmd; + int r = -EINVAL; + + if (!argp) + return 0; + + if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd))) + return -EFAULT; + + mutex_lock(&kvm->lock); + + switch (sev_cmd.id) { + case KVM_CSV3_INIT: + r = csv3_guest_init(kvm, &sev_cmd); + break; + case KVM_CSV3_LAUNCH_ENCRYPT_DATA: + r = csv3_launch_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_LAUNCH_ENCRYPT_VMCB: + r = csv3_launch_encrypt_vmcb(kvm, &sev_cmd); + break; + case KVM_CSV3_SEND_ENCRYPT_DATA: + r = csv3_send_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_SEND_ENCRYPT_CONTEXT: + r = csv3_send_encrypt_context(kvm, &sev_cmd); + break; + case KVM_CSV3_RECEIVE_ENCRYPT_DATA: + r = csv3_receive_encrypt_data(kvm, &sev_cmd); + break; + case KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT: + r = csv3_receive_encrypt_context(kvm, &sev_cmd); + break; + default: + mutex_unlock(&kvm->lock); + if (likely(csv_x86_ops.mem_enc_ioctl)) + r = csv_x86_ops.mem_enc_ioctl(kvm, argp); + goto out; + } + + mutex_unlock(&kvm->lock); + + if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd))) + r = -EFAULT; + +out: + return r; +} + +void __init csv_init(struct kvm_x86_ops *ops) +{ + if (boot_cpu_has(X86_FEATURE_CSV3)) { + memcpy(&csv_x86_ops, ops, sizeof(struct kvm_x86_ops)); + + ops->mem_enc_ioctl = csv_mem_enc_op; + ops->vm_destroy = csv_vm_destroy; + ops->vm_size = sizeof(struct kvm_svm_csv); + ops->handle_exit = csv_handle_exit; + ops->guest_memory_reclaimed = csv_guest_memory_reclaimed; + } +} diff --git a/arch/x86/kvm/svm/csv.h b/arch/x86/kvm/svm/csv.h new file mode 100644 index 0000000000000000000000000000000000000000..df5cf9ea9422084676853e851d551dd6d959bb7f --- /dev/null +++ b/arch/x86/kvm/svm/csv.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * CSV driver for KVM + * + * HYGON CSV support + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __SVM_CSV_H +#define __SVM_CSV_H + +#ifdef CONFIG_HYGON_CSV + +void __init csv_init(struct kvm_x86_ops *ops); + +#else /* !CONFIG_HYGON_CSV */ + +static inline void __init csv_init(struct kvm_x86_ops *ops) { } + +#endif /* CONFIG_HYGON_CSV */ + +#endif diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c index 3fea8c47679e6899742c6f5aa08046da041439a2..90ca9489aab63e4ef3b778d05afe6f2208b1fa1f 100644 --- a/arch/x86/kvm/svm/nested.c +++ b/arch/x86/kvm/svm/nested.c @@ -311,7 +311,7 @@ static bool __nested_vmcb_check_save(struct kvm_vcpu *vcpu, if ((save->efer & EFER_LME) && (save->cr0 & X86_CR0_PG)) { if (CC(!(save->cr4 & X86_CR4_PAE)) || CC(!(save->cr0 & X86_CR0_PE)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, save->cr3))) + CC(!kvm_vcpu_is_legal_cr3(vcpu, save->cr3))) return false; } @@ -520,7 +520,7 @@ static void nested_svm_transition_tlb_flush(struct kvm_vcpu *vcpu) static int nested_svm_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_npt, bool reload_pdptrs) { - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) + if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) return -EINVAL; if (reload_pdptrs && !nested_npt && is_pae_paging(vcpu) && diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c index 4900c078045acc1bf437859f46e9acbe9c66b0b4..d25fe184ae32fade3ebec49650630528c26f7458 100644 --- a/arch/x86/kvm/svm/sev.c +++ b/arch/x86/kvm/svm/sev.c @@ -75,6 +75,13 @@ static unsigned int nr_asids; static unsigned long *sev_asid_bitmap; static unsigned long *sev_reclaim_asid_bitmap; +static DEFINE_MUTEX(csv_cmd_batch_mutex); + +static const char sev_vm_mnonce[] = "VM_ATTESTATION"; + +static int alloc_trans_mempool(void); +static void free_trans_mempool(void); + struct enc_region { struct list_head list; unsigned long npages; @@ -83,6 +90,17 @@ struct enc_region { unsigned long size; }; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +#define ASID_USERID_LENGTH 20 +struct csv_asid_userid { + int refcnt; // reference count of the ASID + u32 userid_len; + char userid[ASID_USERID_LENGTH]; +}; + +static struct csv_asid_userid *csv_asid_userid_array; +#endif + /* Called with the sev_bitmap_lock held, or on shutdown */ static int sev_flush_asids(int min_asid, int max_asid) { @@ -141,7 +159,11 @@ static void sev_misc_cg_uncharge(struct kvm_sev_info *sev) misc_cg_uncharge(type, sev->misc_cg, 1); } +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID +static int sev_asid_new(struct kvm_sev_info *sev, const char *userid, u32 userid_len) +#else static int sev_asid_new(struct kvm_sev_info *sev) +#endif { int asid, min_asid, max_asid, ret; bool retry = true; @@ -157,12 +179,47 @@ static int sev_asid_new(struct kvm_sev_info *sev) mutex_lock(&sev_bitmap_lock); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* For Hygon CPU, check whether the userid exists */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + userid && userid_len) { + int i = !min_sev_asid ? 1 : min_sev_asid; + + for (; i <= max_sev_asid; i++) { + /* skip ASIDs without correspond userid */ + if (!csv_asid_userid_array[i].userid_len) + continue; + + /* skip if length of userid is different */ + if (csv_asid_userid_array[i].userid_len != userid_len) + continue; + + if (!memcmp(csv_asid_userid_array[i].userid, + userid, userid_len)) { + pr_debug("Found reusable asid %d\n", i); + /* Increase reference count if userid exists */ + csv_asid_userid_array[i].refcnt++; + + mutex_unlock(&sev_bitmap_lock); + return i; + } + } + } +#endif + /* * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid. 
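+	 *
+	 * On Hygon parts the optional userid acts as a share key: guests
+	 * presenting the same userid land on the same refcounted ASID,
+	 * which stretches the limited ASID space across cooperating VMs.
+	 * A hedged sketch of the userspace side, using the field names
+	 * this patch consumes in sev_guest_init():
+	 *
+	 *	struct kvm_csv_init init = {
+	 *		.userid_addr = (__u64)"tenant-42",	// illustrative
+	 *		.len = sizeof("tenant-42") - 1,
+	 *	};
+	 *	struct kvm_sev_cmd cmd = {
+	 *		.id = KVM_SEV_INIT, .data = (__u64)&init,
+	 *	};
+	 *
+	 *	ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd);
+	 *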
+	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
+	 */
 	min_asid = sev->es_active ? 1 : min_sev_asid;
 	max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
+
+	/*
+	 * No matter what the min_sev_asid is, all asids in range
+	 * [1, max_sev_asid] can be used for CSV2 guest on Hygon CPUs.
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		max_asid = max_sev_asid;
 again:
 	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
 	if (asid > max_asid) {
@@ -177,6 +234,16 @@ static int sev_asid_new(struct kvm_sev_info *sev)
 
 	__set_bit(asid, sev_asid_bitmap);
 
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+	/* For Hygon CPU, initialize the new userid */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    userid && userid_len) {
+		memcpy(csv_asid_userid_array[asid].userid, userid, userid_len);
+		csv_asid_userid_array[asid].userid_len = userid_len;
+		csv_asid_userid_array[asid].refcnt = 1;
+	}
+#endif
+
 	mutex_unlock(&sev_bitmap_lock);
 
 	return asid;
@@ -201,7 +268,25 @@ static void sev_asid_free(struct kvm_sev_info *sev)
 
 	mutex_lock(&sev_bitmap_lock);
 
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+	/* For Hygon CPU, decrease the reference count if the userid exists */
+	if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) &&
+	    csv_asid_userid_array[sev->asid].userid_len) {
+		/* If we reach here, the reference count must be larger than 0. */
+		WARN_ON(csv_asid_userid_array[sev->asid].refcnt <= 0);
+
+		if (--csv_asid_userid_array[sev->asid].refcnt == 0) {
+			__set_bit(sev->asid, sev_reclaim_asid_bitmap);
+
+			memset(&csv_asid_userid_array[sev->asid], 0,
+			       sizeof(struct csv_asid_userid));
+		}
+	} else {
+		__set_bit(sev->asid, sev_reclaim_asid_bitmap);
+	}
+#else
 	__set_bit(sev->asid, sev_reclaim_asid_bitmap);
+#endif
 
 	for_each_possible_cpu(cpu) {
 		sd = per_cpu_ptr(&svm_data, cpu);
@@ -248,6 +333,11 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
 	int asid, ret;
 
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+	struct kvm_csv_init params;
+	void *csv_blob = NULL;
+#endif
+
 	if (kvm->created_vcpus)
 		return -EINVAL;
 
@@ -257,7 +347,43 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
 	sev->active = true;
 	sev->es_active = argp->id == KVM_SEV_ES_INIT;
+
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
+		memset(&params, 0, sizeof(params));
+
+		if (argp->data &&
+		    copy_from_user(&params,
+				   (void __user *)(uintptr_t)argp->data, sizeof(params)))
+			return -EFAULT;
+
+		if (params.userid_addr) {
+			if (params.len >= ASID_USERID_LENGTH) {
+				pr_err("Invalid length of userid %d >= %d\n",
+				       params.len, ASID_USERID_LENGTH);
+				return -EINVAL;
+			}
+
+			csv_blob = psp_copy_user_blob(params.userid_addr, params.len);
+			if (IS_ERR(csv_blob)) {
+				pr_err("Copy userid failed, %llx (%u)\n",
+				       params.userid_addr, params.len);
+				return PTR_ERR(csv_blob);
+			}
+		}
+
+		asid = sev_asid_new(sev, (const char *)csv_blob, params.len);
+
+		/* Free the @csv_blob to prevent memory leak */
+		kfree(csv_blob);
+		csv_blob = NULL;
+	} else {
+		asid = sev_asid_new(sev, NULL, 0);
+	}
+#else
 	asid = sev_asid_new(sev);
+#endif
+
 	if (asid < 0)
 		goto e_no_asid;
 	sev->asid = asid;
@@ -277,6 +403,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	sev_asid_free(sev);
 	sev->asid = 0;
 e_no_asid:
+#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID
+	kfree(csv_blob);
+#endif
 	sev->es_active = false;
 	sev->active = false;
 	return ret;
@@ -318,6 +447,28 @@ static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int
*error) return __sev_issue_cmd(sev->fd, id, data, error); } +static int __csv_issue_ringbuf_cmds(int fd, int *psp_ret) +{ + struct fd f; + int ret; + + f = fdget(fd); + if (!f.file) + return -EBADF; + + ret = csv_issue_ringbuf_cmds_external_user(f.file, psp_ret); + + fdput(f); + return ret; +} + +static int csv_issue_ringbuf_cmds(struct kvm *kvm, int *psp_ret) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + + return __csv_issue_ringbuf_cmds(sev->fd, psp_ret); +} + static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -400,6 +551,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, unsigned long locked, lock_limit; struct page **pages; unsigned long first, last; + unsigned int flags = 0; int ret; lockdep_assert_held(&kvm->lock); @@ -432,8 +584,10 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr, if (!pages) return ERR_PTR(-ENOMEM); + flags = write ? FOLL_WRITE : 0; + /* Pin the user virtual address. */ - npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages); + npinned = pin_user_pages_fast(uaddr, npages, flags | FOLL_LONGTERM, pages); if (npinned != npages) { pr_err("SEV: Failure locking %lu pages.\n", npages); ret = -ENOMEM; @@ -654,6 +808,18 @@ static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu, return ret; vcpu->arch.guest_state_protected = true; + + /* + * Backup encrypted vmsa to support rebooting CSV2 guest. The + * clflush_cache_range() is necessary to invalidate prefetched + * memory area pointed by svm->sev_es.vmsa so that we can read + * fresh memory updated by PSP. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + memcpy(svm->sev_es.reset_vmsa, svm->sev_es.vmsa, PAGE_SIZE); + } + return 0; } @@ -1367,6 +1533,115 @@ static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp) return ret; } +/* Userspace wants to query either header or trans length. 
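+ *
+ * Unlike SEND_UPDATE_DATA, the VMSA variant works on a single vCPU
+ * selected by vcpu_id, so a migration tool repeats the
+ * query/allocate/send sequence once per vCPU.  A rough userspace-side
+ * sketch (the sev_ioctl() wrapper is assumed, not part of this patch):
+ *
+ *	struct kvm_sev_send_update_vmsa v = { 0 };
+ *
+ *	for (int id = 0; id < nr_vcpus; id++) {
+ *		v.vcpu_id = id;
+ *		v.hdr_len = v.trans_len = 0;
+ *		sev_ioctl(vm_fd, KVM_SEV_SEND_UPDATE_VMSA, &v); // query
+ *		v.hdr_uaddr = (__u64)malloc(v.hdr_len);
+ *		v.trans_uaddr = (__u64)malloc(v.trans_len);
+ *		sev_ioctl(vm_fd, KVM_SEV_SEND_UPDATE_VMSA, &v); // send
+ *	}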
*/ +static int +__sev_send_update_vmsa_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp, + struct kvm_sev_send_update_vmsa *params) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + int ret; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL_ACCOUNT); + if (!vmsa) + return -ENOMEM; + + vmsa->handle = sev->handle; + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + params->hdr_len = vmsa->hdr_len; + params->trans_len = vmsa->trans_len; + + if (copy_to_user((void __user *)argp->data, params, + sizeof(struct kvm_sev_send_update_vmsa))) + ret = -EFAULT; + + kfree(vmsa); + return ret; +} + +static int sev_send_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_vmsa *vmsa; + struct kvm_sev_send_update_vmsa params; + struct kvm_vcpu *vcpu; + void *hdr, *trans_data; + int ret; + + if (!sev_es_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)(uintptr_t)argp->data, + sizeof(struct kvm_sev_send_update_vmsa))) + return -EFAULT; + + /* userspace wants to query either header or trans length */ + if (!params.trans_len || !params.hdr_len) + return __sev_send_update_vmsa_query_lengths(kvm, argp, ¶ms); + + if (!params.trans_uaddr || !params.hdr_uaddr) + return -EINVAL; + + /* Get the target vcpu */ + vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id); + if (!vcpu) { + pr_err("%s: invalid vcpu\n", __func__); + return -EINVAL; + } + + pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id); + + /* allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT); + if (!hdr) + return ret; + + trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT); + if (!trans_data) + goto e_free_hdr; + + vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL); + if (!vmsa) + goto e_free_trans_data; + + vmsa->hdr_address = __psp_pa(hdr); + vmsa->hdr_len = params.hdr_len; + vmsa->trans_address = __psp_pa(trans_data); + vmsa->trans_len = params.trans_len; + + /* The SEND_UPDATE_VMSA command requires C-bit to be always set. */ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_VMSA, vmsa, &argp->error); + + if (ret) + goto e_free; + + /* copy transport buffer to user space */ + if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr, + trans_data, params.trans_len)) { + ret = -EFAULT; + goto e_free; + } + + /* Copy packet header to userspace. 
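+	 * The two buffers play different roles: hdr is the firmware's
+	 * packet header (the metadata the remote firmware needs in order
+	 * to import the packet), while trans_data is the encrypted VMSA
+	 * image itself.  On the target, both are handed back unchanged to
+	 * KVM_SEV_RECEIVE_UPDATE_VMSA for the matching vcpu_id, roughly:
+	 *
+	 *	struct kvm_sev_receive_update_vmsa r = {
+	 *		.vcpu_id = v.vcpu_id,
+	 *		.hdr_uaddr = v.hdr_uaddr, .hdr_len = v.hdr_len,
+	 *		.trans_uaddr = v.trans_uaddr, .trans_len = v.trans_len,
+	 *	};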
+	 */
+	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
+			 params.hdr_len))
+		ret = -EFAULT;
+
+e_free:
+	kfree(vmsa);
+e_free_trans_data:
+	kfree(trans_data);
+e_free_hdr:
+	kfree(hdr);
+
+	return ret;
+}
+
 static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
 {
 	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
@@ -1542,6 +1817,81 @@ static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return ret;
 }
 
+static int sev_receive_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_sev_receive_update_vmsa params;
+	struct sev_data_receive_update_vmsa *vmsa;
+	struct kvm_vcpu *vcpu;
+	void *hdr = NULL, *trans = NULL;
+	int ret;
+
+	if (!sev_es_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(struct kvm_sev_receive_update_vmsa)))
+		return -EFAULT;
+
+	if (!params.hdr_uaddr || !params.hdr_len ||
+	    !params.trans_uaddr || !params.trans_len)
+		return -EINVAL;
+
+	/* Get the target vcpu */
+	vcpu = kvm_get_vcpu_by_id(kvm, params.vcpu_id);
+	if (!vcpu) {
+		pr_err("%s: invalid vcpu\n", __func__);
+		return -EINVAL;
+	}
+
+	pr_debug("%s: vcpu (%d)\n", __func__, vcpu->vcpu_id);
+
+	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+	if (IS_ERR(hdr))
+		return PTR_ERR(hdr);
+
+	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
+	if (IS_ERR(trans)) {
+		ret = PTR_ERR(trans);
+		goto e_free_hdr;
+	}
+
+	ret = -ENOMEM;
+	vmsa = kzalloc(sizeof(*vmsa), GFP_KERNEL);
+	if (!vmsa)
+		goto e_free_trans;
+
+	vmsa->hdr_address = __psp_pa(hdr);
+	vmsa->hdr_len = params.hdr_len;
+	vmsa->trans_address = __psp_pa(trans);
+	vmsa->trans_len = params.trans_len;
+
+	/*
+	 * Flush before RECEIVE_UPDATE_VMSA: the PSP encrypts the written
+	 * VMSA memory content with the guest's key, and the cache may
+	 * contain dirty, unencrypted data.
+	 */
+	clflush_cache_range(to_svm(vcpu)->sev_es.vmsa, PAGE_SIZE);
+
+	/* The RECEIVE_UPDATE_VMSA command requires C-bit to be always set.
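+	 * sev_me_mask is the memory-encryption bit of a physical address;
+	 * its position is reported by CPUID 0x8000001F in EBX[5:0].  A
+	 * sketch of the derivation (KVM computes this once during setup):
+	 *
+	 *	unsigned int eax, ebx, ecx, edx;
+	 *
+	 *	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);
+	 *	u64 me_mask = 1ULL << (ebx & 0x3f);
+	 *
+	 * Setting the bit makes the firmware access the VMSA page through
+	 * the guest's encryption key.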
*/ + vmsa->guest_address = __pa(to_svm(vcpu)->sev_es.vmsa) | sev_me_mask; + vmsa->guest_len = PAGE_SIZE; + vmsa->handle = sev->handle; + + ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_VMSA, vmsa, &argp->error); + + if (!ret) + vcpu->arch.guest_state_protected = true; + + kfree(vmsa); +e_free_trans: + kfree(trans); +e_free_hdr: + kfree(hdr); + + return ret; +} + static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp) { struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; @@ -1849,6 +2199,8 @@ int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd) return ret; } +static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp); + int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) { struct kvm_sev_cmd sev_cmd; @@ -1918,6 +2270,12 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_SEND_UPDATE_DATA: r = sev_send_update_data(kvm, &sev_cmd); break; + case KVM_SEV_SEND_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_send_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_SEND_FINISH: r = sev_send_finish(kvm, &sev_cmd); break; @@ -1930,9 +2288,23 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp) case KVM_SEV_RECEIVE_UPDATE_DATA: r = sev_receive_update_data(kvm, &sev_cmd); break; + case KVM_SEV_RECEIVE_UPDATE_VMSA: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = sev_receive_update_vmsa(kvm, &sev_cmd); + else + r = -EINVAL; + break; case KVM_SEV_RECEIVE_FINISH: r = sev_receive_finish(kvm, &sev_cmd); break; + case KVM_CSV_COMMAND_BATCH: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + mutex_lock(&csv_cmd_batch_mutex); + r = csv_command_batch(kvm, &sev_cmd); + mutex_unlock(&csv_cmd_batch_mutex); + break; + } + fallthrough; default: r = -EINVAL; goto out; @@ -2229,6 +2601,34 @@ void __init sev_hardware_setup(void) goto out; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* Initialize CSV ASID reuse array */ + csv_asid_userid_array = kcalloc(nr_asids, + sizeof(struct csv_asid_userid), GFP_KERNEL); + if (!csv_asid_userid_array) { + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } +#endif + + /* Initialize buffer to accelerate migration of CSV/CSV2 guest */ + if (alloc_trans_mempool()) { +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); + csv_asid_userid_array = NULL; +#endif + bitmap_free(sev_asid_bitmap); + sev_asid_bitmap = NULL; + bitmap_free(sev_reclaim_asid_bitmap); + sev_reclaim_asid_bitmap = NULL; + goto out; + } + } + sev_asid_count = max_sev_asid - min_sev_asid + 1; WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count)); sev_supported = true; @@ -2250,23 +2650,36 @@ void __init sev_hardware_setup(void) if (!boot_cpu_has(X86_FEATURE_SEV_ES)) goto out; - /* Has the system been allocated ASIDs for SEV-ES? */ - if (min_sev_asid == 1) - goto out; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Ths ASIDs from 1 to max_sev_asid are available for hygon + * CSV2 guest. + */ + sev_es_asid_count = max_sev_asid; + } else { + /* Has the system been allocated ASIDs for SEV-ES? 
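+	 * On AMD parts the ASID space [1, max_sev_asid] is partitioned by
+	 * min_sev_asid: [1, min_sev_asid - 1] is reserved for SEV-ES and
+	 * [min_sev_asid, max_sev_asid] for plain SEV, so min_sev_asid == 1
+	 * leaves no SEV-ES capacity at all.  The Hygon branch above skips
+	 * this check because CSV2 may use any ASID in [1, max_sev_asid]:
+	 *
+	 *	sev_es_asid_count = min_sev_asid - 1;	// AMD split
+	 *	sev_es_asid_count = max_sev_asid;	// Hygon, whole range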
*/ + if (min_sev_asid == 1) + goto out; - sev_es_asid_count = min_sev_asid - 1; + sev_es_asid_count = min_sev_asid - 1; + } WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count)); sev_es_supported = true; out: if (boot_cpu_has(X86_FEATURE_SEV)) - pr_info("SEV %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV" : "SEV", sev_supported ? "enabled" : "disabled", min_sev_asid, max_sev_asid); if (boot_cpu_has(X86_FEATURE_SEV_ES)) - pr_info("SEV-ES %s (ASIDs %u - %u)\n", + pr_info("%s %s (ASIDs %u - %u)\n", + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? "CSV2" : "SEV-ES", sev_es_supported ? "enabled" : "disabled", - min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1); + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + 1 : (min_sev_asid > 1 ? 1 : 0), + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? + max_sev_asid : min_sev_asid - 1); sev_enabled = sev_supported; sev_es_enabled = sev_es_supported; @@ -2284,6 +2697,13 @@ void sev_hardware_unsetup(void) /* No need to take sev_bitmap_lock, all VMs have been destroyed. */ sev_flush_asids(1, max_sev_asid); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + free_trans_mempool(); +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + kfree(csv_asid_userid_array); +#endif + } + bitmap_free(sev_asid_bitmap); bitmap_free(sev_reclaim_asid_bitmap); @@ -2365,8 +2785,13 @@ void sev_free_vcpu(struct kvm_vcpu *vcpu) __free_page(virt_to_page(svm->sev_es.vmsa)); + if (svm->sev_es.ghcb) + kvm_vcpu_unmap(vcpu, &svm->sev_es.ghcb_map, false); + if (svm->sev_es.ghcb_sa_free) kvfree(svm->sev_es.ghcb_sa); + + __free_page(virt_to_page(svm->sev_es.reset_vmsa)); } static void dump_ghcb(struct vcpu_svm *svm) @@ -2632,6 +3057,13 @@ void pre_sev_run(struct vcpu_svm *svm, int cpu) /* Assign the asid allocated with this SEV guest */ svm->asid = asid; +#ifdef CONFIG_KVM_SUPPORTS_CSV_REUSE_ASID + /* If ASID is shared with other guests, then flush TLB before VMRUN */ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + csv_asid_userid_array[asid].userid_len) + svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID; +#endif + /* * Flush guest TLB: * @@ -2972,6 +3404,25 @@ static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm) set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux); } + + /* + * For SEV-ES, accesses to MSR_IA32_XSS should not be intercepted if + * the host/guest supports its use. + * + * guest_can_use() checks a number of requirements on the host/guest to + * ensure that MSR_IA32_XSS is available, but it might report true even + * if X86_FEATURE_XSAVES isn't configured in the guest to ensure host + * MSR_IA32_XSS is always properly restored. For SEV-ES, it is better + * to further check that the guest CPUID actually supports + * X86_FEATURE_XSAVES so that accesses to MSR_IA32_XSS by misbehaved + * guests will still get intercepted and caught in the normal + * kvm_emulate_rdmsr()/kvm_emulated_wrmsr() paths. 
+	 */
+	if (guest_can_use(vcpu, X86_FEATURE_XSAVES) &&
+	    guest_cpuid_has(vcpu, X86_FEATURE_XSAVES))
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 1, 1);
+	else
+		set_msr_interception(vcpu, svm->msrpm, MSR_IA32_XSS, 0, 0);
 }
 
 void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
@@ -3138,3 +3589,634 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
 
 	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
 }
+
+int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct sev_data_attestation_report *data = NULL;
+	struct page **pages;
+	unsigned long guest_uaddr, n;
+	int ret = 0, offset, error;
+
+	if (!sev_guest(kvm) || (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON))
+		return -ENOTTY;
+
+	/*
+	 * The physical address of the guest must be valid and page aligned,
+	 * and the length of the guest memory region must be page size
+	 * aligned.
+	 */
+	if (!gpa || (gpa & ~PAGE_MASK) || (len & ~PAGE_MASK)) {
+		pr_err("invalid guest address or length\n");
+		return -EFAULT;
+	}
+
+	guest_uaddr = gfn_to_hva(kvm, gpa_to_gfn(gpa));
+	pages = sev_pin_memory(kvm, guest_uaddr, len, &n, 1);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
+
+	/*
+	 * The attestation report must be copied into a contiguous memory
+	 * region, so let's verify that the userspace memory pages are
+	 * contiguous before we issue the command.
+	 */
+	if (get_num_contig_pages(0, pages, n) != n) {
+		ret = -EINVAL;
+		goto e_unpin_memory;
+	}
+
+	ret = -ENOMEM;
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto e_unpin_memory;
+
+	/* sev_vm_mnonce indicates attestation request from guest */
+	if (sizeof(sev_vm_mnonce) >= sizeof(data->mnonce)) {
+		ret = -EINVAL;
+		goto e_free;
+	}
+
+	memcpy(data->mnonce, sev_vm_mnonce, sizeof(sev_vm_mnonce));
+
+	offset = guest_uaddr & (PAGE_SIZE - 1);
+	data->address = __sme_page_pa(pages[0]) + offset;
+	data->len = len;
+
+	data->handle = sev->handle;
+	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, data, &error);
+
+	if (ret)
+		pr_err("vm attestation ret %#x, error %#x\n", ret, error);
+
+e_free:
+	kfree(data);
+e_unpin_memory:
+	sev_unpin_memory(kvm, pages, n);
+	return ret;
+}
+
+/*--1024--1023--1024--1023--*/
+#define TRANS_MEMPOOL_1ST_BLOCK_OFFSET		0
+#define TRANS_MEMPOOL_2ND_BLOCK_OFFSET		(1024 << PAGE_SHIFT)
+#define TRANS_MEMPOOL_3RD_BLOCK_OFFSET		(2047 << PAGE_SHIFT)
+#define TRANS_MEMPOOL_4TH_BLOCK_OFFSET		(3071 << PAGE_SHIFT)
+#define TRANS_MEMPOOL_BLOCKS_MAX_OFFSET		(4094 << PAGE_SHIFT)
+#define TRANS_MEMPOOL_BLOCK_NUM			4
+#define TRANS_MEMPOOL_BLOCK_SIZE		(1024 * PAGE_SIZE)
+
+static size_t g_mempool_offset;
+void *g_trans_mempool[TRANS_MEMPOOL_BLOCK_NUM] = { 0, };
+
+static void reset_mempool_offset(void)
+{
+	g_mempool_offset = 0;
+}
+
+static int alloc_trans_mempool(void)
+{
+	int i;
+
+	for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) {
+		WARN_ONCE(g_trans_mempool[i],
+			  "CSV: g_trans_mempool[%d] was tainted\n", i);
+
+		g_trans_mempool[i] = kzalloc(TRANS_MEMPOOL_BLOCK_SIZE, GFP_KERNEL);
+		if (!g_trans_mempool[i])
+			goto free_trans_mempool;
+	}
+
+	g_mempool_offset = 0;
+	return 0;
+
+free_trans_mempool:
+	for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) {
+		kfree(g_trans_mempool[i]);
+		g_trans_mempool[i] = NULL;
+	}
+
+	return -ENOMEM;
+}
+
+static void free_trans_mempool(void)
+{
+	int i;
+
+	for (i = 0; i < TRANS_MEMPOOL_BLOCK_NUM; i++) {
+		kfree(g_trans_mempool[i]);
+		g_trans_mempool[i] = NULL;
+	}
+
+	g_mempool_offset = 0;
+}
+
+static void __maybe_unused *get_trans_data_from_mempool(size_t size)
+{
+	void
*trans = NULL; + char *trans_data = NULL; + int i; + size_t offset; + + if (g_mempool_offset < TRANS_MEMPOOL_2ND_BLOCK_OFFSET) { + i = 0; + offset = g_mempool_offset - TRANS_MEMPOOL_1ST_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_3RD_BLOCK_OFFSET) { + i = 1; + offset = g_mempool_offset - TRANS_MEMPOOL_2ND_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_4TH_BLOCK_OFFSET) { + i = 2; + offset = g_mempool_offset - TRANS_MEMPOOL_3RD_BLOCK_OFFSET; + } else if (g_mempool_offset < TRANS_MEMPOOL_BLOCKS_MAX_OFFSET) { + i = 3; + offset = g_mempool_offset - TRANS_MEMPOOL_4TH_BLOCK_OFFSET; + } else { + pr_err("CSV: mempool is full (offset: %lu)\n", g_mempool_offset); + return NULL; + } + + trans_data = (char *)g_trans_mempool[i]; + trans = &trans_data[offset]; + g_mempool_offset += size; + + return trans; +} + +static int csv_ringbuf_infos_free(struct kvm *kvm, + struct csv_ringbuf_infos *ringbuf_infos) +{ + int i; + + for (i = 0; i < ringbuf_infos->num; i++) { + struct csv_ringbuf_info_item *item = ringbuf_infos->item[i]; + + if (item) { + if (item->data_vaddr) + kfree((void *)item->data_vaddr); + + if (item->hdr_vaddr) + kfree((void *)item->hdr_vaddr); + + if (item->pages) + sev_unpin_memory(kvm, item->pages, item->n); + + kfree(item); + + ringbuf_infos->item[i] = NULL; + } + } + + return 0; +} + +static int +sev_send_update_data_to_ringbuf(struct kvm *kvm, + int prio, + uintptr_t data_ptr, + struct csv_ringbuf_infos *ringbuf_infos) +{ + struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; + struct sev_data_send_update_data *data; + struct kvm_sev_send_update_data params; + struct csv_ringbuf_info_item *item; + void *hdr, *trans_data; + struct page **guest_page; + unsigned long n; + int ret, offset; + + if (!sev_guest(kvm)) + return -ENOTTY; + + if (copy_from_user(¶ms, (void __user *)data_ptr, + sizeof(struct kvm_sev_send_update_data))) + return -EFAULT; + + /* + * userspace shouldn't query either header or trans length in ringbuf + * mode. + */ + if (!params.trans_len || !params.hdr_len) + return -EINVAL; + + if (!params.trans_uaddr || !params.guest_uaddr || + !params.guest_len || !params.hdr_uaddr) + return -EINVAL; + + /* Check if we are crossing the page boundary */ + offset = params.guest_uaddr & (PAGE_SIZE - 1); + if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE) + return -EINVAL; + + /* Pin guest memory */ + guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK, + PAGE_SIZE, &n, 0); + if (IS_ERR(guest_page)) + return PTR_ERR(guest_page); + + /* Allocate memory for header and transport buffer */ + ret = -ENOMEM; + hdr = kmalloc(params.hdr_len, GFP_KERNEL); + if (!hdr) + goto e_unpin; + + trans_data = get_trans_data_from_mempool(params.trans_len); + if (!trans_data) + goto e_free_hdr; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + goto e_free_hdr; + + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; + data->trans_address = __psp_pa(trans_data); + data->trans_len = params.trans_len; + + /* The SEND_UPDATE_DATA command requires C-bit to be always set. 
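+	 * Unlike sev_send_update_data(), this helper only enqueues the
+	 * command: csv_fill_cmd_queue() below places it in the ring, and
+	 * the trans buffer comes from the preallocated mempool instead of
+	 * a per-page allocation.  The intended batch flow (the issuing
+	 * side is beyond this hunk, so this is an outline, not a quote):
+	 *
+	 *	for each node in the userspace batch list:
+	 *		sev_send_update_data_to_ringbuf(kvm, prio, node, infos);
+	 *	csv_issue_ringbuf_cmds(kvm, &psp_ret);	// one doorbell
+	 *	sev_send_update_data_copy_to_user(kvm, infos);
+	 *	reset_mempool_offset();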
 */
+	data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) +
+			      offset;
+	data->guest_address |= sev_me_mask;
+	data->guest_len = params.guest_len;
+	data->handle = sev->handle;
+
+	ret = csv_fill_cmd_queue(prio, SEV_CMD_SEND_UPDATE_DATA, data, 0);
+	if (ret)
+		goto e_free;
+
+	/*
+	 * Create an item to save the page info and pointers, which will be
+	 * freed in csv_command_batch() because they are used for
+	 * copy_to_user() after the PSP returns.
+	 */
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		ret = -ENOMEM;
+		goto e_free;
+	}
+
+	item->pages = guest_page;
+	item->n = n;
+	item->hdr_vaddr = (uintptr_t)hdr;
+	item->trans_vaddr = (uintptr_t)trans_data;
+	item->data_vaddr = (uintptr_t)data;
+	item->hdr_uaddr = params.hdr_uaddr;
+	item->trans_uaddr = params.trans_uaddr;
+	item->hdr_len = params.hdr_len;
+	item->trans_len = params.trans_len;
+
+	ringbuf_infos->item[ringbuf_infos->num] = item;
+	ringbuf_infos->num++;
+
+	/* copy to ring buffer success, data freed after commands completed */
+	goto finish;
+
+e_free:
+	kfree(data);
+e_free_hdr:
+	kfree(hdr);
+e_unpin:
+	sev_unpin_memory(kvm, guest_page, n);
+
+finish:
+	return ret;
+}
+
+static int
+sev_send_update_data_copy_to_user(struct kvm *kvm,
+				  struct csv_ringbuf_infos *ringbuf_infos)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < ringbuf_infos->num; i++) {
+		struct csv_ringbuf_info_item *item = ringbuf_infos->item[i];
+
+		/* Copy transport buffer to userspace. */
+		if (copy_to_user((void __user *)item->trans_uaddr,
+				 (void *)item->trans_vaddr, item->trans_len)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		/* Copy packet header to userspace. */
+		if (copy_to_user((void __user *)item->hdr_uaddr,
+				 (void *)item->hdr_vaddr, item->hdr_len)) {
+			ret = -EFAULT;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+static int
+sev_receive_update_data_to_ringbuf(struct kvm *kvm,
+				   int prio,
+				   uintptr_t data_ptr,
+				   struct csv_ringbuf_infos *ringbuf_infos)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct kvm_sev_receive_update_data params;
+	struct sev_data_receive_update_data *data;
+	struct csv_ringbuf_info_item *item;
+	void *hdr = NULL, *trans = NULL;
+	struct page **guest_page;
+	unsigned long n;
+	int ret, offset;
+
+	if (!sev_guest(kvm))
+		return -EINVAL;
+
+	if (copy_from_user(&params, (void __user *)data_ptr,
+			   sizeof(struct kvm_sev_receive_update_data)))
+		return -EFAULT;
+
+	if (!params.hdr_uaddr || !params.hdr_len ||
+	    !params.guest_uaddr || !params.guest_len ||
+	    !params.trans_uaddr || !params.trans_len)
+		return -EINVAL;
+
+	/* Check if we are crossing the page boundary */
+	offset = params.guest_uaddr & (PAGE_SIZE - 1);
+	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
+		return -EINVAL;
+
+	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
+	if (IS_ERR(hdr))
+		return PTR_ERR(hdr);
+
+	ret = -ENOMEM;
+	trans = get_trans_data_from_mempool(params.trans_len);
+	if (!trans)
+		goto e_free_hdr;
+
+	if (copy_from_user(trans, (void __user *)params.trans_uaddr,
+			   params.trans_len)) {
+		ret = -EFAULT;
+		goto e_free_hdr;
+	}
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto e_free_hdr;
+
+	data->hdr_address = __psp_pa(hdr);
+	data->hdr_len = params.hdr_len;
+	data->trans_address = __psp_pa(trans);
+	data->trans_len = params.trans_len;
+
+	/* Pin guest memory */
+	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
+				    PAGE_SIZE, &n, 1);
+	if (IS_ERR(guest_page)) {
+		ret = PTR_ERR(guest_page);
+		goto e_free;
+	}
+
+	/*
+	 * Flush (on non-coherent CPUs) before
RECEIVE_UPDATE_DATA: the PSP
+	 * encrypts the written data with the guest's key, and the cache may
+	 * contain dirty, unencrypted data.
+	 */
+	sev_clflush_pages(guest_page, n);
+
+	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
+	data->guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) +
+			      offset;
+	data->guest_address |= sev_me_mask;
+	data->guest_len = params.guest_len;
+	data->handle = sev->handle;
+
+	ret = csv_fill_cmd_queue(prio, SEV_CMD_RECEIVE_UPDATE_DATA, data, 0);
+
+	if (ret)
+		goto e_unpin;
+
+	/*
+	 * Create an item to save the page info and pointers, which will be
+	 * freed in csv_command_batch() because they are used for
+	 * copy_to_user() after the PSP returns.
+	 */
+	item = kzalloc(sizeof(*item), GFP_KERNEL);
+	if (!item) {
+		ret = -ENOMEM;
+		goto e_unpin;
+	}
+
+	item->pages = guest_page;
+	item->n = n;
+	item->hdr_vaddr = (uintptr_t)hdr;
+	item->trans_vaddr = (uintptr_t)trans;
+	item->data_vaddr = (uintptr_t)data;
+
+	ringbuf_infos->item[ringbuf_infos->num] = item;
+	ringbuf_infos->num++;
+
+	/* copy to ring buffer success, data freed after commands completed */
+	goto finish;
+
+e_unpin:
+	sev_unpin_memory(kvm, guest_page, n);
+e_free:
+	kfree(data);
+e_free_hdr:
+	kfree(hdr);
+
+finish:
+	return ret;
+}
+
+typedef int (*csv_ringbuf_input_fn)(struct kvm *kvm, int prio,
+				    uintptr_t data_ptr,
+				    struct csv_ringbuf_infos *ringbuf_infos);
+typedef int (*csv_ringbuf_output_fn)(struct kvm *kvm,
+				     struct csv_ringbuf_infos *ringbuf_infos);
+
+static int get_cmd_helpers(__u32 cmd,
+			   csv_ringbuf_input_fn *to_ringbuf_fn,
+			   csv_ringbuf_output_fn *to_user_fn)
+{
+	int ret = 0;
+
+	/* copy commands to ring buffer */
+	switch (cmd) {
+	case KVM_SEV_SEND_UPDATE_DATA:
+		*to_ringbuf_fn = sev_send_update_data_to_ringbuf;
+		*to_user_fn = sev_send_update_data_copy_to_user;
+		break;
+	case KVM_SEV_RECEIVE_UPDATE_DATA:
+		*to_ringbuf_fn = sev_receive_update_data_to_ringbuf;
+		*to_user_fn = NULL;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int csv_command_batch(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	int ret;
+	struct kvm_csv_command_batch params;
+	uintptr_t node_addr;
+	struct csv_ringbuf_infos *ringbuf_infos;
+	csv_ringbuf_input_fn csv_cmd_to_ringbuf_fn = NULL;
+	csv_ringbuf_output_fn csv_copy_to_user_fn = NULL;
+	int prio = CSV_COMMAND_PRIORITY_HIGH;
+
+	if (!sev_guest(kvm))
+		return -ENOTTY;
+
+	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
+			   sizeof(struct kvm_csv_command_batch)))
+		return -EFAULT;
+
+	/* return directly if node list is NULL */
+	if (!params.csv_batch_list_uaddr)
+		return 0;
+
+	/* ring buffer init */
+	if (csv_ring_buffer_queue_init())
+		return -EINVAL;
+
+	if (get_cmd_helpers(params.command_id,
+			    &csv_cmd_to_ringbuf_fn, &csv_copy_to_user_fn)) {
+		ret = -EINVAL;
+		goto err_free_ring_buffer;
+	}
+
+	ringbuf_infos = kzalloc(sizeof(*ringbuf_infos), GFP_KERNEL);
+	if (!ringbuf_infos) {
+		ret = -ENOMEM;
+		goto err_free_ring_buffer;
+	}
+
+	node_addr = (uintptr_t)params.csv_batch_list_uaddr;
+	while (node_addr) {
+		struct kvm_csv_batch_list_node node;
+
+		if (copy_from_user(&node, (void __user *)node_addr,
+				   sizeof(struct kvm_csv_batch_list_node))) {
+			ret = -EFAULT;
+			goto err_free_ring_buffer_infos_items;
+		}
+
+		if (ringbuf_infos->num > SVM_RING_BUFFER_MAX) {
+			pr_err("%s: ring num is too large: %d, cmd: 0x%x\n",
+			       __func__, ringbuf_infos->num, params.command_id);
+
+			ret = -EINVAL;
+			goto err_free_ring_buffer_infos_items;
+		}
+
+		if (csv_cmd_to_ringbuf_fn(kvm, prio,
(uintptr_t)node.cmd_data_addr, + ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + + /* 1st half set to HIGH queue, 2nd half set to LOW queue */ + if (ringbuf_infos->num == SVM_RING_BUFFER_MAX / 2) + prio = CSV_COMMAND_PRIORITY_LOW; + + node_addr = node.next_cmd_addr; + } + + /* ring buffer process */ + ret = csv_issue_ringbuf_cmds(kvm, &argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + ret = csv_check_stat_queue_status(&argp->error); + if (ret) + goto err_free_ring_buffer_infos_items; + + if (csv_copy_to_user_fn && csv_copy_to_user_fn(kvm, ringbuf_infos)) { + ret = -EFAULT; + goto err_free_ring_buffer_infos_items; + } + +err_free_ring_buffer_infos_items: + csv_ringbuf_infos_free(kvm, ringbuf_infos); + kfree(ringbuf_infos); + reset_mempool_offset(); + +err_free_ring_buffer: + csv_ring_buffer_queue_free(); + + return ret; +} + +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa) +{ + if (kvm_vcpu_map(&svm->vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) { + /* Unable to map GHCB from guest */ + vcpu_unimpl(&svm->vcpu, "Missing GHCB [%#llx] from guest\n", + ghcb_gpa); + + svm->sev_es.receiver_ghcb_map_fail = true; + return -EINVAL; + } + + svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva; + svm->sev_es.receiver_ghcb_map_fail = false; + + pr_info("Mapping GHCB [%#llx] from guest at recipient\n", ghcb_gpa); + + return 0; +} + +int csv_control_pre_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + vcpu->arch.guest_state_protected = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} + +int csv_control_post_system_reset(struct kvm *kvm) +{ + struct kvm_vcpu *vcpu; + unsigned long i; + int ret; + + /* Flush both host and guest caches before next boot flow */ + wbinvd_on_all_cpus(); + + if (!sev_es_guest(kvm)) + return 0; + + kvm_for_each_vcpu(i, vcpu, kvm) { + struct vcpu_svm *svm = to_svm(vcpu); + + ret = mutex_lock_killable(&vcpu->mutex); + if (ret) + return ret; + + memcpy(svm->sev_es.vmsa, svm->sev_es.reset_vmsa, PAGE_SIZE); + + /* Flush encrypted vmsa to memory */ + clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE); + + svm->vcpu.arch.guest_state_protected = true; + svm->sev_es.received_first_sipi = false; + + mutex_unlock(&vcpu->mutex); + } + + return 0; +} diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 77f1eeefcd34bf2292dd66a557861bc050fd1e30..e50e7fe1a86ca8eedb41c00b305ea7ff00342bb3 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -48,6 +48,7 @@ #include "svm.h" #include "svm_ops.h" +#include "csv.h" #include "kvm_onhyperv.h" #include "svm_onhyperv.h" @@ -103,6 +104,7 @@ static const struct svm_direct_access_msrs { { .index = MSR_IA32_LASTBRANCHTOIP, .always = false }, { .index = MSR_IA32_LASTINTFROMIP, .always = false }, { .index = MSR_IA32_LASTINTTOIP, .always = false }, + { .index = MSR_IA32_XSS, .always = false }, { .index = MSR_EFER, .always = false }, { .index = MSR_IA32_CR_PAT, .always = false }, { .index = MSR_AMD64_SEV_ES_GHCB, .always = true }, @@ -545,7 +547,10 @@ static bool __kvm_is_svm_supported(void) } if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) { - pr_info("KVM is unsupported when running as an SEV guest\n"); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + pr_info("KVM is unsupported when running as an CSV guest\n"); + else + pr_info("KVM is unsupported when running as an SEV 
guest\n"); return false; } @@ -1432,6 +1437,7 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) struct vcpu_svm *svm; struct page *vmcb01_page; struct page *vmsa_page = NULL; + struct page *reset_vmsa_page = NULL; int err; BUILD_BUG_ON(offsetof(struct vcpu_svm, vcpu) != 0); @@ -1451,6 +1457,10 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (!vmsa_page) goto error_free_vmcb_page; + reset_vmsa_page = alloc_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO); + if (!reset_vmsa_page) + goto error_free_vmsa_page; + /* * SEV-ES guests maintain an encrypted version of their FPU * state which is restored and saved on VMRUN and VMEXIT. @@ -1479,6 +1489,9 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) if (vmsa_page) svm->sev_es.vmsa = page_address(vmsa_page); + if (reset_vmsa_page) + svm->sev_es.reset_vmsa = page_address(reset_vmsa_page); + svm->guest_state_loaded = false; return 0; @@ -1486,6 +1499,8 @@ static int svm_vcpu_create(struct kvm_vcpu *vcpu) error_free_vmsa_page: if (vmsa_page) __free_page(vmsa_page); + if (reset_vmsa_page) + __free_page(reset_vmsa_page); error_free_vmcb_page: __free_page(vmcb01_page); out: @@ -2943,6 +2958,31 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DE_CFG: msr_info->data = svm->msr_decfg; break; + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr_info->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + msr_info->data = svm->vmcb->control.ghcb_gpa; + + /* Only set status bits when using GHCB page protocol */ + if (msr_info->data && + !is_ghcb_msr_protocol(msr_info->data)) { + if (svm->sev_es.ghcb) + msr_info->data |= GHCB_MSR_MAPPED_MASK; + + if (svm->sev_es.received_first_sipi) + msr_info->data |= + GHCB_MSR_RECEIVED_FIRST_SIPI_MASK; + } + break; + } + return 1; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -3178,6 +3218,47 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) svm->msr_decfg = data; break; } + case MSR_AMD64_SEV_ES_GHCB: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* + * Only support userspace get/set from/to + * vmcb.control.ghcb_gpa + */ + if (!msr->host_initiated || + !sev_es_guest(svm->vcpu.kvm)) + return 1; + + /* + * Value 0 means uninitialized userspace MSR data, + * userspace need get the initial MSR data afterwards. + */ + if (!data) + return 0; + + /* Extract status info when using GHCB page protocol */ + if (!is_ghcb_msr_protocol(data)) { + if (!svm->sev_es.ghcb && + (data & GHCB_MSR_MAPPED_MASK)) { + /* + * This happened on recipient of migration, + * should return error if cannot map the + * ghcb page. + */ + if (sev_es_ghcb_map(to_svm(vcpu), + data & ~GHCB_MSR_KVM_STATUS_MASK)) + return 1; + } + + if (data & GHCB_MSR_RECEIVED_FIRST_SIPI_MASK) + svm->sev_es.received_first_sipi = true; + + data &= ~GHCB_MSR_KVM_STATUS_MASK; + } + + svm->vmcb->control.ghcb_gpa = data; + break; + } + return 1; default: return kvm_set_msr_common(vcpu, msr); } @@ -4133,6 +4214,19 @@ static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) trace_kvm_entry(vcpu); + /* + * For receipient side of CSV2 guest, fake the exit code as + * SVM_EXIT_ERR and return directly if failed to mapping + * the necessary GHCB page. When handling the exit code + * afterwards, it can exit to userspace and stop the guest. 
+	 */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+	    sev_es_guest(vcpu->kvm) &&
+	    svm->sev_es.receiver_ghcb_map_fail) {
+		svm->vmcb->control.exit_code = SVM_EXIT_ERR;
+		return EXIT_FASTPATH_NONE;
+	}
+
 	svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
 	svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
 	svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
@@ -4307,6 +4401,15 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 		if (kvm && sev_es_guest(kvm))
 			return false;
 		break;
+	case MSR_AMD64_SEV_ES_GHCB:
+		/*
+		 * Only CSV2 guests support exporting this MSR; this can
+		 * only be determined after KVM_CREATE_VM.
+		 */
+		if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON ||
+		    (kvm && !sev_es_guest(kvm)))
+			return false;
+		break;
 	default:
 		break;
 	}
@@ -5039,6 +5142,10 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.vcpu_deliver_sipi_vector = svm_vcpu_deliver_sipi_vector,
 	.vcpu_get_apicv_inhibit_reasons = avic_vcpu_get_apicv_inhibit_reasons,
+
+	.vm_attestation = sev_vm_attestation,
+	.control_pre_system_reset = csv_control_pre_system_reset,
+	.control_post_system_reset = csv_control_post_system_reset,
 };

 /*
@@ -5349,6 +5456,9 @@ static int __init svm_init(void)
 	if (!kvm_is_svm_supported())
 		return -EOPNOTSUPP;

+	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
+		csv_init(&svm_x86_ops);
+
 	r = kvm_x86_vendor_init(&svm_init_ops);
 	if (r)
 		return r;
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index be67ab7fdd104e33f1ecc960e94d23e9385acf73..0461fbbe103dd1a00f155176e1aa9b581057665d 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -30,7 +30,7 @@
 #define IOPM_SIZE PAGE_SIZE * 3
 #define MSRPM_SIZE PAGE_SIZE * 2

-#define MAX_DIRECT_ACCESS_MSRS	46
+#define MAX_DIRECT_ACCESS_MSRS	47
 #define MSRPM_OFFSETS	32
 extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
 extern bool npt_enabled;
@@ -202,6 +202,11 @@ struct vcpu_sev_es_state {
 	u32 ghcb_sa_len;
 	bool ghcb_sa_sync;
 	bool ghcb_sa_free;
+
+	/* CSV2 migrated ghcb mapping state support */
+	bool receiver_ghcb_map_fail;
+	/* CSV2 reboot vmsa */
+	struct vmcb_save_area *reset_vmsa;
 };

 struct vcpu_svm {
@@ -665,6 +670,44 @@ void avic_refresh_virtual_apic_mode(struct kvm_vcpu *vcpu);
 #define GHCB_VERSION_MAX	1ULL
 #define GHCB_VERSION_MIN	1ULL

+/*
+ * CSV2 live migration support:
+ * If MSR_AMD64_SEV_ES_GHCB does not carry a GHCB MSR protocol request
+ * during migration, bits [52-63] are reused to indicate vCPU status.
+ * The following statuses are currently included:
+ * * ghcb_map: indicates whether the GHCB page was mapped. The mapped
+ *             GHCB page may be filled with GPRs before VMRUN, so we
+ *             must remap the GHCB page on the recipient's side.
+ * * received_first_sipi: indicates the AP's INIT-SIPI-SIPI stage.
+ *             Reusing these bits for received_first_sipi is acceptable
+ *             because the runtime stage of the guest's Linux only uses
+ *             the GHCB page protocol. It is unlikely that a migration
+ *             hits other stages of the guest's Linux; if it does, AP
+ *             bringup may fail, which will not impact the user payload.
+ * Other bits keep their original meaning.
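+ *
+ * Illustrative layout of the reused value (derived from the masks
+ * defined below; bits 61:52 are currently unused status bits):
+ *
+ *   63         62                    61:52      51:0
+ *   ghcb_map | received_first_sipi | (unused) | GHCB GPA / protocol info
+ *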
(See GHCB Spec 2.3.1 for detail) + */ +#define GHCB_MSR_KVM_STATUS_POS 52 +#define GHCB_MSR_KVM_STATUS_BITS 12 +#define GHCB_MSR_KVM_STATUS_MASK \ + ((BIT_ULL(GHCB_MSR_KVM_STATUS_BITS) - 1) \ + << GHCB_MSR_KVM_STATUS_POS) +#define GHCB_MSR_MAPPED_POS 63 +#define GHCB_MSR_MAPPED_BITS 1 +#define GHCB_MSR_MAPPED_MASK \ + ((BIT_ULL(GHCB_MSR_MAPPED_BITS) - 1) \ + << GHCB_MSR_MAPPED_POS) +#define GHCB_MSR_RECEIVED_FIRST_SIPI_POS 62 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_BITS 1 +#define GHCB_MSR_RECEIVED_FIRST_SIPI_MASK \ + ((BIT_ULL(GHCB_MSR_RECEIVED_FIRST_SIPI_BITS) - 1) \ + << GHCB_MSR_RECEIVED_FIRST_SIPI_POS) + + +static inline bool is_ghcb_msr_protocol(u64 ghcb_val) +{ + return ghcb_val & GHCB_MSR_INFO_MASK; +} extern unsigned int max_sev_asid; @@ -693,6 +736,12 @@ void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector); void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa); void sev_es_unmap_ghcb(struct vcpu_svm *svm); +int sev_vm_attestation(struct kvm *kvm, unsigned long gpa, unsigned long len); +int sev_es_ghcb_map(struct vcpu_svm *svm, u64 ghcb_gpa); + +int csv_control_pre_system_reset(struct kvm *kvm); +int csv_control_post_system_reset(struct kvm *kvm); + /* vmenter.S */ void __svm_sev_es_vcpu_run(struct vcpu_svm *svm, bool spec_ctrl_intercepted); diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h index 41a4533f9989748c9226255e6aa7c043ced40ecc..631e65a212285073924d90df28ea1ee981efe747 100644 --- a/arch/x86/kvm/vmx/capabilities.h +++ b/arch/x86/kvm/vmx/capabilities.h @@ -60,6 +60,7 @@ struct vmcs_config { u32 pin_based_exec_ctrl; u32 cpu_based_exec_ctrl; u32 cpu_based_2nd_exec_ctrl; + u32 zx_cpu_based_3rd_exec_ctrl; u64 cpu_based_3rd_exec_ctrl; u32 vmexit_ctrl; u32 vmentry_ctrl; @@ -255,6 +256,12 @@ static inline bool cpu_has_vmx_xsaves(void) SECONDARY_EXEC_ENABLE_XSAVES; } +static inline bool cpu_has_vmx_zxpause(void) +{ + return vmcs_config.zx_cpu_based_3rd_exec_ctrl & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool cpu_has_vmx_waitpkg(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index c5ec0ef51ff78fa3baae19092d12057c07476394..4ba46e1b29d2b23f13adf8773cef07dc9e74e696 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -1085,7 +1085,7 @@ static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept, bool reload_pdptrs, enum vm_entry_failure_code *entry_failure_code) { - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) { + if (CC(!kvm_vcpu_is_legal_cr3(vcpu, cr3))) { *entry_failure_code = ENTRY_FAIL_DEFAULT; return -EINVAL; } @@ -2717,7 +2717,7 @@ static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp) } /* Reserved bits should not be set */ - if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) + if (CC(!kvm_vcpu_is_legal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f))) return false; /* AD, if set, should be supported */ @@ -2912,7 +2912,7 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || - CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3))) + CC(!kvm_vcpu_is_legal_cr3(vcpu, vmcs12->host_cr3))) return -EINVAL; if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) || @@ -4980,6 +4980,7 @@ int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification, else *ret = off; + *ret = vmx_get_untagged_addr(vcpu, 
*ret, 0); /* Long mode: #GP(0)/#SS(0) if the memory address is in a * non-canonical form. This is the only check on the memory * destination for long mode! @@ -5797,6 +5798,10 @@ static int handle_invvpid(struct kvm_vcpu *vcpu) vpid02 = nested_get_vpid02(vcpu); switch (type) { case VMX_VPID_EXTENT_INDIVIDUAL_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. + */ if (!operand.vpid || is_noncanonical_address(operand.gla, vcpu)) return nested_vmx_fail(vcpu, diff --git a/arch/x86/kvm/vmx/sgx.c b/arch/x86/kvm/vmx/sgx.c index 3e822e58249753c5ff267b5ce4f69e2ede9a00c9..6fef01e0536e5079c34e6efc6b64716119c10c4e 100644 --- a/arch/x86/kvm/vmx/sgx.c +++ b/arch/x86/kvm/vmx/sgx.c @@ -37,6 +37,7 @@ static int sgx_get_encls_gva(struct kvm_vcpu *vcpu, unsigned long offset, if (!IS_ALIGNED(*gva, alignment)) { fault = true; } else if (likely(is_64_bit_mode(vcpu))) { + *gva = vmx_get_untagged_addr(vcpu, *gva, 0); fault = is_noncanonical_address(*gva, vcpu); } else { *gva &= 0xffffffff; diff --git a/arch/x86/kvm/vmx/vmcs.h b/arch/x86/kvm/vmx/vmcs.h index 7c1996b433e262fa0c67d06a7a71baa6304db476..4eabed8e5813af72bac6e9d9377747cdf3a219fa 100644 --- a/arch/x86/kvm/vmx/vmcs.h +++ b/arch/x86/kvm/vmx/vmcs.h @@ -50,7 +50,9 @@ struct vmcs_controls_shadow { u32 pin; u32 exec; u32 secondary_exec; + u32 zx_tertiary_exec; u64 tertiary_exec; + u64 zx_vmexit_tsc; }; /* diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 9bba5352582c35615b7c076fb54b4601b8db108e..5a121b16fccd464a4165d9c56a5d346b544ca0cf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -214,6 +214,8 @@ module_param(ple_window_max, uint, 0444); int __read_mostly pt_mode = PT_MODE_SYSTEM; module_param(pt_mode, int, S_IRUGO); +static u32 zx_ext_vmcs_cap; + static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond); static DEFINE_MUTEX(vmx_l1d_flush_mutex); @@ -2006,7 +2008,11 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_UMWAIT_CONTROL: if (!msr_info->host_initiated && !vmx_has_waitpkg(vmx)) return 1; - + msr_info->data = vmx->msr_ia32_umwait_control; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; msr_info->data = vmx->msr_ia32_umwait_control; break; case MSR_IA32_SPEC_CTRL: @@ -2266,7 +2272,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) /* The reserved bit 1 and non-32 bit [63:32] should be zero */ if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) return 1; + vmx->msr_ia32_umwait_control = data; + break; + case MSR_ZX_PAUSE_CONTROL: + if (!msr_info->host_initiated && !vmx_guest_zxpause_enabled(vmx)) + return 1; + /* The reserved bit 1 and non-32 bit [63:32] should be zero */ + if (data & (BIT_ULL(1) | GENMASK_ULL(63, 32))) + return 1; vmx->msr_ia32_umwait_control = data; break; case MSR_IA32_SPEC_CTRL: @@ -2724,6 +2738,10 @@ static int setup_vmcs_config(struct vmcs_config *vmcs_conf, vmcs_conf->vmentry_ctrl = _vmentry_control; vmcs_conf->misc = misc_msr; + /* Setup Zhaoxin exec-cntl3 VMCS field. 
 */
+	if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3)
+		vmcs_conf->zx_cpu_based_3rd_exec_ctrl |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+
 #if IS_ENABLED(CONFIG_HYPERV)
 	if (enlightened_vmcs)
 		evmcs_sanitize_exec_ctrls(vmcs_conf);
@@ -3400,7 +3418,8 @@ static void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			update_guest_cr3 = false;
 		vmx_ept_load_pdptrs(vcpu);
 	} else {
-		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu);
+		guest_cr3 = root_hpa | kvm_get_active_pcid(vcpu) |
+			    kvm_get_active_cr3_lam_bits(vcpu);
 	}

 	if (update_guest_cr3)
@@ -4516,6 +4535,28 @@ static u64 vmx_tertiary_exec_control(struct vcpu_vmx *vmx)
 	return exec_control;
 }

+static u32 vmx_zx_tertiary_exec_control(struct vcpu_vmx *vmx)
+{
+	struct kvm_vcpu *vcpu = &vmx->vcpu;
+	u32 exec_control = vmcs_config.zx_cpu_based_3rd_exec_ctrl;
+
+	/*
+	 * Warn if QEMU wants to enable guest_zxpause while VMX does not
+	 * support it.
+	 */
+	if (guest_cpuid_has(vcpu, X86_FEATURE_ZXPAUSE)) {
+		if (!cpu_has_vmx_zxpause())
+			pr_err("VMX does not support guest_zxpause!\n");
+		else
+			exec_control |= ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+	} else {
+		exec_control &= ~ZX_TERTIARY_EXEC_GUEST_ZXPAUSE;
+	}
+
+	/* enable other features here */
+
+	return exec_control;
+}
+
 /*
  * Adjust a single secondary execution control bit to intercept/allow an
  * instruction in the guest. This is usually done based on whether or not a
@@ -4722,6 +4763,11 @@ static void init_vmcs(struct vcpu_vmx *vmx)
 	if (cpu_has_secondary_exec_ctrls())
 		secondary_exec_controls_set(vmx, vmx_secondary_exec_control(vmx));

+	if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) {
+		zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx));
+		zx_vmexit_tsc_controls_set(vmx, 0);
+	}
+
 	if (cpu_has_tertiary_exec_ctrls())
 		tertiary_exec_controls_set(vmx, vmx_tertiary_exec_control(vmx));

@@ -5782,7 +5828,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	 * would also use advanced VM-exit information for EPT violations to
 	 * reconstruct the page fault error code.
 	 */
-	if (unlikely(allow_smaller_maxphyaddr && kvm_vcpu_is_illegal_gpa(vcpu, gpa)))
+	if (unlikely(allow_smaller_maxphyaddr && !kvm_vcpu_is_legal_gpa(vcpu, gpa)))
 		return kvm_emulate_instruction(vcpu, 0);

 	return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0);
@@ -6251,6 +6297,13 @@ void dump_vmcs(struct kvm_vcpu *vcpu)
 	else
 		tertiary_exec_control = 0;

+	pr_err("*** Zhaoxin Specific Fields ***\n");
+	if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) {
+		pr_err("Zhaoxin TertiaryExec Cntl = 0x%08x\n",
+		       vmcs_read32(ZX_TERTIARY_VM_EXEC_CONTROL));
+		pr_err("ZXPAUSE Saved TSC = 0x%016llx\n", vmcs_read64(ZXPAUSE_VMEXIT_TSC));
+	}
+
 	pr_err("VMCS %p, last attempted VM-entry on CPU %d\n",
 	       vmx->loaded_vmcs->vmcs, vcpu->arch.last_vmentry_cpu);
 	pr_err("*** Guest State ***\n");
@@ -7007,6 +7060,7 @@ static bool vmx_has_emulated_msr(struct kvm *kvm, u32 index)
 		return nested;
 	case MSR_AMD64_VIRT_SPEC_CTRL:
 	case MSR_AMD64_TSC_RATIO:
+	case MSR_AMD64_SEV_ES_GHCB:
 		/* This is AMD only.
*/ return false; default: @@ -7677,6 +7731,9 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu) cr4_fixed1_update(X86_CR4_UMIP, ecx, feature_bit(UMIP)); cr4_fixed1_update(X86_CR4_LA57, ecx, feature_bit(LA57)); + entry = kvm_find_cpuid_entry_index(vcpu, 0x7, 1); + cr4_fixed1_update(X86_CR4_LAM_SUP, eax, feature_bit(LAM)); + #undef cr4_fixed1_update } @@ -7763,6 +7820,7 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_XSAVES); kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_VMX); + kvm_governed_feature_check_and_set(vcpu, X86_FEATURE_LAM); vmx_setup_uret_msrs(vmx); @@ -7770,6 +7828,11 @@ static void vmx_vcpu_after_set_cpuid(struct kvm_vcpu *vcpu) vmcs_set_secondary_exec_control(vmx, vmx_secondary_exec_control(vmx)); + if (zx_ext_vmcs_cap & MSR_ZX_VMCS_EXEC_CTL3) { + zx_tertiary_exec_controls_set(vmx, vmx_zx_tertiary_exec_control(vmx)); + zx_vmexit_tsc_controls_set(vmx, 0); + } + if (guest_can_use(vcpu, X86_FEATURE_VMX)) vmx->msr_ia32_feature_control_valid_bits |= FEAT_CTL_VMX_ENABLED_INSIDE_SMX | @@ -7900,6 +7963,9 @@ static __init void vmx_set_cpu_caps(void) if (cpu_has_vmx_waitpkg()) kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG); + + if (cpu_has_vmx_zxpause()) + kvm_cpu_cap_check_and_set(X86_FEATURE_ZXPAUSE); } static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu) @@ -8209,6 +8275,50 @@ static void vmx_vm_destroy(struct kvm *kvm) free_pages((unsigned long)kvm_vmx->pid_table, vmx_get_pid_table_order(kvm)); } +/* + * Note, the SDM states that the linear address is masked *after* the modified + * canonicality check, whereas KVM masks (untags) the address and then performs + * a "normal" canonicality check. Functionally, the two methods are identical, + * and when the masking occurs relative to the canonicality check isn't visible + * to software, i.e. KVM's behavior doesn't violate the SDM. + */ +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags) +{ + int lam_bit; + unsigned long cr3_bits; + + if (flags & (X86EMUL_F_FETCH | X86EMUL_F_IMPLICIT | X86EMUL_F_INVLPG)) + return gva; + + if (!is_64_bit_mode(vcpu)) + return gva; + + /* + * Bit 63 determines if the address should be treated as user address + * or a supervisor address. + */ + if (!(gva & BIT_ULL(63))) { + cr3_bits = kvm_get_active_cr3_lam_bits(vcpu); + if (!(cr3_bits & (X86_CR3_LAM_U57 | X86_CR3_LAM_U48))) + return gva; + + /* LAM_U48 is ignored if LAM_U57 is set. */ + lam_bit = cr3_bits & X86_CR3_LAM_U57 ? 56 : 47; + } else { + if (!kvm_is_cr4_bit_set(vcpu, X86_CR4_LAM_SUP)) + return gva; + + lam_bit = kvm_is_cr4_bit_set(vcpu, X86_CR4_LA57) ? 56 : 47; + } + + /* + * Untag the address by sign-extending the lam_bit, but NOT to bit 63. + * Bit 63 is retained from the raw virtual address so that untagging + * doesn't change a user access to a supervisor access, and vice versa. + */ + return (sign_extend64(gva, lam_bit) & ~BIT_ULL(63)) | (gva & BIT_ULL(63)); +} + static struct kvm_x86_ops vmx_x86_ops __initdata = { .name = KBUILD_MODNAME, @@ -8349,6 +8459,8 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .complete_emulated_msr = kvm_complete_insn_gp, .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector, + + .get_untagged_addr = vmx_get_untagged_addr, }; static unsigned int vmx_handle_intel_pt_intr(void) @@ -8423,6 +8535,12 @@ static __init int hardware_setup(void) unsigned long host_bndcfgs; struct desc_ptr dt; int r; + u32 ign; + + /* Caches Zhaoxin extend VMCS capabilities. 
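+	 * rdmsr_safe() is used so that CPUs lacking MSR_ZX_EXT_VMCS_CAPS
+	 * simply leave zx_ext_vmcs_cap as zero.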
*/ + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) + rdmsr_safe(MSR_ZX_EXT_VMCS_CAPS, &zx_ext_vmcs_cap, &ign); store_idt(&dt); host_idt_base = dt.address; diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index c2130d2c8e24bb5ff3a529a4bde67f875376adda..f566b9984f1940a5803c7ecf579be0534dbed5cb 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -420,6 +420,8 @@ void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type); u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu); u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu); +gva_t vmx_get_untagged_addr(struct kvm_vcpu *vcpu, gva_t gva, unsigned int flags); + static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type, bool value) { @@ -578,6 +580,17 @@ static inline u8 vmx_get_rvi(void) #define KVM_OPTIONAL_VMX_TERTIARY_VM_EXEC_CONTROL \ (TERTIARY_EXEC_IPI_VIRT) +#define KVM_REQUIRED_VMX_ZX_TERTIARY_VM_EXEC_CONTROL 0 +#define KVM_OPTIONAL_VMX_ZX_TERTIARY_VM_EXEC_CONTROL \ + (ZX_TERTIARY_EXEC_GUEST_ZXPAUSE) + +/* + * We shouldn't rw zxpause_vmexit_tsc vmcs field in this + * way, try to use another way in the future. + */ +#define KVM_REQUIRED_VMX_ZXPAUSE_VMEXIT_TSC 0 +#define KVM_OPTIONAL_VMX_ZXPAUSE_VMEXIT_TSC 1 + #define BUILD_CONTROLS_SHADOW(lname, uname, bits) \ static inline void lname##_controls_set(struct vcpu_vmx *vmx, u##bits val) \ { \ @@ -610,6 +623,8 @@ BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL, 32) BUILD_CONTROLS_SHADOW(tertiary_exec, TERTIARY_VM_EXEC_CONTROL, 64) +BUILD_CONTROLS_SHADOW(zx_tertiary_exec, ZX_TERTIARY_VM_EXEC_CONTROL, 32) +BUILD_CONTROLS_SHADOW(zx_vmexit_tsc, ZXPAUSE_VMEXIT_TSC, 64) /* * VMX_REGS_LAZY_LOAD_SET - The set of registers that will be updated in the @@ -712,6 +727,12 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE; } +static inline bool vmx_guest_zxpause_enabled(struct vcpu_vmx *vmx) +{ + return zx_tertiary_exec_controls_get(vmx) & + ZX_TERTIARY_EXEC_GUEST_ZXPAUSE; +} + static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { if (!enable_ept) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e179db7c17dad1429808afb6153f620f9c64196e..4e804ec85ea74376eca3520032f43db5cb3dc681 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1284,7 +1284,7 @@ int kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3) * stuff CR3, e.g. for RSM emulation, and there is no guarantee that * the current vCPU mode is accurate. 
*/ - if (kvm_vcpu_is_illegal_gpa(vcpu, cr3)) + if (!kvm_vcpu_is_legal_cr3(vcpu, cr3)) return 1; if (is_pae_paging(vcpu) && !load_pdptrs(vcpu, cr3)) @@ -1462,8 +1462,8 @@ static const u32 msrs_to_save_base[] = { MSR_IA32_RTIT_ADDR2_A, MSR_IA32_RTIT_ADDR2_B, MSR_IA32_RTIT_ADDR3_A, MSR_IA32_RTIT_ADDR3_B, MSR_IA32_UMWAIT_CONTROL, - MSR_IA32_XFD, MSR_IA32_XFD_ERR, + MSR_ZX_PAUSE_CONTROL, }; static const u32 msrs_to_save_pmu[] = { @@ -1564,6 +1564,8 @@ static const u32 emulated_msrs_all[] = { MSR_K7_HWCR, MSR_KVM_POLL_CONTROL, + + MSR_AMD64_SEV_ES_GHCB, }; static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)]; @@ -1701,22 +1703,17 @@ static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) struct kvm_msr_entry msr; int r; + /* Unconditionally clear the output for simplicity */ + msr.data = 0; msr.index = index; r = kvm_get_msr_feature(&msr); - if (r == KVM_MSR_RET_INVALID) { - /* Unconditionally clear the output for simplicity */ - *data = 0; - if (kvm_msr_ignored_check(index, 0, false)) - r = 0; - } - - if (r) - return r; + if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false)) + r = 0; *data = msr.data; - return 0; + return r; } static bool __kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) @@ -4633,6 +4630,17 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_X86_NOTIFY_VMEXIT: r = kvm_caps.has_notify_vmexit; break; + case KVM_CAP_SEV_ES_GHCB: + r = 0; + + /* Both CSV2 and SEV-ES guests support MSR_AMD64_SEV_ES_GHCB, + * but only CSV2 guest support export to emulate + * MSR_AMD64_SEV_ES_GHCB. + */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + r = static_call(kvm_x86_has_emulated_msr)(kvm, + MSR_AMD64_SEV_ES_GHCB); + break; default: break; } @@ -7101,6 +7109,20 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) r = kvm_vm_ioctl_set_msr_filter(kvm, &filter); break; } + case KVM_CONTROL_PRE_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + kvm_x86_ops.control_pre_system_reset) + r = static_call(kvm_x86_control_pre_system_reset)(kvm); + else + r = -ENOTTY; + break; + case KVM_CONTROL_POST_SYSTEM_RESET: + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + kvm_x86_ops.control_post_system_reset) + r = static_call(kvm_x86_control_post_system_reset)(kvm); + else + r = -ENOTTY; + break; default: r = -ENOTTY; } @@ -7145,6 +7167,10 @@ static void kvm_probe_msr_to_save(u32 msr_index) if (!kvm_cpu_cap_has(X86_FEATURE_WAITPKG)) return; break; + case MSR_ZX_PAUSE_CONTROL: + if (!kvm_cpu_cap_has(X86_FEATURE_ZXPAUSE)) + return; + break; case MSR_IA32_RTIT_CTL: case MSR_IA32_RTIT_STATUS: if (!kvm_cpu_cap_has(X86_FEATURE_INTEL_PT)) @@ -8321,6 +8347,15 @@ static void emulator_vm_bugged(struct x86_emulate_ctxt *ctxt) kvm_vm_bugged(kvm); } +static gva_t emulator_get_untagged_addr(struct x86_emulate_ctxt *ctxt, + gva_t addr, unsigned int flags) +{ + if (!kvm_x86_ops.get_untagged_addr) + return addr; + + return static_call(kvm_x86_get_untagged_addr)(emul_to_vcpu(ctxt), addr, flags); +} + static const struct x86_emulate_ops emulate_ops = { .vm_bugged = emulator_vm_bugged, .read_gpr = emulator_read_gpr, @@ -8365,6 +8400,7 @@ static const struct x86_emulate_ops emulate_ops = { .leave_smm = emulator_leave_smm, .triple_fault = emulator_triple_fault, .set_xcr = emulator_set_xcr, + .get_untagged_addr = emulator_get_untagged_addr, }; static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) @@ -9809,7 +9845,7 @@ static int complete_hypercall_exit(struct kvm_vcpu *vcpu) { u64 ret = 
vcpu->run->hypercall.ret; - if (!is_64_bit_mode(vcpu)) + if (!is_64_bit_hypercall(vcpu)) ret = (u32)ret; kvm_rax_write(vcpu, ret); ++vcpu->stat.hypercalls; @@ -9844,7 +9880,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) a3 &= 0xFFFFFFFF; } - if (static_call(kvm_x86_get_cpl)(vcpu) != 0) { + if (static_call(kvm_x86_get_cpl)(vcpu) != 0 && + !(nr == KVM_HC_VM_ATTESTATION || nr == KVM_HC_PSP_OP)) { ret = -KVM_EPERM; goto out; } @@ -9907,6 +9944,14 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) vcpu->arch.complete_userspace_io = complete_hypercall_exit; return 0; } + case KVM_HC_VM_ATTESTATION: + ret = -KVM_ENOSYS; + if (kvm_x86_ops.vm_attestation) + ret = static_call(kvm_x86_vm_attestation)(vcpu->kvm, a0, a1); + break; + case KVM_HC_PSP_OP: + ret = kvm_pv_psp_op(vcpu->kvm, a0, a1, a2, a3); + break; default: ret = -KVM_ENOSYS; break; @@ -11481,7 +11526,7 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) */ if (!(sregs->cr4 & X86_CR4_PAE) || !(sregs->efer & EFER_LMA)) return false; - if (kvm_vcpu_is_illegal_gpa(vcpu, sregs->cr3)) + if (!kvm_vcpu_is_legal_cr3(vcpu, sregs->cr3)) return false; } else { /* @@ -11511,21 +11556,24 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, if (kvm_set_apic_base(vcpu, &apic_base_msr)) return -EINVAL; - if (vcpu->arch.guest_state_protected) + if (vcpu->arch.guest_state_protected && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) return 0; - dt.size = sregs->idt.limit; - dt.address = sregs->idt.base; - static_call(kvm_x86_set_idt)(vcpu, &dt); - dt.size = sregs->gdt.limit; - dt.address = sregs->gdt.base; - static_call(kvm_x86_set_gdt)(vcpu, &dt); + if (!vcpu->arch.guest_state_protected) { + dt.size = sregs->idt.limit; + dt.address = sregs->idt.base; + static_call(kvm_x86_set_idt)(vcpu, &dt); + dt.size = sregs->gdt.limit; + dt.address = sregs->gdt.base; + static_call(kvm_x86_set_gdt)(vcpu, &dt); - vcpu->arch.cr2 = sregs->cr2; - *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; - vcpu->arch.cr3 = sregs->cr3; - kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); - static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + vcpu->arch.cr2 = sregs->cr2; + *mmu_reset_needed |= kvm_read_cr3(vcpu) != sregs->cr3; + vcpu->arch.cr3 = sregs->cr3; + kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3); + static_call_cond(kvm_x86_post_set_cr3)(vcpu, sregs->cr3); + } kvm_set_cr8(vcpu, sregs->cr8); @@ -11539,6 +11587,9 @@ static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, *mmu_reset_needed |= kvm_read_cr4(vcpu) != sregs->cr4; static_call(kvm_x86_set_cr4)(vcpu, sregs->cr4); + if (vcpu->arch.guest_state_protected) + return 0; + if (update_pdptrs) { idx = srcu_read_lock(&vcpu->kvm->srcu); if (is_pae_paging(vcpu)) { @@ -13399,6 +13450,10 @@ int kvm_handle_invpcid(struct kvm_vcpu *vcpu, unsigned long type, gva_t gva) switch (type) { case INVPCID_TYPE_INDIV_ADDR: + /* + * LAM doesn't apply to addresses that are inputs to TLB + * invalidation. 
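+	 * The operand is therefore checked for canonicality as-is, without
+	 * untagging.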
+ */ if ((!pcid_enabled && (operand.pcid != 0)) || is_noncanonical_address(operand.gla, vcpu)) { kvm_inject_gp(vcpu, 0); diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1e7be1f6ab299d78a76e76385db159dee679220b..53e883721e7167dfe2875412987a5254b3b0c83c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -529,6 +529,8 @@ bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type); __reserved_bits |= X86_CR4_VMXE; \ if (!__cpu_has(__c, X86_FEATURE_PCID)) \ __reserved_bits |= X86_CR4_PCIDE; \ + if (!__cpu_has(__c, X86_FEATURE_LAM)) \ + __reserved_bits |= X86_CR4_LAM_SUP; \ __reserved_bits; \ }) diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c index 0e65d00e2339ff95e022d03ee9012346d2ead8b3..3946badbd78fd7058a871b3d6779766a8f0c606d 100644 --- a/arch/x86/lib/delay.c +++ b/arch/x86/lib/delay.c @@ -117,6 +117,27 @@ static void delay_halt_tpause(u64 start, u64 cycles) __tpause(TPAUSE_C02_STATE, edx, eax); } +/* + * On ZHAOXIN the ZXPAUSE instruction waits until any of: + * 1) the delta of TSC counter exceeds the value provided in EDX:EAX + * 2) global timeout in ZX_PAUSE_CONTROL is exceeded + * 3) an external interrupt occurs + */ +static void delay_halt_zxpause(u64 unused, u64 cycles) +{ + u64 until = cycles; + u32 eax, edx; + + eax = lower_32_bits(until); + edx = upper_32_bits(until); + + /* + * Hard code the deeper (C0.1) sleep state because exit latency is + * small compared to the "microseconds" that usleep() will delay. + */ + __zxpause(ZXPAUSE_C01_STATE, edx, eax); +} + /* * On some AMD platforms, MWAITX has a configurable 32-bit timer, that * counts with TSC frequency. The input value is the number of TSC cycles @@ -183,6 +204,12 @@ void __init use_tpause_delay(void) delay_fn = delay_halt; } +void __init use_zxpause_delay(void) +{ + delay_halt_fn = delay_halt_zxpause; + delay_fn = delay_halt; +} + void use_mwaitx_delay(void) { delay_halt_fn = delay_halt_mwaitx; diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index c80febc44cd2feed47bf3c419accb98b12c1e38b..166a0934d3e46fb5643a88b132b0f660095a3ef0 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -67,3 +67,5 @@ obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_amd.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o + +obj-$(CONFIG_HYGON_CSV) += csv.o diff --git a/arch/x86/mm/csv.c b/arch/x86/mm/csv.c new file mode 100644 index 0000000000000000000000000000000000000000..09f2cb7b358abf419ef6e10ac6919b482a46ffd4 --- /dev/null +++ b/arch/x86/mm/csv.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hygon China Secure Virtualization (CSV) + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ * + * Author: Jiang Xin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef pr_fmt +#define pr_fmt(fmt) "CSV-CMA: " fmt + +#define NUM_SMR_ENTRIES (8 * 1024) +#define CSV_CMA_SHIFT PUD_SHIFT +#define CSV_CMA_SIZE (1 << CSV_CMA_SHIFT) +#define MIN_SMR_ENTRY_SHIFT 23 +#define CSV_SMR_INFO_SIZE (nr_node_ids * sizeof(struct csv_mem)) + +/* 0 percent of total memory by default*/ +static unsigned char csv_mem_percentage; +static unsigned long csv_mem_size; + +static int __init cmdline_parse_csv_mem_size(char *str) +{ + unsigned long size; + char *endp; + + if (str) { + size = memparse(str, &endp); + csv_mem_size = size; + if (!csv_mem_size) + csv_mem_percentage = 0; + } + + return 0; +} +early_param("csv_mem_size", cmdline_parse_csv_mem_size); + +static int __init cmdline_parse_csv_mem_percentage(char *str) +{ + unsigned char percentage; + int ret; + + if (!str) + return 0; + + ret = kstrtou8(str, 10, &percentage); + if (!ret) { + csv_mem_percentage = min_t(unsigned char, percentage, 80); + if (csv_mem_percentage != percentage) + pr_warn("csv_mem_percentage is limited to 80.\n"); + } else { + /* Disable CSV CMA. */ + csv_mem_percentage = 0; + pr_err("csv_mem_percentage is invalid. (0 - 80) is expected.\n"); + } + + return ret; +} +early_param("csv_mem_percentage", cmdline_parse_csv_mem_percentage); + +struct csv_mem *csv_smr; +EXPORT_SYMBOL_GPL(csv_smr); + +unsigned int csv_smr_num; +EXPORT_SYMBOL_GPL(csv_smr_num); + +struct csv_cma { + int fast; + struct cma *cma; +}; + +struct cma_array { + unsigned long count; + struct csv_cma csv_cma[]; +}; + +static unsigned int smr_entry_shift; +static struct cma_array *csv_contiguous_pernuma_area[MAX_NUMNODES]; + +static void csv_set_smr_entry_shift(unsigned int shift) +{ + smr_entry_shift = max_t(unsigned int, shift, MIN_SMR_ENTRY_SHIFT); + pr_info("SMR entry size is 0x%x\n", 1 << smr_entry_shift); +} + +unsigned int csv_get_smr_entry_shift(void) +{ + return smr_entry_shift; +} +EXPORT_SYMBOL_GPL(csv_get_smr_entry_shift); + +static unsigned long __init present_pages_in_node(int nid) +{ + unsigned long range_start_pfn, range_end_pfn; + unsigned long nr_present = 0; + int i; + + for_each_mem_pfn_range(i, nid, &range_start_pfn, &range_end_pfn, NULL) + nr_present += range_end_pfn - range_start_pfn; + + return nr_present; +} + +static phys_addr_t __init csv_early_percent_memory_on_node(int nid) +{ + return (present_pages_in_node(nid) * csv_mem_percentage / 100) << PAGE_SHIFT; +} + +static void __init csv_cma_reserve_mem(void) +{ + int node, i; + unsigned long size; + int idx = 0; + int count; + int cma_array_size; + unsigned long max_spanned_size = 0; + + csv_smr = memblock_alloc_node(CSV_SMR_INFO_SIZE, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!csv_smr) { + pr_err("Fail to allocate csv_smr\n"); + return; + } + + for_each_node_state(node, N_ONLINE) { + int ret; + char name[CMA_MAX_NAME]; + struct cma_array *array; + unsigned long spanned_size; + unsigned long start = 0, end = 0; + struct csv_cma *csv_cma; + + size = csv_early_percent_memory_on_node(node); + count = DIV_ROUND_UP(size, 1 << CSV_CMA_SHIFT); + if (!count) + continue; + + cma_array_size = count * sizeof(*csv_cma) + sizeof(*array); + array = memblock_alloc_node(cma_array_size, SMP_CACHE_BYTES, NUMA_NO_NODE); + if (!array) { + pr_err("Fail to allocate cma_array\n"); + continue; + } + + array->count = 0; + csv_contiguous_pernuma_area[node] = array; + + for (i = 0; i < count; i++) { + csv_cma = &array->csv_cma[i]; + csv_cma->fast = 1; + 
snprintf(name, sizeof(name), "csv-n%dc%d", node, i); + ret = cma_declare_contiguous_nid(0, CSV_CMA_SIZE, 0, + 1 << CSV_MR_ALIGN_BITS, PMD_SHIFT - PAGE_SHIFT, + false, name, &(csv_cma->cma), node); + if (ret) { + pr_warn("Fail to reserve memory size 0x%x node %d\n", + 1 << CSV_CMA_SHIFT, node); + break; + } + cma_enable_concurrency(csv_cma->cma); + + if (start > cma_get_base(csv_cma->cma) || !start) + start = cma_get_base(csv_cma->cma); + + if (end < cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma)) + end = cma_get_base(csv_cma->cma) + cma_get_size(csv_cma->cma); + } + + if (!i) + continue; + + array->count = i; + spanned_size = end - start; + if (spanned_size > max_spanned_size) + max_spanned_size = spanned_size; + + csv_smr[idx].start = start; + csv_smr[idx].size = end - start; + idx++; + + pr_info("Node %d - reserve size 0x%016lx, (expected size 0x%016lx)\n", + node, (unsigned long)i * CSV_CMA_SIZE, size); + } + + csv_smr_num = idx; + WARN_ON((max_spanned_size / NUM_SMR_ENTRIES) < 1); + if (likely((max_spanned_size / NUM_SMR_ENTRIES) >= 1)) + csv_set_smr_entry_shift(ilog2(max_spanned_size / NUM_SMR_ENTRIES - 1) + 1); +} + +/* + * Check whether host supports CSV3 in hygon platform. + * Called in the guest, it always returns false. + */ +static bool __init csv3_check_cpu_support(void) +{ + unsigned int eax, ebx, ecx, edx; + unsigned long me_mask; + u64 msr; + bool csv3_enabled; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if (sev_status) + return false; + + /* Check for the SME/CSV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return false; + +#define HYGON_SME_BIT BIT(0) +#define HYGON_CSV3_BIT BIT(30) + /* + * Check for the CSV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - SME support + * - Bit 1 - CSV support + * - Bit 3 - CSV2 support + * - Bit 30 - CSV3 support + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & HYGON_SME_BIT)) + return false; + + csv3_enabled = !!(eax & HYGON_CSV3_BIT); + + me_mask = 1UL << (ebx & 0x3f); + + /* No SME if Hypervisor bit is set */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (ecx & BIT(31)) + return false; + + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_AMD64_SYSCFG); + if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT)) + return false; + + return !!me_mask && csv3_enabled; +} + +#define CSV_CMA_AREAS 2458 + +void __init early_csv_reserve_mem(void) +{ + unsigned long total_pages; + + /* Only reserve memory on the host that enabled CSV3 feature */ + if (!csv3_check_cpu_support()) + return; + + if (cma_alloc_areas(CSV_CMA_AREAS)) + return; + + total_pages = PHYS_PFN(memblock_phys_mem_size()); + if (csv_mem_size) { + if (csv_mem_size < (total_pages << PAGE_SHIFT)) { + csv_mem_percentage = csv_mem_size * 100 / (total_pages << PAGE_SHIFT); + if (csv_mem_percentage > 80) + csv_mem_percentage = 80; /* Maximum percentage */ + } else + csv_mem_percentage = 80; /* Maximum percentage */ + } + + if (!csv_mem_percentage) { + pr_warn("Don't reserve any memory\n"); + return; + } + + csv_cma_reserve_mem(); +} + +phys_addr_t csv_alloc_from_contiguous(size_t size, nodemask_t *nodes_allowed, + unsigned int align) +{ + int nid; + int nr_nodes; + struct page *page = NULL; + phys_addr_t phys_addr; + int count; + struct csv_cma *csv_cma; + int fast = 1; + + if (!nodes_allowed || size > CSV_CMA_SIZE) { + pr_err("Invalid params, size = 0x%lx, nodes_allowed = %p\n", + size, nodes_allowed); + return 0; + } + + 
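+	/*
+	 * Two-pass allocation: first try only the CMA areas whose last
+	 * cma_alloc() succeeded (fast == 1); if that fails on every node,
+	 * retry including the areas previously marked slow.
+	 */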
align = min_t(unsigned int, align, get_order(CSV_CMA_SIZE));
+retry:
+	nr_nodes = nodes_weight(*nodes_allowed);
+
+	/* Traverse from current node */
+	nid = numa_node_id();
+	if (!node_isset(nid, *nodes_allowed))
+		nid = next_node_in(nid, *nodes_allowed);
+
+	for (; nr_nodes > 0; nid = next_node_in(nid, *nodes_allowed), nr_nodes--) {
+		struct cma_array *array = csv_contiguous_pernuma_area[nid];
+
+		if (!array)
+			continue;
+
+		count = array->count;
+		while (count) {
+			csv_cma = &array->csv_cma[count - 1];
+
+			/*
+			 * The value check of csv_cma->fast is lockless, but
+			 * that's OK since it doesn't affect functional
+			 * correctness whatever the value of csv_cma->fast.
+			 */
+			if (fast && !csv_cma->fast) {
+				count--;
+				continue;
+			}
+			page = cma_alloc(csv_cma->cma, PAGE_ALIGN(size) >> PAGE_SHIFT,
+					 align, true);
+			if (page) {
+				page->private = (unsigned long)csv_cma;
+				if (!csv_cma->fast)
+					csv_cma->fast = 1;
+				goto success;
+			} else {
+				csv_cma->fast = 0;
+			}
+
+			count--;
+		}
+	}
+
+	if (fast) {
+		fast = 0;
+		goto retry;
+	} else {
+		pr_err("Failed to alloc secure memory (size = 0x%zx)\n", size);
+		return 0;
+	}
+
+success:
+	phys_addr = page_to_phys(page);
+	clflush_cache_range(__va(phys_addr), size);
+
+	return phys_addr;
+}
+EXPORT_SYMBOL_GPL(csv_alloc_from_contiguous);
+
+void csv_release_to_contiguous(phys_addr_t pa, size_t size)
+{
+	struct csv_cma *csv_cma;
+	struct page *page = pfn_to_page(pa >> PAGE_SHIFT);
+
+	WARN_ON(!page);
+	if (likely(page)) {
+		csv_cma = (struct csv_cma *)page->private;
+		WARN_ON(!csv_cma);
+		if (likely(csv_cma)) {
+			page->private = 0;
+			csv_cma->fast = 1;
+			cma_release(csv_cma->cma, page, PAGE_ALIGN(size) >> PAGE_SHIFT);
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(csv_release_to_contiguous);
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 9f27e14e185f339f58010d859c7941b6c8345f3e..054f6113be67b70d6f00cf80fcfce3fc675880c1 100644
--- a/arch/x86/mm/mem_encrypt.c
+++ b/arch/x86/mm/mem_encrypt.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include

 /* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
 bool force_dma_unencrypted(struct device *dev)
@@ -39,6 +40,30 @@
 	return false;
 }

+static void print_hygon_cc_feature_info(void)
+{
+	/* Secure Memory Encryption */
+	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
+		/*
+		 * HYGON SME is mutually exclusive with any of the
+		 * HYGON CSV features below.
+ */ + pr_info(" HYGON SME"); + return; + } + + /* Secure Encrypted Virtualization */ + if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT)) + pr_info(" HYGON CSV"); + + /* Encrypted Register State */ + if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) + pr_info(" HYGON CSV2"); + + if (csv3_active()) + pr_info(" HYGON CSV3"); +} + static void print_mem_encrypt_feature_info(void) { pr_info("Memory Encryption Features active:"); @@ -48,6 +73,11 @@ static void print_mem_encrypt_feature_info(void) return; } + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + print_hygon_cc_feature_info(); + return; + } + pr_cont(" AMD"); /* Secure Memory Encryption */ diff --git a/arch/x86/mm/mem_encrypt_amd.c b/arch/x86/mm/mem_encrypt_amd.c index 45ff95264a097766ba4ac73aecbb543ea35152f5..de8c557116c7c0570109ec09e894b4bda593fd54 100644 --- a/arch/x86/mm/mem_encrypt_amd.c +++ b/arch/x86/mm/mem_encrypt_amd.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "mm_internal.h" @@ -344,6 +345,14 @@ static bool amd_enc_status_change_finish(unsigned long vaddr, int npages, bool e if (!cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) enc_dec_hypercall(vaddr, npages << PAGE_SHIFT, enc); + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. + */ + if (csv3_active()) + csv_memory_enc_dec(vaddr, npages, enc); + return true; } @@ -377,6 +386,9 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) */ clflush_cache_range(__va(pa), size); + if (csv3_active()) + goto skip_in_place_enc_dec; + /* Encrypt/decrypt the contents in-place */ if (enc) { sme_early_encrypt(pa, size); @@ -390,6 +402,7 @@ static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) early_snp_set_memory_shared((unsigned long)__va(pa), pa, 1); } +skip_in_place_enc_dec: /* Change the page encryption mask. */ new_pte = pfn_pte(pfn, new_prot); set_pte_atomic(kpte, new_pte); @@ -469,6 +482,15 @@ static int __init early_set_memory_enc_dec(unsigned long vaddr, early_set_mem_enc_dec_hypercall(start, size, enc); out: __flush_tlb_all(); + + /* + * On CSV3, the shared and private page attr changes should be managed + * by secure processor. Private pages live in isolated memory region, + * while shared pages live out of isolated memory region. 
+ */ + if (csv3_active()) + csv_early_memory_enc_dec(vaddr_end - size, size, enc); + return ret; } diff --git a/block/bio.c b/block/bio.c index 816d412c06e9b41ce13c3223a303fe5b6155156e..bc38c3ed161ae3f923aa9fed8cd55f0aed2a51ea 100644 --- a/block/bio.c +++ b/block/bio.c @@ -263,6 +263,12 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table, bio->bi_issue.value = 0; if (bdev) bio_associate_blkg(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + bio->start_time_ns = 0; + bio->io_start_time_ns = 0; + bio->bi_tg_end_io = NULL; + bio->bi_tg_private = NULL; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST bio->bi_iocost_cost = 0; #endif @@ -1366,12 +1372,15 @@ int submit_bio_wait(struct bio *bio) /* Prevent hang_check timer from firing at us during very long I/O */ hang_check = sysctl_hung_task_timeout_secs; + + task_set_wait_res(TASK_WAIT_BIO, bio); if (hang_check) while (!wait_for_completion_io_timeout(&done, hang_check * (HZ/2))) ; else wait_for_completion_io(&done); + task_clear_wait_res(); return blk_status_to_errno(bio->bi_status); } @@ -1599,6 +1608,10 @@ void bio_endio(struct bio *bio) blk_throtl_bio_endio(bio); /* release cgroup info */ bio_uninit(bio); +#ifdef CONFIG_BLK_DEV_THROTTLING + if (bio->bi_tg_end_io) + bio->bi_tg_end_io(bio); +#endif if (bio->bi_end_io) bio->bi_end_io(bio); } diff --git a/block/blk-core.c b/block/blk-core.c index fdf25b8d6e784f9904ee7892277acdb25430f3d3..b44a5f760b6f474849a4b193c11eedcc62c77972 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -403,6 +403,7 @@ struct request_queue *blk_alloc_queue(int node_id) return NULL; q->last_merge = NULL; + q->rq_hang_threshold = BLK_REQ_HANG_THRESHOLD; q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL); if (q->id < 0) @@ -723,6 +724,9 @@ void submit_bio_noacct(struct bio *bio) struct block_device *bdev = bio->bi_bdev; struct request_queue *q = bdev_get_queue(bdev); blk_status_t status = BLK_STS_IOERR; + DEFINE_WAIT(wait); + wait_queue_head_t *wait_head = NULL; + bool throtl; might_sleep(); @@ -796,7 +800,13 @@ void submit_bio_noacct(struct bio *bio) break; } - if (blk_throtl_bio(bio)) + throtl = blk_throtl_bio(bio, &wait_head, &wait); + if (wait_head) { + io_schedule(); + finish_wait(wait_head, &wait); + } + + if (throtl) return; submit_bio_noacct_nocheck(bio); return; diff --git a/block/blk-iocost.c b/block/blk-iocost.c index 089fcb9cfce37011f4cf6ec1f86ba36853fed381..2e20a07ca6b42bef2300a50cb2b2ef9c606603ac 100644 --- a/block/blk-iocost.c +++ b/block/blk-iocost.c @@ -3490,6 +3490,36 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input, return ret; } +static u64 ioc_stat_prfill(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct blkcg_gq *blkg = pd->blkg; + const char *dname = blkg_dev_name(blkg); + struct ioc_gq *iocg = blkg_to_iocg(blkg); + struct ioc *ioc = iocg->ioc; + + if (!dname) + return 0; + + seq_printf(sf, "%s is_active=%d active=%u inuse=%u " + "hweight_active=%u hweight_inuse=%u vrate=%llu\n", + dname, !list_empty(&iocg->active_list), + iocg->active, iocg->inuse, + iocg->hweight_active, iocg->hweight_inuse, + (unsigned long long)atomic64_read(&ioc->vtime_rate)); + + return 0; +} + +static int ioc_cost_print_stat(struct seq_file *sf, void *v) +{ + struct blkcg *blkcg = css_to_blkcg(seq_css(sf)); + + blkcg_print_blkgs(sf, blkcg, ioc_stat_prfill, + &blkcg_policy_iocost, seq_cft(sf)->private, false); + return 0; +} + static struct cftype ioc_files[] = { { .name = "weight", @@ -3512,8 +3542,36 @@ static struct cftype ioc_files[] = { {} }; +static 
struct cftype ioc_legacy_files[] = { + { + .name = "cost.weight", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_weight_show, + .write = ioc_weight_write, + }, + { + .name = "cost.qos", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_qos_show, + .write = ioc_qos_write, + }, + { + .name = "cost.model", + .flags = CFTYPE_ONLY_ON_ROOT, + .seq_show = ioc_cost_model_show, + .write = ioc_cost_model_write, + }, + { + .name = "cost.stat", + .flags = CFTYPE_NOT_ON_ROOT, + .seq_show = ioc_cost_print_stat, + }, + {} +}; + static struct blkcg_policy blkcg_policy_iocost = { .dfl_cftypes = ioc_files, + .legacy_cftypes = ioc_legacy_files, .cpd_alloc_fn = ioc_cpd_alloc, .cpd_free_fn = ioc_cpd_free, .pd_alloc_fn = ioc_pd_alloc, diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c index c3b5930106b288bec70850a90f88f2fdc1ed5a38..271535f56bd2b39e582c44abea55929a925ffe21 100644 --- a/block/blk-mq-debugfs.c +++ b/block/blk-mq-debugfs.c @@ -155,12 +155,47 @@ static ssize_t queue_state_write(void *data, const char __user *buf, return count; } +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq); + +static bool blk_mq_check_rq_hang(struct request *rq, void *priv) +{ + struct seq_file *m = priv; + u64 now = ktime_get_ns(); + u64 duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration < rq->q->rq_hang_threshold) + return true; + + /* See comments in blk_mq_check_expired() */ + if (!req_ref_inc_not_zero(rq)) + return true; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if (duration >= rq->q->rq_hang_threshold) + blk_mq_debugfs_rq_hang_show(m, rq); + + blk_mq_put_rq_ref(rq); + + return true; + +} + +static int queue_rq_hang_show(void *data, struct seq_file *m) +{ + struct request_queue *q = data; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_rq_hang, m); + return 0; +} + static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = { { "poll_stat", 0400, queue_poll_stat_show }, { "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops }, { "pm_only", 0600, queue_pm_only_show, NULL }, { "state", 0600, queue_state_show, queue_state_write }, { "zone_wlock", 0400, queue_zone_wlock_show, NULL }, + { "rq_hang", 0400, queue_rq_hang_show, NULL }, { }, }; @@ -310,6 +345,52 @@ int blk_mq_debugfs_rq_show(struct seq_file *m, void *v) } EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show); +static void blk_mq_debugfs_rq_hang_show(struct seq_file *m, struct request *rq) +{ + const struct blk_mq_ops *const mq_ops = rq->q->mq_ops; + const unsigned int op = req_op(rq); + const char *op_str = blk_op_str(op); + struct bio *bio; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + seq_printf(m, "%p {.op=", rq); + if (strcmp(op_str, "UNKNOWN") == 0) + seq_printf(m, "%u", op); + else + seq_printf(m, "%s", op_str); + seq_puts(m, ", .cmd_flags="); + blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name, + ARRAY_SIZE(cmd_flag_name)); + seq_puts(m, ", .rq_flags="); + blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name, + ARRAY_SIZE(rqf_name)); + seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq))); + seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag, + rq->internal_tag); + seq_printf(m, ", .start_time_ns=%llu", rq->start_time_ns); + seq_printf(m, ", .io_start_time_ns=%llu", rq->io_start_time_ns); + seq_printf(m, ", .current_time=%llu", ktime_get_ns()); + + __rq_for_each_bio(bio, rq) { + seq_printf(m, ", .bio = %px", bio); + seq_printf(m, ", .sector = %llu, .len=%u", + bio->bi_iter.bi_sector, 
bio->bi_iter.bi_size); + seq_puts(m, ", .bio_pages = { "); + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + if (!page) + continue; + seq_printf(m, "%px ", page); + } + seq_puts(m, "}"); + } + if (mq_ops->show_rq) + mq_ops->show_rq(m, rq); + seq_puts(m, "}\n"); +} + static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos) __acquires(&hctx->lock) { diff --git a/block/blk-mq.c b/block/blk-mq.c index 6ab7f360ff2ac711bdadb9b6492dc80cb90759fa..f2bbc11e5b4f71835d5742426a5d41de3db87465 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -89,6 +89,11 @@ struct mq_inflight { unsigned int inflight[2]; }; +struct mq_hang { + struct block_device *part; + unsigned int hang[2]; +}; + static bool blk_mq_check_inflight(struct request *rq, void *priv) { struct mq_inflight *mi = priv; @@ -121,6 +126,29 @@ void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, inflight[1] = mi.inflight[1]; } +static bool blk_mq_check_hang(struct request *rq, void *priv) +{ + struct mq_hang *mh = priv; + u64 now = ktime_get_ns(), duration; + + duration = div_u64(now - rq->start_time_ns, NSEC_PER_MSEC); + if ((duration >= rq->q->rq_hang_threshold) && + (!mh->part->bd_partno || rq->part == mh->part)) + mh->hang[rq_data_dir(rq)]++; + + return true; +} + +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]) +{ + struct mq_hang mh = { .part = part }; + + blk_mq_queue_tag_busy_iter(q, blk_mq_check_hang, &mh); + hang[0] = mh.hang[0]; + hang[1] = mh.hang[1]; +} + void blk_freeze_queue_start(struct request_queue *q) { mutex_lock(&q->mq_freeze_lock); @@ -994,6 +1022,10 @@ static inline void blk_account_io_done(struct request *req, u64 now) update_io_ticks(req->part, jiffies, true); part_stat_inc(req->part, ios[sgrp]); part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns); + if (req->rq_flags & RQF_STATS) { + part_stat_add(req->part, d2c_nsecs[sgrp], + now - req->io_start_time_ns); + } part_stat_unlock(); } } diff --git a/block/blk-mq.h b/block/blk-mq.h index 1743857e0b01d9c94406d74aa923433d86852dd5..986d6bcbeefa18d3b834995bff7c0e7bd856f431 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -240,6 +240,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct block_device *part); void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part, unsigned int inflight[2]); +void blk_mq_hang_rw(struct request_queue *q, struct block_device *part, + unsigned int hang[2]); static inline void blk_mq_put_dispatch_budget(struct request_queue *q, int budget_token) diff --git a/block/blk-settings.c b/block/blk-settings.c index 0046b447268f912a1b587c6367adb88548571feb..2ad219fad5b4afb0840f33c0d7d1e7f9ae4c969c 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -25,6 +25,13 @@ void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) } EXPORT_SYMBOL_GPL(blk_queue_rq_timeout); +void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold) +{ + q->rq_hang_threshold = hang_threshold; +} +EXPORT_SYMBOL_GPL(blk_queue_rq_hang_threshold); + /** * blk_set_default_limits - reset limits to default values * @lim: the queue_limits structure to reset diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 63e4812623361ddde759809c4e04b715aa871e43..f852ce8b40a446d3975763475fc9bc42e47ce388 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -438,6 +438,26 @@ static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page, return count; } +static 
ssize_t queue_hang_threshold_show(struct request_queue *q, char *page) +{ + return sprintf(page, "%u\n", q->rq_hang_threshold); +} + +static ssize_t queue_hang_threshold_store(struct request_queue *q, const char *page, + size_t count) +{ + unsigned int hang_threshold; + int err; + + err = kstrtou32(page, 10, &hang_threshold); + if (err || hang_threshold == 0) + return -EINVAL; + + blk_queue_rq_hang_threshold(q, hang_threshold); + + return count; +} + static ssize_t queue_wc_show(struct request_queue *q, char *page) { if (test_bit(QUEUE_FLAG_WC, &q->queue_flags)) @@ -527,6 +547,7 @@ QUEUE_RO_ENTRY(queue_dax, "dax"); QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout"); QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask"); QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment"); +QUEUE_RW_ENTRY(queue_hang_threshold, "hang_threshold"); #ifdef CONFIG_BLK_DEV_THROTTLING_LOW QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time"); @@ -656,6 +677,7 @@ static struct attribute *queue_attrs[] = { #endif &queue_virt_boundary_mask_entry.attr, &queue_dma_alignment_entry.attr, + &queue_hang_threshold_entry.attr, NULL, }; diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 13e4377a8b2865f527019830074e0febbd7a766e..bb2cef7672b2ae828165bc8cecd8486bb58582b7 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -331,6 +331,10 @@ static void throtl_service_queue_init(struct throtl_service_queue *sq) { INIT_LIST_HEAD(&sq->queued[READ]); INIT_LIST_HEAD(&sq->queued[WRITE]); + sq->nr_queued_bytes[READ] = 0; + sq->nr_queued_bytes[WRITE] = 0; + init_waitqueue_head(&sq->wait[READ]); + init_waitqueue_head(&sq->wait[WRITE]); sq->pending_tree = RB_ROOT_CACHED; timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); } @@ -345,11 +349,14 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, if (!tg) return NULL; - if (blkg_rwstat_init(&tg->stat_bytes, gfp)) - goto err_free_tg; - - if (blkg_rwstat_init(&tg->stat_ios, gfp)) - goto err_exit_stat_bytes; + if (blkg_rwstat_init(&tg->stat_bytes, gfp) || + blkg_rwstat_init(&tg->stat_ios, gfp) || + blkg_rwstat_init(&tg->service_time, gfp) || + blkg_rwstat_init(&tg->wait_time, gfp) || + blkg_rwstat_init(&tg->completed, gfp) || + blkg_rwstat_init(&tg->total_bytes_queued, gfp) || + blkg_rwstat_init(&tg->total_io_queued, gfp)) + goto err; throtl_service_queue_init(&tg->service_queue); @@ -376,9 +383,14 @@ static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, return &tg->pd; -err_exit_stat_bytes: +err: blkg_rwstat_exit(&tg->stat_bytes); -err_free_tg: + blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); return NULL; } @@ -405,7 +417,9 @@ static void throtl_pd_init(struct blkg_policy_data *pd) * regardless of the position of the group in the hierarchy. 
*/ sq->parent_sq = &td->service_queue; - if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) + + /* Enable hierarchical throttling even on traditional hierarchy */ + if (blkg->parent) sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue; tg->td = td; } @@ -474,6 +488,8 @@ static void throtl_upgrade_state(struct throtl_data *td); static void throtl_pd_offline(struct blkg_policy_data *pd) { struct throtl_grp *tg = pd_to_tg(pd); + struct blkcg_gq *blkg = pd_to_blkg(pd); + struct blkcg_gq *parent = blkg->parent; tg->bps[READ][LIMIT_LOW] = 0; tg->bps[WRITE][LIMIT_LOW] = 0; @@ -484,6 +500,18 @@ static void throtl_pd_offline(struct blkg_policy_data *pd) if (!tg->td->limit_valid[tg->td->limit_index]) throtl_upgrade_state(tg->td); + if (parent) { + blkg_rwstat_add_aux(&blkg_to_tg(parent)->service_time, + &tg->service_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->wait_time, + &tg->wait_time); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->completed, + &tg->completed); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_bytes_queued, + &tg->total_bytes_queued); + blkg_rwstat_add_aux(&blkg_to_tg(parent)->total_io_queued, + &tg->total_io_queued); + } } static void throtl_pd_free(struct blkg_policy_data *pd) @@ -493,9 +521,25 @@ static void throtl_pd_free(struct blkg_policy_data *pd) del_timer_sync(&tg->service_queue.pending_timer); blkg_rwstat_exit(&tg->stat_bytes); blkg_rwstat_exit(&tg->stat_ios); + blkg_rwstat_exit(&tg->service_time); + blkg_rwstat_exit(&tg->wait_time); + blkg_rwstat_exit(&tg->completed); + blkg_rwstat_exit(&tg->total_bytes_queued); + blkg_rwstat_exit(&tg->total_io_queued); kfree(tg); } +static void throtl_pd_reset(struct blkg_policy_data *pd) +{ + struct throtl_grp *tg = pd_to_tg(pd); + + blkg_rwstat_reset(&tg->service_time); + blkg_rwstat_reset(&tg->wait_time); + blkg_rwstat_reset(&tg->completed); + blkg_rwstat_reset(&tg->total_bytes_queued); + blkg_rwstat_reset(&tg->total_io_queued); +} + static struct throtl_grp * throtl_rb_first(struct throtl_service_queue *parent_sq) { @@ -958,6 +1002,65 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, return false; } +static void throtl_stats_update_completion(struct throtl_grp *tg, + uint64_t start_time, + uint64_t io_start_time, + int op) +{ + unsigned long flags; + uint64_t now = sched_clock(); + + local_irq_save(flags); + if (time_after64(now, io_start_time)) + blkg_rwstat_add(&tg->service_time, op, now - io_start_time); + if (time_after64(io_start_time, start_time)) + blkg_rwstat_add(&tg->wait_time, op, io_start_time - start_time); + blkg_rwstat_add(&tg->completed, op, 1); + local_irq_restore(flags); +} + +static void throtl_bio_end_io(struct bio *bio) +{ + struct throtl_grp *tg; + + rcu_read_lock(); + /* see comments in throtl_bio_stats_start() */ + if (!bio_ext_flagged(bio, BIO_THROTL_STATED)) + goto out; + + tg = (struct throtl_grp *)bio->bi_tg_private; + if (!tg) + goto out; + + throtl_stats_update_completion(tg, bio_start_time_ns(bio), + bio_io_start_time_ns(bio), + bio_op(bio)); + blkg_put(tg_to_blkg(tg)); + bio_clear_ext_flag(bio, BIO_THROTL_STATED); +out: + rcu_read_unlock(); +} + +static inline void throtl_bio_stats_start(struct bio *bio, struct throtl_grp *tg) +{ + int op = bio_op(bio); + + /* + * It may happen that end_io will be called twice like dm-thin, + * which will save origin end_io first, and call its overwrite + * end_io and then the saved end_io. We use bio flag + * BIO_THROTL_STATED to do only once statistics. 
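+ * The flag is set below together with bi_tg_end_io and cleared again
+ * in throtl_bio_end_io() once the completion has been accounted.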
+ */ + if ((op == REQ_OP_READ || op == REQ_OP_WRITE) && + !bio_ext_flagged(bio, BIO_THROTL_STATED)) { + blkg_get(tg_to_blkg(tg)); + bio_set_ext_flag(bio, BIO_THROTL_STATED); + bio->bi_tg_end_io = throtl_bio_end_io; + bio->bi_tg_private = tg; + bio_set_start_time_ns(bio); + } +} + static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) { bool rw = bio_data_dir(bio); @@ -1003,6 +1106,10 @@ static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, throtl_qnode_add_bio(bio, qn, &sq->queued[rw]); sq->nr_queued[rw]++; + sq->nr_queued_bytes[rw] += throtl_bio_data_size(bio); + blkg_rwstat_add(&tg->total_bytes_queued, bio_op(bio), + throtl_bio_data_size(bio)); + blkg_rwstat_add(&tg->total_io_queued, bio_op(bio), 1); throtl_enqueue_tg(tg); } @@ -1058,6 +1165,15 @@ static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) */ bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put); sq->nr_queued[rw]--; + sq->nr_queued_bytes[rw] -= throtl_bio_data_size(bio); + WARN_ON_ONCE(sq->nr_queued_bytes[rw] < 0); + + if (wq_has_sleeper(&sq->wait[rw])) { + if (sq->nr_queued_bytes[rw] > 0) + wake_up(&sq->wait[rw]); + else + wake_up_all(&sq->wait[rw]); + } throtl_charge_bio(tg, bio); @@ -1484,6 +1600,31 @@ static struct cftype throtl_legacy_files[] = { .private = offsetof(struct throtl_grp, stat_ios), .seq_show = tg_print_rwstat_recursive, }, + { + .name = "throttle.io_service_time", + .private = offsetof(struct throtl_grp, service_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_wait_time", + .private = offsetof(struct throtl_grp, wait_time), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.io_completed", + .private = offsetof(struct throtl_grp, completed), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_bytes_queued", + .private = offsetof(struct throtl_grp, total_bytes_queued), + .seq_show = tg_print_rwstat, + }, + { + .name = "throttle.total_io_queued", + .private = offsetof(struct throtl_grp, total_io_queued), + .seq_show = tg_print_rwstat, + }, { } /* terminate */ }; @@ -1550,6 +1691,56 @@ static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, return 0; } +static u64 tg_prfill_extstat(struct seq_file *sf, struct blkg_policy_data *pd, + int off) +{ + struct throtl_grp *tg = pd_to_tg(pd); + const char *dname = blkg_dev_name(pd->blkg); + char bufs[10][21] = { "0", "0", "0", "0", "0", "0", "0", "0", "0", "0" }; + struct blkg_rwstat_sample tmp = { }; + + if (!dname) + return 0; + + /* read/write IOs wait time */ + blkg_rwstat_read(&tg->wait_time, &tmp); + snprintf(bufs[0], sizeof(bufs[0]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[1], sizeof(bufs[1]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write IOs service time */ + blkg_rwstat_read(&tg->service_time, &tmp); + snprintf(bufs[2], sizeof(bufs[2]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[3], sizeof(bufs[3]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write completed IOs */ + blkg_rwstat_read(&tg->completed, &tmp); + snprintf(bufs[4], sizeof(bufs[4]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[5], sizeof(bufs[5]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued bytes */ + blkg_rwstat_read(&tg->total_bytes_queued, &tmp); + snprintf(bufs[6], sizeof(bufs[6]), "%llu", + tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[7], sizeof(bufs[7]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + /* read/write queued IOs */ + blkg_rwstat_read(&tg->total_io_queued, &tmp); + snprintf(bufs[8], sizeof(bufs[8]), "%llu", + 
tmp.cnt[BLKG_RWSTAT_READ]); + snprintf(bufs[9], sizeof(bufs[9]), "%llu", + tmp.cnt[BLKG_RWSTAT_WRITE]); + + seq_printf(sf, "%s rwait=%s wwait=%s rserv=%s wserv=%s rcomp=%s wcomp=%s " + "rbytesq=%s wbytesq=%s riosq=%s wiosq=%s\n", + dname, bufs[0], bufs[1], bufs[2], bufs[3], bufs[4], + bufs[5], bufs[6], bufs[7], bufs[8], bufs[9]); + + return 0; +} + static int tg_print_limit(struct seq_file *sf, void *v) { blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit, @@ -1557,6 +1748,13 @@ static int tg_print_limit(struct seq_file *sf, void *v) return 0; } +static int tg_print_extstat(struct seq_file *sf, void *v) +{ + blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_extstat, + &blkcg_policy_throtl, 0, false); + return 0; +} + static ssize_t tg_set_limit(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { @@ -1693,6 +1891,10 @@ static struct cftype throtl_files[] = { .write = tg_set_limit, .private = LIMIT_MAX, }, + { + .name = "extstat", + .seq_show = tg_print_extstat, + }, { } /* terminate */ }; @@ -1712,6 +1914,7 @@ struct blkcg_policy blkcg_policy_throtl = { .pd_online_fn = throtl_pd_online, .pd_offline_fn = throtl_pd_offline, .pd_free_fn = throtl_pd_free, + .pd_reset_stats_fn = throtl_pd_reset, }; void blk_throtl_cancel_bios(struct gendisk *disk) @@ -2171,7 +2374,8 @@ static void throtl_upgrade_state(struct throtl_data *td) } #endif -bool __blk_throtl_bio(struct bio *bio) +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct blkcg_gq *blkg = bio->bi_blkg; @@ -2184,6 +2388,8 @@ bool __blk_throtl_bio(struct bio *bio) rcu_read_lock(); + throtl_bio_stats_start(bio, tg); + spin_lock_irq(&q->queue_lock); throtl_update_latency_buckets(td); @@ -2253,6 +2459,18 @@ bool __blk_throtl_bio(struct bio *bio) tg->last_low_overflow_time[rw] = jiffies; td->nr_queued[rw]++; + + if (rw == WRITE) { + u64 bps_limit = tg_bps_limit(tg, rw); + + if (bps_limit != U64_MAX && + (wq_has_sleeper(&sq->wait[rw]) || + sq->nr_queued_bytes[rw] > div_u64(bps_limit, 2))) { + *waitq = &sq->wait[rw]; + prepare_to_wait_exclusive(*waitq, wait, TASK_UNINTERRUPTIBLE); + } + } + throtl_add_bio_tg(bio, qn, tg); throttled = true; @@ -2273,6 +2491,8 @@ bool __blk_throtl_bio(struct bio *bio) bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; #endif spin_unlock_irq(&q->queue_lock); + if (!throttled) + bio_set_io_start_time_ns(bio); rcu_read_unlock(); return throttled; diff --git a/block/blk-throttle.h b/block/blk-throttle.h index bffbc9cfc8ab6bac79ad4d3cff04622ce1f852d8..4b5ce538ca5b177ba31ab2ae2497fc21598aaf0b 100644 --- a/block/blk-throttle.h +++ b/block/blk-throttle.h @@ -41,6 +41,8 @@ struct throtl_service_queue { */ struct list_head queued[2]; /* throtl_qnode [READ/WRITE] */ unsigned int nr_queued[2]; /* number of queued bios */ + long nr_queued_bytes[2]; /* number of queued bytes */ + wait_queue_head_t wait[2]; /* * RB tree of active children throtl_grp's, which are sorted by @@ -150,6 +152,16 @@ struct throtl_grp { struct blkg_rwstat stat_bytes; struct blkg_rwstat stat_ios; + /* total time spent on lower layer: scheduler, device and others */ + struct blkg_rwstat service_time; + /* total time spent on block throttle */ + struct blkg_rwstat wait_time; + /* total IOs completed */ + struct blkg_rwstat completed; + /* total bytes throttled */ + struct blkg_rwstat total_bytes_queued; + /* total IOs throttled */ + struct blkg_rwstat total_io_queued; }; extern struct blkcg_policy 
blkcg_policy_throtl; @@ -171,13 +183,18 @@ static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg) static inline int blk_throtl_init(struct gendisk *disk) { return 0; } static inline void blk_throtl_exit(struct gendisk *disk) { } static inline void blk_throtl_register(struct gendisk *disk) { } -static inline bool blk_throtl_bio(struct bio *bio) { return false; } +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) +{ + return false; +} static inline void blk_throtl_cancel_bios(struct gendisk *disk) { } #else /* CONFIG_BLK_DEV_THROTTLING */ int blk_throtl_init(struct gendisk *disk); void blk_throtl_exit(struct gendisk *disk); void blk_throtl_register(struct gendisk *disk); -bool __blk_throtl_bio(struct bio *bio); +bool __blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait); void blk_throtl_cancel_bios(struct gendisk *disk); static inline bool blk_should_throtl(struct bio *bio) @@ -204,13 +221,14 @@ static inline bool blk_should_throtl(struct bio *bio) return false; } -static inline bool blk_throtl_bio(struct bio *bio) +static inline bool blk_throtl_bio(struct bio *bio, wait_queue_head_t **waitq, + wait_queue_entry_t *wait) { if (!blk_should_throtl(bio)) return false; - return __blk_throtl_bio(bio); + return __blk_throtl_bio(bio, waitq, wait); } #endif /* CONFIG_BLK_DEV_THROTTLING */ diff --git a/block/blk.h b/block/blk.h index 08a358bc0919e2e8f5a53bbca4519ba6ad9d0b15..851a8a1a687d2c4311fac5058f2ca4476b339977 100644 --- a/block/blk.h +++ b/block/blk.h @@ -285,6 +285,8 @@ ssize_t part_stat_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf); ssize_t part_fail_show(struct device *dev, struct device_attribute *attr, char *buf); ssize_t part_fail_store(struct device *dev, struct device_attribute *attr, diff --git a/block/genhd.c b/block/genhd.c index cc32a0c704eb846c2abba312494c7272f4c74520..415f35fdeec9ba6732fe2d8dcf8d7259802eea6b 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -109,6 +109,7 @@ static void part_stat_read_all(struct block_device *part, for (group = 0; group < NR_STAT_GROUPS; group++) { stat->nsecs[group] += ptr->nsecs[group]; + stat->d2c_nsecs[group] += ptr->d2c_nsecs[group]; stat->sectors[group] += ptr->sectors[group]; stat->ios[group] += ptr->ios[group]; stat->merges[group] += ptr->merges[group]; @@ -964,7 +965,8 @@ ssize_t part_stat_show(struct device *dev, "%8lu %8lu %8llu %8u " "%8u %8u %8u " "%8lu %8lu %8llu %8u " - "%8lu %8u" + "%8lu %8u " + "%8u %8u %8u" "\n", stat.ios[STAT_READ], stat.merges[STAT_READ], @@ -986,7 +988,10 @@ ssize_t part_stat_show(struct device *dev, (unsigned long long)stat.sectors[STAT_DISCARD], (unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC), stat.ios[STAT_FLUSH], - (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC)); + (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC)); } ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, @@ -1004,6 +1009,23 @@ ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "%8u %8u\n", inflight[0], inflight[1]); } +ssize_t 
part_hang_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct block_device *bdev = dev_to_bdev(dev); + struct request_queue *q = bdev_get_queue(bdev); + unsigned int hang[2] = {0, 0}; + + /* + * For now, we only support mq device, since don't find a generic method + * to track reqs in single queue device. + */ + if (queue_is_mq(q)) + blk_mq_hang_rw(q, bdev, hang); + + return sprintf(buf, "%8u %8u\n", hang[0], hang[1]); +} + static ssize_t disk_capability_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1048,6 +1070,7 @@ static DEVICE_ATTR(discard_alignment, 0444, disk_discard_alignment_show, NULL); static DEVICE_ATTR(capability, 0444, disk_capability_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); static DEVICE_ATTR(badblocks, 0644, disk_badblocks_show, disk_badblocks_store); static DEVICE_ATTR(diskseq, 0444, diskseq_show, NULL); @@ -1091,6 +1114,7 @@ static struct attribute *disk_attrs[] = { &dev_attr_capability.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, &dev_attr_badblocks.attr, &dev_attr_events.attr, &dev_attr_events_async.attr, @@ -1257,7 +1281,8 @@ static int diskstats_show(struct seq_file *seqf, void *v) "%lu %lu %lu %u " "%u %u %u " "%lu %lu %lu %u " - "%lu %u" + "%lu %u " + "%u %u %u" "\n", MAJOR(hd->bd_dev), MINOR(hd->bd_dev), hd, stat.ios[STAT_READ], @@ -1284,6 +1309,12 @@ static int diskstats_show(struct seq_file *seqf, void *v) NSEC_PER_MSEC), stat.ios[STAT_FLUSH], (unsigned int)div_u64(stat.nsecs[STAT_FLUSH], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_READ], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_WRITE], + NSEC_PER_MSEC), + (unsigned int)div_u64(stat.d2c_nsecs[STAT_DISCARD], NSEC_PER_MSEC) ); } diff --git a/block/partitions/core.c b/block/partitions/core.c index e137a87f4db0d30b6b5e3ab175ba76dc855b5ec1..b1867d8e61671335cc63e600d813cb706ff51dfe 100644 --- a/block/partitions/core.c +++ b/block/partitions/core.c @@ -208,6 +208,7 @@ static DEVICE_ATTR(alignment_offset, 0444, part_alignment_offset_show, NULL); static DEVICE_ATTR(discard_alignment, 0444, part_discard_alignment_show, NULL); static DEVICE_ATTR(stat, 0444, part_stat_show, NULL); static DEVICE_ATTR(inflight, 0444, part_inflight_show, NULL); +static DEVICE_ATTR(hang, 0444, part_hang_show, NULL); #ifdef CONFIG_FAIL_MAKE_REQUEST static struct device_attribute dev_attr_fail = __ATTR(make-it-fail, 0644, part_fail_show, part_fail_store); @@ -222,6 +223,7 @@ static struct attribute *part_attrs[] = { &dev_attr_discard_alignment.attr, &dev_attr_stat.attr, &dev_attr_inflight.attr, + &dev_attr_hang.attr, #ifdef CONFIG_FAIL_MAKE_REQUEST &dev_attr_fail.attr, #endif diff --git a/drivers/acpi/acpi_apd.c b/drivers/acpi/acpi_apd.c index 80f945cbec8a7cf12502400575a02a455594ed10..791f4b234e02052fb48f7f4919d42f1357ba09db 100644 --- a/drivers/acpi/acpi_apd.c +++ b/drivers/acpi/acpi_apd.c @@ -40,7 +40,8 @@ struct apd_private_data { const struct apd_device_desc *dev_desc; }; -#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || defined(CONFIG_ARM64) +#if defined(CONFIG_X86_AMD_PLATFORM_DEVICE) || \ +defined(CONFIG_ARM64) || defined(CONFIG_SW64) #define APD_ADDR(desc) ((unsigned long)&desc) static int acpi_apd_setup(struct apd_private_data *pdata) @@ -178,6 +179,18 @@ static const struct apd_device_desc hip08_spi_desc = { }; #endif /* CONFIG_ARM64 */ +#ifdef CONFIG_SW64 +static const struct 
apd_device_desc sunway_i2c_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; + +static const struct apd_device_desc sunway_spi_desc = { + .setup = acpi_apd_setup, + .fixed_clk_rate = 25000000, +}; +#endif + #endif /* @@ -246,6 +259,10 @@ static const struct acpi_device_id acpi_apd_device_ids[] = { { "HISI02A3", APD_ADDR(hip08_lite_i2c_desc) }, { "HISI0173", APD_ADDR(hip08_spi_desc) }, { "NXP0001", APD_ADDR(nxp_i2c_desc) }, +#endif +#ifdef CONFIG_SW64 + { "HISI02A1", APD_ADDR(sunway_i2c_desc) }, + { "HISI0173", APD_ADDR(sunway_spi_desc) }, #endif { } }; diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index 67c2c3b959e1538a645d43bda3449378c0051ed8..448370641d1d5bb1348622b0912ce9b81006df43 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -131,3 +131,7 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus) int apei_osc_setup(void); #endif + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 63ad0541db3817419ddc9d7fc7e51be2182ac229..9aeec2e4c47c76eb7a6c0e0460f9840d6664df8a 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -469,9 +469,62 @@ static void ghes_kick_task_work(struct callback_head *head) gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node, node_len); } +/* + * Tasks can handle task_work: + * + * - All user task: run task work before return to user. + */ +static bool should_add_task_work(struct task_struct *task) +{ + if (task->mm) + return true; + + return false; +} + +/** + * struct mce_task_work - for synchronous RAS event + * + * @twork: callback_head for task work + * @pfn: page frame number of corrupted page + * @flags: fine tune action taken + * + * Structure to pass task work to be handled before + * ret_to_user via task_work_add(). + */ +struct mce_task_work { + struct callback_head twork; + u64 pfn; + int flags; +}; + +static void memory_failure_cb(struct callback_head *twork) +{ + int rc; + struct mce_task_work *twcb = + container_of(twork, struct mce_task_work, twork); + + rc = memory_failure(twcb->pfn, twcb->flags); + kfree(twcb); + + if (!rc) + return; + /* + * -EHWPOISON from memory_failure() means that it already sent SIGBUS + * to the current process with the proper error info, so no need to + * send SIGBUS here again. 
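+ * Any other non-zero value means the page was not recovered, so fall
+ * through and raise SIGBUS from here.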
+ */ + if (rc == -EHWPOISON) + return; + + pr_err("Memory error not recovered"); + force_sig(SIGBUS); +} + static bool ghes_do_memory_failure(u64 physical_addr, int flags) { unsigned long pfn; + struct mce_task_work *twcb; if (!IS_ENABLED(CONFIG_ACPI_APEI_MEMORY_FAILURE)) return false; @@ -484,12 +537,25 @@ static bool ghes_do_memory_failure(u64 physical_addr, int flags) return false; } + if (flags == MF_ACTION_REQUIRED && should_add_task_work(current)) { + twcb = kmalloc(sizeof(*twcb), GFP_ATOMIC); + if (!twcb) + return false; + + twcb->pfn = pfn; + twcb->flags = flags; + init_task_work(&twcb->twork, memory_failure_cb); + task_work_add(current, &twcb->twork, TWA_RESUME); + return false; + } + memory_failure_queue(pfn, flags); + return true; } static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, - int sev) + bool sync, int sev) { int flags = -1; int sec_sev = ghes_severity(gdata->error_severity); @@ -503,7 +569,7 @@ static bool ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED)) flags = MF_SOFT_OFFLINE; if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE) - flags = 0; + flags = sync ? MF_ACTION_REQUIRED : 0; if (flags != -1) return ghes_do_memory_failure(mem_err->physical_addr, flags); @@ -657,6 +723,33 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata, schedule_work(&entry->work); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +/* + * Check if the event is synchronous exception by Yitian DDR Raw data + * NOTE: only works for Yitian 710 now + */ +static bool is_sync_event(const struct acpi_hest_generic_status *estatus) +{ + struct yitian_raw_data_header *header; + struct yitian_ddr_raw_data *data; + + if (!yitian_estatus_check_header(estatus)) + return false; + + header = (struct yitian_raw_data_header *)((void *)estatus + + estatus->raw_data_offset); + if (header->type != ERR_TYPE_DDR) + return false; + + data = (struct yitian_ddr_raw_data *)(header + 1); + /* 1 for synchronous exception */ + if (data->ex_type == 1) + return true; + + return false; +} +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + static bool ghes_do_proc(struct ghes *ghes, const struct acpi_hest_generic_status *estatus) { @@ -666,8 +759,13 @@ static bool ghes_do_proc(struct ghes *ghes, const guid_t *fru_id = &guid_null; char *fru_text = ""; bool queued = false; + bool sync = false; sev = ghes_severity(estatus->error_severity); +#ifdef CONFIG_YITIAN_CPER_RAWDATA + if (estatus->raw_data_length) + sync = is_sync_event(estatus); +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ apei_estatus_for_each_section(estatus, gdata) { sec_type = (guid_t *)gdata->section_type; sec_sev = ghes_severity(gdata->error_severity); @@ -683,7 +781,7 @@ static bool ghes_do_proc(struct ghes *ghes, atomic_notifier_call_chain(&ghes_report_chain, sev, mem_err); arch_apei_report_mem_error(sev, mem_err); - queued = ghes_handle_memory_failure(gdata, sev); + queued = ghes_handle_memory_failure(gdata, sync, sev); } else if (guid_equal(sec_type, &CPER_SEC_PCIE)) { ghes_handle_aer(gdata); diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig index b3ed6212244c1e5405008355b7d0878252564251..f2fd79f22e7d836b07401eb055725b2632ba624c 100644 --- a/drivers/acpi/arm64/Kconfig +++ b/drivers/acpi/arm64/Kconfig @@ -21,3 +21,6 @@ config ACPI_AGDI config ACPI_APMT bool + +config ACPI_MPAM + bool diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile index 
143debc1ba4a9d9dae6147c3c1dfc4408d05c5e9..a55d16c01c50827564233d1cedfbe812253ddd6f 100644 --- a/drivers/acpi/arm64/Makefile +++ b/drivers/acpi/arm64/Makefile @@ -4,4 +4,5 @@ obj-$(CONFIG_ACPI_IORT) += iort.o obj-$(CONFIG_ACPI_GTDT) += gtdt.o obj-$(CONFIG_ACPI_APMT) += apmt.o obj-$(CONFIG_ARM_AMBA) += amba.o +obj-$(CONFIG_ACPI_MPAM) += mpam.o obj-y += dma.o init.o diff --git a/drivers/acpi/arm64/mpam.c b/drivers/acpi/arm64/mpam.c new file mode 100644 index 0000000000000000000000000000000000000000..153ef041abf0ca1e037b91dde21b86a62ba903ba --- /dev/null +++ b/drivers/acpi/arm64/mpam.c @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2022 Arm Ltd. + +/* Parse the MPAM ACPI table feeding the discovered nodes into the driver */ + +#define pr_fmt(fmt) "ACPI MPAM: " fmt + +#include +#include +#include +#include +#include + +#include + +#include + +/* Flags for acpi_table_mpam_msc.*_interrupt_flags */ +#define ACPI_MPAM_MSC_IRQ_MODE_EDGE 1 +#define ACPI_MPAM_MSC_IRQ_TYPE_MASK (3<<1) +#define ACPI_MPAM_MSC_IRQ_TYPE_WIRED 0 +#define ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER (1<<3) +#define ACPI_MPAM_MSC_IRQ_AFFINITY_VALID (1<<4) + +int ddrc_freq; + +/* Use OEM info in MPAM ACPI table to distinguish different machine types */ +struct acpi_mpam_machine_oem_info { + enum mpam_machine_type type; + char signature[ACPI_NAMESEG_SIZE + 1]; + u8 revision; + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static struct acpi_mpam_machine_oem_info acpi_mpam_machines[MPAM_NUM_MACHINE_TYPES] = { + [MPAM_YITIAN710] = { + .signature = "YMPM", + .revision = 0, + .oem_id = "PTG ", + .oem_table_id = "PTG01 ", + .oem_revision = 0, + }, +}; + +static bool frob_irq(struct platform_device *pdev, int intid, u32 flags, + int *irq, u32 processor_container_uid) +{ + int sense; + + if (!intid) + return false; + + /* 0 in this field indicates a wired interrupt */ + if (flags & ACPI_MPAM_MSC_IRQ_TYPE_MASK) + return false; + + if (flags & ACPI_MPAM_MSC_IRQ_MODE_EDGE) + sense = ACPI_EDGE_SENSITIVE; + else + sense = ACPI_LEVEL_SENSITIVE; + + /* + * If the GSI is in the GIC's PPI range, try and create a partitioned + * percpu interrupt. 
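+ * Partitioned per-cpu interrupts are not supported yet; that case is
+ * rejected just below.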
+ */ + if (16 <= intid && intid < 32 && processor_container_uid != ~0) { + pr_err_once("Partitioned interrupts not supported\n"); + return false; + } else { + *irq = acpi_register_gsi(&pdev->dev, intid, sense, + ACPI_ACTIVE_HIGH); + } + if (*irq <= 0) { + pr_err_once("Failed to register interrupt 0x%x with ACPI\n", + intid); + return false; + } + + return true; +} + +static void acpi_mpam_parse_irqs(struct platform_device *pdev, + struct acpi_mpam_msc_node *tbl_msc, + struct resource *res, int *res_idx) +{ + u32 flags, aff = ~0; + int irq; + + flags = tbl_msc->overflow_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->overflow_interrupt_affinity; + if (frob_irq(pdev, tbl_msc->overflow_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "overflow"; + + (*res_idx)++; + } + + flags = tbl_msc->error_interrupt_flags; + if (flags & ACPI_MPAM_MSC_IRQ_AFFINITY_VALID && + flags & ACPI_MPAM_MSC_IRQ_AFFINITY_PROCESSOR_CONTAINER) + aff = tbl_msc->error_interrupt_affinity; + else + aff = ~0; + if (frob_irq(pdev, tbl_msc->error_interrupt, flags, &irq, aff)) { + res[*res_idx].start = irq; + res[*res_idx].end = irq; + res[*res_idx].flags = IORESOURCE_IRQ; + res[*res_idx].name = "error"; + + (*res_idx)++; + } +} + +static int acpi_mpam_parse_resource(struct mpam_msc *msc, + struct acpi_mpam_resource_node *res) +{ + u32 cache_id; + int level; + + switch (res->locator_type) { + case ACPI_MPAM_LOCATION_TYPE_PROCESSOR_CACHE: + cache_id = res->locator.cache_locator.cache_reference; + if (mpam_current_machine == MPAM_YITIAN710) { + /* + * YITIAN710's BIOS doesn't support find level from + * cache id. Since it only supports L3 cache, use a + * fixed value, 3. 
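+ * (Presumably the firmware's PPTT lacks the cache id information
+ * that find_acpi_cache_level_from_id() depends on.)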
+ */ + level = 3; + } else { + level = find_acpi_cache_level_from_id(cache_id); + if (level < 0) { + pr_err_once("Bad level for cache with id %u\n", cache_id); + return level; + } + } + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_CACHE, + level, cache_id); + case ACPI_MPAM_LOCATION_TYPE_MEMORY: + if (mpam_current_machine == MPAM_YITIAN710) + ddrc_freq = res->locator.memory_locator.reserved; + return mpam_ris_create(msc, res->ris_index, MPAM_CLASS_MEMORY, + 255, res->locator.memory_locator.proximity_domain); + default: + /* These get discovered later and treated as unknown */ + return 0; + } +} + +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + int i, err; + struct acpi_mpam_resource_node *resources; + + resources = (struct acpi_mpam_resource_node *)(tbl_msc + 1); + for (i = 0; i < tbl_msc->num_resouce_nodes; i++) { + err = acpi_mpam_parse_resource(msc, &resources[i]); + if (err) + return err; + } + + return 0; +} + +static bool __init parse_msc_pm_link(struct acpi_mpam_msc_node *tbl_msc, + struct platform_device *pdev, + u32 *acpi_id) +{ + bool acpi_id_valid = false; + struct acpi_device *buddy; + char hid[16], uid[16]; + int err; + + memset(&hid, 0, sizeof(hid)); + memcpy(hid, &tbl_msc->hardware_id_linked_device, + sizeof(tbl_msc->hardware_id_linked_device)); + + if (!strcmp(hid, ACPI_PROCESSOR_CONTAINER_HID)) { + *acpi_id = tbl_msc->instance_id_linked_device; + acpi_id_valid = true; + } + + err = snprintf(uid, sizeof(uid), "%u", + tbl_msc->instance_id_linked_device); + if (err < 0 || err >= sizeof(uid)) + return acpi_id_valid; + + buddy = acpi_dev_get_first_match_dev(hid, uid, -1); + if (buddy) { + device_link_add(&pdev->dev, &buddy->dev, DL_FLAG_STATELESS); + } + + return acpi_id_valid; +} + +static int decode_interface_type(struct acpi_mpam_msc_node *tbl_msc, + enum mpam_msc_iface *iface) +{ + switch (tbl_msc->interface_type){ + case 0: + *iface = MPAM_IFACE_MMIO; + return 0; + case 1: + *iface = MPAM_IFACE_PCC; + return 0; + default: + return -EINVAL; + } +} + +static int __init _parse_table(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct property_entry props[4]; /* needs a sentinel */ + struct acpi_mpam_msc_node *tbl_msc; + int next_res, next_prop, err = 0; + struct acpi_device *companion; + struct platform_device *pdev; + enum mpam_msc_iface iface; + struct resource res[3]; + char uid[16]; + u32 acpi_id; + int msc_num = 0; + + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_offset += tbl_msc->length; + + /* + * If any of the reserved fields are set, make no attempt to + * parse the msc structure. This will prevent the driver from + * probing all the MSC, meaning it can't discover the system + * wide supported partid and pmg ranges. This avoids whatever + * this MSC is truncating the partids and creating a screaming + * error interrupt. + */ + if (tbl_msc->reserved || tbl_msc->reserved1 || tbl_msc->reserved2) + continue; + + if (decode_interface_type(tbl_msc, &iface)) + continue; + + next_res = 0; + next_prop = 0; + memset(res, 0, sizeof(res)); + memset(props, 0, sizeof(props)); + + /* + * Use an extra msc_num instead of msc->identifier, since MSC + * nodes with different types in MPAM ACPI table may have the + * same id value. 
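+ * The table's own identifier is still handed to the driver via
+ * platform_device_add_data() below.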
+ */ + pdev = platform_device_alloc("mpam_msc", msc_num++); + if (IS_ERR(pdev)) { + err = PTR_ERR(pdev); + break; + } + + if (tbl_msc->length < sizeof(*tbl_msc)) { + err = -EINVAL; + break; + } + + /* Some power management is described in the namespace: */ + err = snprintf(uid, sizeof(uid), "%u", tbl_msc->identifier); + if (err > 0 && err < sizeof(uid)) { + companion = acpi_dev_get_first_match_dev("ARMHAA5C", uid, -1); + if (companion) + ACPI_COMPANION_SET(&pdev->dev, companion); + } + + if (iface == MPAM_IFACE_MMIO) { + res[next_res].name = "MPAM:MSC"; + res[next_res].start = tbl_msc->base_address; + res[next_res].end = tbl_msc->base_address + tbl_msc->mmio_size - 1; + res[next_res].flags = IORESOURCE_MEM; + next_res++; + } else if (iface == MPAM_IFACE_PCC) { + props[next_prop++] = PROPERTY_ENTRY_U32("pcc-channel", + tbl_msc->base_address); + next_prop++; + } + + acpi_mpam_parse_irqs(pdev, tbl_msc, res, &next_res); + err = platform_device_add_resources(pdev, res, next_res); + if (err) + break; + + props[next_prop++] = PROPERTY_ENTRY_U32("arm,not-ready-us", + tbl_msc->max_nrdy_usec); + + /* + * The MSC's CPU affinity is described via its linked power + * management device, but only if it points at a Processor or + * Processor Container. + */ + if (parse_msc_pm_link(tbl_msc, pdev, &acpi_id)) { + props[next_prop++] = PROPERTY_ENTRY_U32("cpu_affinity", + acpi_id); + } + + err = device_create_managed_software_node(&pdev->dev, props, + NULL); + if (err) + break; + + /* Come back later if you want the RIS too */ + err = platform_device_add_data(pdev, tbl_msc, tbl_msc->length); + if (err) + break; + + platform_device_add(pdev); + } + + if (err) + platform_device_put(pdev); + + return err; +} + +static struct acpi_table_header *get_table(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type mtype; + acpi_status status; + + if (acpi_disabled || !mpam_cpus_have_feature()) + return NULL; + + mtype = acpi_mpam_get_machine_type(); + + if (mtype != MPAM_DEFAULT_MACHINE) + status = acpi_get_table(acpi_mpam_machines[mtype].signature, 0, &table); + else + status = acpi_get_table(ACPI_SIG_MPAM, 0, &table); + if (ACPI_FAILURE(status)) + return NULL; + + if (mtype == MPAM_DEFAULT_MACHINE && table->revision != 1) + return NULL; + + /* + * Kunpeng's MPAM ACPI adopts an older version of MPAM ACPI, so + * this MPAM ACPI driver is not suitable for Kunpeng platform. + * Skip it. 
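+ * Kunpeng firmware is recognised by its "HISI" OEM id below.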
+ */ + if (!strncmp(table->oem_id, "HISI", 4)) { + acpi_put_table(table); + return NULL; + } + + return table; +} + + + +static int __init acpi_mpam_parse(void) +{ + struct acpi_table_header *mpam; + int err; + + mpam = get_table(); + if (!mpam) + return 0; + + err = _parse_table(mpam); + acpi_put_table(mpam); + + return err; +} + +static int _count_msc(struct acpi_table_header *table) +{ + char *table_end, *table_offset = (char *)(table + 1); + struct acpi_mpam_msc_node *tbl_msc; + int ret = 0; + + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + table_end = (char *)table + table->length; + + while (table_offset < table_end) { + if (tbl_msc->length < sizeof(*tbl_msc)) + return -EINVAL; + + ret++; + + table_offset += tbl_msc->length; + tbl_msc = (struct acpi_mpam_msc_node *)table_offset; + } + + return ret; +} + + +int acpi_mpam_count_msc(void) +{ + struct acpi_table_header *mpam; + int ret; + + mpam = get_table(); + if (!mpam) + return 0; + + ret = _count_msc(mpam); + acpi_put_table(mpam); + + return ret; +} + +enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + struct acpi_table_header *table; + enum mpam_machine_type ret; + acpi_status status; + int i; + + ret = MPAM_DEFAULT_MACHINE; + + for (i = MPAM_DEFAULT_MACHINE + 1; i < MPAM_NUM_MACHINE_TYPES; i++) { + status = acpi_get_table(acpi_mpam_machines[i].signature, 0, &table); + if (ACPI_FAILURE(status)) + continue; + + if (!memcmp(acpi_mpam_machines[i].oem_id, table->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(acpi_mpam_machines[i].oem_table_id, table->oem_table_id, + ACPI_OEM_TABLE_ID_SIZE) && + acpi_mpam_machines[i].oem_revision == table->oem_revision) { + ret = i; + } + + acpi_put_table(table); + } + + return ret; +} + +/* + * Call after ACPI devices have been created, which happens behind acpi_scan_init() + * called from subsys_initcall(). PCC requires the mailbox driver, which is + * initialised from postcore_initcall(). + */ +subsys_initcall_sync(acpi_mpam_parse); diff --git a/drivers/acpi/numa/Kconfig b/drivers/acpi/numa/Kconfig index 39b1f34c21dfd7a86ddc360857897a94dc0d2046..67d1f40bfa9f4350e9c902bbc4fba983b969275a 100644 --- a/drivers/acpi/numa/Kconfig +++ b/drivers/acpi/numa/Kconfig @@ -2,7 +2,7 @@ config ACPI_NUMA bool "NUMA support" depends on NUMA - depends on (X86 || IA64 || ARM64 || LOONGARCH) + depends on (X86 || IA64 || ARM64 || LOONGARCH || SW64) default y if IA64 || ARM64 config ACPI_HMAT diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c index 12f330b0eac01a3653906889b3107c923917acb3..a50a65bd1f30d7d084d36fd6e26d8cddfdc74194 100644 --- a/drivers/acpi/numa/srat.c +++ b/drivers/acpi/numa/srat.c @@ -206,7 +206,7 @@ int __init srat_disabled(void) return acpi_numa < 0; } -#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_ARM64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) /* * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for * I/O localities since SRAT does not list them. I/O localities are diff --git a/drivers/acpi/pci_mcfg.c b/drivers/acpi/pci_mcfg.c index 860014b89b8ebe80fcfecb6107b56423f15770c0..1dccb26b2b7fa743958e7d97f32bebb920dc15c2 100644 --- a/drivers/acpi/pci_mcfg.c +++ b/drivers/acpi/pci_mcfg.c @@ -182,6 +182,32 @@ static struct mcfg_fixup mcfg_quirks[] = { LOONGSON_ECAM_MCFG("\0", 1), LOONGSON_ECAM_MCFG("LOONGSON", 1), #endif /* LOONGARCH */ + +#ifdef CONFIG_SW64 +#define _SW64_ECAM_QUIRK(rev, seg) \ + { "SUNWAY", "SUNWAY. 
", rev, seg, MCFG_BUS_ANY, &sw64_pci_ecam_ops } +#define SW64_ECAM_QUIRK(rev, node) _SW64_ECAM_QUIRK(rev, node * 8 + 0),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 1),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 2),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 3),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 4),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 5),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 6),\ + _SW64_ECAM_QUIRK(rev, node * 8 + 7) + + /** + * According to the address space of sw64, up to 8 nodes supported + * with a maximum of 8 pcie controllers per node + */ + SW64_ECAM_QUIRK(1, 0x00), + SW64_ECAM_QUIRK(1, 0x01), + SW64_ECAM_QUIRK(1, 0x02), + SW64_ECAM_QUIRK(1, 0x03), + SW64_ECAM_QUIRK(1, 0x04), + SW64_ECAM_QUIRK(1, 0x05), + SW64_ECAM_QUIRK(1, 0x06), + SW64_ECAM_QUIRK(1, 0x07), +#endif /* SW64 */ }; static char mcfg_oem_id[ACPI_OEM_ID_SIZE]; diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index a35dd0e41c27043bc0cb6f8783c1bc0280cb1155..56b53d45dadde28a8f4e336d1bb7adb95a741f04 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -21,6 +21,8 @@ #include #include +typedef int (*acpi_pptt_cpu_callback_t)(struct acpi_pptt_processor *, void *); + static struct acpi_subtable_header *fetch_pptt_subtable(struct acpi_table_header *table_hdr, u32 pptt_ref) { @@ -181,9 +183,10 @@ acpi_find_cache_level(struct acpi_table_header *table_hdr, * levels and split cache levels (data/instruction). * @table_hdr: Pointer to the head of the PPTT table * @cpu_node: processor node we wish to count caches for - * @levels: Number of levels if success. + * @levels: Number of levels if success. (*levels) should be initialized by + * the caller with the value to be used as the starting level. * @split_levels: Number of split cache levels (data/instruction) if - * success. Can by NULL. + * success. Can be NULL. * * Given a processor node containing a processing unit, walk into it and count * how many levels exist solely for it, and then walk up each level until we hit @@ -293,6 +296,177 @@ static struct acpi_pptt_processor *acpi_find_processor_node(struct acpi_table_he return NULL; } +/* + * acpi_pptt_find_cache_backwards() - Given a PPTT cache find a processor node + * that points to it. This lets us find a cacheinfo node by fw_token, but + * is totally broken as many processor node may point at the same PPTT + * cache indicating different instances of the cache. (e.g. all the L1 + * caches are the same shape, but they aren't the same cache). + * This only works if you cooked your PPTT table to look like this. 
+ */ +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_subtable_header *entry; + struct acpi_subtable_header *res; + unsigned long table_end; + u32 proc_sz; + int i; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor *); + + /* find the processor structure which points at with this cpuid */ + while ((unsigned long)entry + proc_sz < table_end) { + if (entry->length == 0) { + pr_warn("Invalid zero length subtable\n"); + break; + } + + cpu_node = (struct acpi_pptt_processor *)entry; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + + if (cpu_node->header.type != ACPI_PPTT_TYPE_PROCESSOR) + continue; + + for (i = 0; i < cpu_node->number_of_priv_resources; i++) { + res = acpi_get_pptt_resource(table_hdr, cpu_node, i); + if (&cache->header == res) + return cpu_node; + } + } + + return NULL; +} + +/* parent_node points into the table, but the table isn't provided. */ +static void acpi_pptt_get_child_cpus(struct acpi_pptt_processor *parent_node, + cpumask_t *cpus) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + acpi_status status; + u32 acpi_id; + int cpu; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return; + + for_each_possible_cpu(cpu) { + acpi_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table_hdr, acpi_id); + + while (cpu_node) { + if (cpu_node == parent_node) { + cpumask_set_cpu(cpu, cpus); + break; + } + cpu_node = fetch_pptt_node(table_hdr, cpu_node->parent); + } + } + + acpi_put_table(table_hdr); + + return; +} + +/** + * acpi_pptt_for_each_container() - Iterate over all processor containers + * + * Not all 'Processor' entries in the PPTT are either a CPU or a Processor + * Container, they may exist purely to describe a Private resource. CPUs + * have to be leaves, so a Processor Container is a non-leaf that has the + * 'ACPI Processor ID valid' flag set. + * + * Return: 0 for a complete walk, or the first non-zero value from the callback + * that stopped the walk. 
+ */ +int acpi_pptt_for_each_container(acpi_pptt_cpu_callback_t callback, void *arg) +{ + struct acpi_pptt_processor *cpu_node; + struct acpi_table_header *table_hdr; + struct acpi_subtable_header *entry; + bool leaf_flag, has_leaf_flag = false; + unsigned long table_end; + acpi_status status; + u32 proc_sz; + int ret = 0; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table_hdr); + if (ACPI_FAILURE(status)) + return 0; + + if (table_hdr->revision > 1) + has_leaf_flag = true; + + table_end = (unsigned long)table_hdr + table_hdr->length; + entry = ACPI_ADD_PTR(struct acpi_subtable_header, table_hdr, + sizeof(struct acpi_table_pptt)); + proc_sz = sizeof(struct acpi_pptt_processor); + while ((unsigned long)entry + proc_sz < table_end) { + cpu_node = (struct acpi_pptt_processor *)entry; + if (entry->type == ACPI_PPTT_TYPE_PROCESSOR && + cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID) + { + leaf_flag = cpu_node->flags & ACPI_PPTT_ACPI_LEAF_NODE; + if ((has_leaf_flag && !leaf_flag) || + (!has_leaf_flag && !acpi_pptt_leaf_node(table_hdr, cpu_node))) + { + ret = callback(cpu_node, arg); + if (ret) + break; + } + } + entry = ACPI_ADD_PTR(struct acpi_subtable_header, entry, + entry->length); + } + + acpi_put_table(table_hdr); + + return ret; +} + +struct __cpus_from_container_arg { + u32 acpi_cpu_id; + cpumask_t *cpus; +}; + +static int __cpus_from_container(struct acpi_pptt_processor *container, void *arg) +{ + struct __cpus_from_container_arg *params = arg; + + if (container->acpi_processor_id == params->acpi_cpu_id) + acpi_pptt_get_child_cpus(container, params->cpus); + + return 0; +} + +/** + * acpi_pptt_get_cpus_from_container() - Populate a cpumask with all CPUs in a + * processor containers + * + * Find the specified Processor Container, and fill cpus with all the cpus + * below it. + * + * Return: 0 for a complete walk, or an error if the mask is incomplete. + */ +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus) +{ + struct __cpus_from_container_arg params; + + params.acpi_cpu_id = acpi_cpu_id; + params.cpus = cpus; + + cpumask_clear(cpus); + return acpi_pptt_for_each_container(&__cpus_from_container, ¶ms); +} + static u8 acpi_cache_type(enum cache_type type) { switch (type) { @@ -812,3 +986,215 @@ int find_acpi_cpu_topology_hetero_id(unsigned int cpu) return find_acpi_cpu_topology_tag(cpu, PPTT_ABORT_PACKAGE, ACPI_PPTT_ACPI_IDENTICAL); } + + +/** + * find_acpi_cache_level_from_id() - Get the level of the specified cache + * @cache_id: The id field of the unified cache + * + * Determine the level relative to any CPU for the unified cache identified by + * cache_id. This allows the property to be found even if the CPUs are offline. + * + * The returned level can be used to group unified caches that are peers. + * + * The PPTT table must be rev 3 or later, + * + * If one CPUs L2 is shared with another as L3, this function will return + * and unpredictable value. + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns a value which represents the level of the specified cache. 
+ */ +int find_acpi_cache_level_from_id(u32 cache_id) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1* cache_v1; + struct acpi_pptt_processor *cpu_node; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each CPU + * to find the level... + */ + for_each_possible_cpu(cpu) { + + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + acpi_put_table(table); + return level; + } + } + } + + acpi_put_table(table); + return -ENOENT; +} + +/** + * acpi_pptt_get_cpumask_from_cache_id() - Get the cpus associated with the + * specified cache + * @cache_id: The id field of the unified cache + * @cpus: Where to buidl the cpumask + * + * Determine which CPUs are below this cache in the PPTT. This allows the property + * to be found even if the CPUs are offline. + * + * The PPTT table must be rev 3 or later, + * + * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found. + * Otherwise returns 0 and sets the cpus in the provided cpumask. + */ +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus) +{ + u32 acpi_cpu_id; + acpi_status status; + int level, cpu, num_levels; + struct acpi_pptt_cache *cache; + struct acpi_table_header *table; + struct acpi_pptt_cache_v1* cache_v1; + struct acpi_pptt_processor *cpu_node; + + cpumask_clear(cpus); + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return -ENOENT; + } + + if (table->revision < 3) { + acpi_put_table(table); + return -ENOENT; + } + + /* + * If we found the cache first, we'd still need to walk from each cpu. + */ + for_each_possible_cpu(cpu) { + + num_levels = 0; + acpi_cpu_id = get_acpi_id_for_cpu(cpu); + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + if (!cpu_node) + break; + acpi_count_levels(table, cpu_node, &num_levels, NULL); + + /* Start at 1 for L1 */ + for (level = 1; level <= num_levels; level++) { + cache = acpi_find_cache_node(table, acpi_cpu_id, + ACPI_PPTT_CACHE_TYPE_UNIFIED, + level, &cpu_node); + if (!cache) + continue; + + cache_v1 = ACPI_ADD_PTR(struct acpi_pptt_cache_v1, + cache, + sizeof(struct acpi_pptt_cache)); + + if (cache->flags & ACPI_PPTT_CACHE_ID_VALID && + cache_v1->cache_id == cache_id) { + cpumask_set_cpu(cpu, cpus); + } + } + } + + acpi_put_table(table); + return 0; +} + +/** + * acpi_pptt_get_cpumask_from_cache_id_and_level() - Get the cpus associated with the + * cache specified by id and level + * @cache_id: The id field of the unified cache + * @cache_level: The level of the unified cache + * @cpus: Where to buidl the cpumask + * + * Determine which CPUs are below this cache in the PPTT. 
+ * This allows the property to be found even if the CPUs are offline.
+ *
+ * The PPTT table must be rev 3 or later.
+ *
+ * Return: -ENOENT if the PPTT doesn't exist, or the cache cannot be found.
+ * Otherwise returns 0 and sets the cpus in the provided cpumask.
+ */
+int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level,
+						  cpumask_t *cpus)
+{
+	u32 acpi_cpu_id;
+	acpi_status status;
+	int cpu;
+	struct acpi_table_header *table;
+	struct acpi_pptt_cache *cache_node;
+	struct acpi_pptt_processor *cpu_node;
+
+	cpumask_clear(cpus);
+
+	status = acpi_get_table(ACPI_SIG_PPTT, 0, &table);
+	if (ACPI_FAILURE(status)) {
+		acpi_pptt_warn_missing();
+		return -ENOENT;
+	}
+
+	/*
+	 * FIXME: Since this function does not actually use the cache id in the
+	 * PPTT table, we downgrade the revision requirement.
+	 */
+	if (table->revision < 2) {
+		acpi_put_table(table);
+		return -ENOENT;
+	}
+
+	for_each_possible_cpu(cpu) {
+		acpi_cpu_id = get_acpi_id_for_cpu(cpu);
+		cpu_node = acpi_find_processor_node(table, acpi_cpu_id);
+		if (!cpu_node)
+			continue;
+
+		cache_node = acpi_find_cache_node(table, acpi_cpu_id,
+						  ACPI_PPTT_CACHE_TYPE_UNIFIED,
+						  cache_level, &cpu_node);
+		if (!cache_node)
+			continue;
+
+		cpu_node = acpi_pptt_find_cache_backwards(table, cache_node);
+		if (cpu_node && cpu_node->acpi_processor_id == cache_id)
+			cpumask_set_cpu(cpu, cpus);
+	}
+
+	acpi_put_table(table);
+	return 0;
+}
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8ab0a82b4da41d2aad9f975db596e6d0a9375e20..94cb47d740c9ce3fd3bccff8a1134a1370b4d472 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -566,7 +566,7 @@ static const char table_sigs[][ACPI_NAMESEG_SIZE] __initconst = {
 	ACPI_SIG_PSDT, ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT,
 	ACPI_SIG_IORT, ACPI_SIG_NFIT, ACPI_SIG_HMAT, ACPI_SIG_PPTT,
 	ACPI_SIG_NHLT, ACPI_SIG_AEST, ACPI_SIG_CEDT, ACPI_SIG_AGDI,
-	ACPI_SIG_NBFT };
+	ACPI_SIG_NBFT, ACPI_SIG_MPAM };
 
 #define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 42b51c9812a0ebab52d2dfd4cb94e563c8312f17..0fd5a5bce3e4f2135185c1e9c8d2fafe5c965f44 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -553,6 +553,15 @@ config SATA_VITESSE
 
 	  If unsure, say N.
 
+config SATA_ZHAOXIN
+	tristate "ZhaoXin SATA support"
+	depends on PCI
+	select SATA_HOST
+	help
+	  This option enables support for ZhaoXin Serial ATA.
+
+	  If unsure, say N.
+
 comment "PATA SFF controllers with BMDMA"
 
 config PATA_ALI
diff --git a/drivers/ata/Makefile b/drivers/ata/Makefile
index 20e6645ab737183d92fec21117286c5e354c272f..4b846692e3656f5a4449c6c0d176f2390e2f389f 100644
--- a/drivers/ata/Makefile
+++ b/drivers/ata/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_SATA_SIL)		+= sata_sil.o
 obj-$(CONFIG_SATA_SIS)		+= sata_sis.o
 obj-$(CONFIG_SATA_SVW)		+= sata_svw.o
 obj-$(CONFIG_SATA_ULI)		+= sata_uli.o
+obj-$(CONFIG_SATA_ZHAOXIN)	+= sata_zhaoxin.o
 obj-$(CONFIG_SATA_VIA)		+= sata_via.o
 obj-$(CONFIG_SATA_VITESSE)	+= sata_vsc.o
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 5686353e442cf41fb27174112617ae51f97fdfee..320281cefd9e138d8b753042660fd36a17ee21e4 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -3371,6 +3371,8 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
 			  struct ata_device **r_failed_dev)
 {
 	struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
+	struct device *device = ap ? ap->host->dev : NULL;
+	struct pci_dev *pdev = (!device || !dev_is_pci(device)) ?
NULL : to_pci_dev(device); struct ata_eh_context *ehc = &link->eh_context; struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL; enum ata_lpm_policy old_policy = link->lpm_policy; @@ -3379,6 +3381,11 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy, unsigned int err_mask; int rc; + /* if controller does not support lpm, then sets no LPM flags*/ + if ((pdev && pdev->vendor == PCI_VENDOR_ID_ZHAOXIN) && + !(~ap->host->flags & (ATA_HOST_NO_PART | ATA_HOST_NO_SSC | ATA_HOST_NO_DEVSLP))) + link->flags |= ATA_LFLAG_NO_LPM; + /* if the link or host doesn't do LPM, noop */ if (!IS_ENABLED(CONFIG_SATA_HOST) || (link->flags & ATA_LFLAG_NO_LPM) || (ap && !ap->ops->set_lpm)) diff --git a/drivers/ata/sata_zhaoxin.c b/drivers/ata/sata_zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..53c3e2ab60956b85ad7cb42d2db4ea3bfb5ef497 --- /dev/null +++ b/drivers/ata/sata_zhaoxin.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * sata_zhaoxin.c - ZhaoXin Serial ATA controllers + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRV_NAME "sata_zx" +#define DRV_VERSION "2.6.1" + +#define PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL 9002 +#define PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL 9003 + +enum board_ids_enum { + zx100s, +}; + +enum { + SATA_CHAN_ENAB = 0x40, /* SATA channel enable */ + SATA_INT_GATE = 0x41, /* SATA interrupt gating */ + SATA_NATIVE_MODE = 0x42, /* Native mode enable */ + PATA_UDMA_TIMING = 0xB3, /* PATA timing for DMA/ cable detect */ + PATA_PIO_TIMING = 0xAB, /* PATA timing register */ + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + ALL_PORTS = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), + + SATA_EXT_PHY = (1 << 6), /* 0==use PATA, 1==ext phy */ +}; + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); +static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val); +static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val); +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); + +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); + +static const struct pci_device_id zx_pci_tbl[] = { + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL), zx100s }, + { PCI_VDEVICE(ZHAOXIN, PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL), zx100s }, + + { } /* terminate list */ +}; + +static struct pci_driver zx_pci_driver = { + .name = DRV_NAME, + .id_table = zx_pci_tbl, + .probe = zx_init_one, +#ifdef CONFIG_PM_SLEEP + .suspend = ata_pci_device_suspend, + .resume = ata_pci_device_resume, +#endif + .remove = ata_pci_remove_one, +}; + +static struct scsi_host_template zx_sht = { + ATA_BMDMA_SHT(DRV_NAME), +}; + +static struct ata_port_operations zx_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = zx_tf_load, +}; + +static struct ata_port_operations zx_ops = { + .inherits = &zx_base_ops, + .hardreset = zx_hardreset, + .scr_read = zx_scr_read, + .scr_write = zx_scr_write, +}; + +static struct ata_port_info zx100s_port_info = { + .flags = ATA_FLAG_SATA | ATA_FLAG_SLAVE_POSS, + .pio_mask = ATA_PIO4, + .mwdma_mask = ATA_MWDMA2, + .udma_mask = ATA_UDMA6, + .port_ops = &zx_ops, +}; + + +static int zx_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + int rc; + + rc = sata_std_hardreset(link, class, deadline); + if (!rc || rc == -EAGAIN) { + struct ata_port *ap = link->ap; + int pmp = link->pmp; + 
int tmprc;
+
+		if (pmp) {
+			ap->ops->sff_dev_select(ap, pmp);
+			tmprc = ata_sff_wait_ready(&ap->link, deadline);
+		} else {
+			tmprc = ata_sff_wait_ready(link, deadline);
+		}
+		if (tmprc)
+			ata_link_err(link, "COMRESET wait for ready failed (errno=%d)\n",
+				     tmprc);
+		else
+			ata_link_dbg(link, "wait for ready succeeded\n");
+
+		ata_link_dbg(link, "COMRESET success (errno=%d) ap=%d link %d\n",
+			     rc, link->ap->port_no, link->pmp);
+	} else {
+		ata_link_err(link, "COMRESET failed (errno=%d) ap=%d link %d\n",
+			     rc, link->ap->port_no, link->pmp);
+	}
+	return rc;
+}
+
+static int zx_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
+{
+	static const u8 ipm_tbl[] = { 1, 2, 6, 0 };
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	int slot = 2 * link->ap->port_no + link->pmp;
+	u32 v = 0;
+	u8 raw;
+
+	switch (scr) {
+	case SCR_STATUS:
+		pci_read_config_byte(pdev, 0xA0 + slot, &raw);
+
+		/* read the DET field, bits 0 and 1 of the config byte */
+		v |= raw & 0x03;
+
+		/* read the SPD field, bits 4 and 5 of the config byte */
+		v |= raw & 0x30;
+
+		/* read the IPM field, bits 2 and 3 of the config byte */
+		v |= ((ipm_tbl[(raw >> 2) & 0x3]) << 8);
+		break;
+
+	case SCR_ERROR:
+		/* the 9002 and 9003 devices use 0xA8 as the error base */
+		WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL &&
+			pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL);
+		pci_write_config_byte(pdev, 0x42, slot);
+		pci_read_config_dword(pdev, 0xA8, &v);
+		break;
+
+	case SCR_CONTROL:
+		pci_read_config_byte(pdev, 0xA4 + slot, &raw);
+
+		/* read the DET field, bits 0 and 1 */
+		v |= ((raw & 0x02) << 1) | (raw & 0x01);
+
+		/* read the IPM field, bits 2 and 3 */
+		v |= ((raw >> 2) & 0x03) << 8;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	*val = v;
+	return 0;
+}
+
+static int zx_scr_write(struct ata_link *link, unsigned int scr, u32 val)
+{
+	struct pci_dev *pdev = to_pci_dev(link->ap->host->dev);
+	int slot = 2 * link->ap->port_no + link->pmp;
+	u32 v = 0;
+
+	WARN_ON(pdev == NULL);
+
+	switch (scr) {
+	case SCR_ERROR:
+		/* the 9002 and 9003 devices use 0xA8 as the error base */
+		WARN_ON(pdev->device != PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL &&
+			pdev->device != PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL);
+		pci_write_config_byte(pdev, 0x42, slot);
+		pci_write_config_dword(pdev, 0xA8, val);
+		return 0;
+
+	case SCR_CONTROL:
+		/* set the DET field */
+		v |= ((val & 0x4) >> 1) | (val & 0x1);
+
+		/* set the IPM field */
+		v |= ((val >> 8) & 0x3) << 2;
+
+		pci_write_config_byte(pdev, 0xA4 + slot, v);
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * zx_tf_load - send taskfile registers to host controller
+ * @ap: Port to which output is sent
+ * @tf: ATA taskfile register set
+ *
+ * Outputs ATA taskfile to standard ATA host controller.
+ *
+ * This works around an erratum in zx chipsets: the device register is
+ * reset after the IEN bit in the ctl register is changed.
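+ *
+ * The workaround below re-sends the device register (by forcing
+ * ATA_TFLAG_DEVICE into the taskfile flags) whenever the new taskfile
+ * changes ctl, so the register is rewritten after the IEN update.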
+ */ +static void zx_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) +{ + struct ata_taskfile ttf; + + if (tf->ctl != ap->last_ctl) { + ttf = *tf; + ttf.flags |= ATA_TFLAG_DEVICE; + tf = &ttf; + } + ata_sff_tf_load(ap, tf); +} + +static const unsigned int zx_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static const unsigned int zx100s_bar_sizes0[] = { + 8, 4, 8, 4, 16, 0 +}; + +static const unsigned int zx100s_bar_sizes1[] = { + 8, 4, 0, 0, 16, 0 +}; + +static int zx_prepare_host(struct pci_dev *pdev, struct ata_host **r_host) +{ + const struct ata_port_info *ppi0[] = { + &zx100s_port_info, NULL + }; + const struct ata_port_info *ppi1[] = { + &zx100s_port_info, &ata_dummy_port_info + }; + struct ata_host *host; + int i, rc; + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi0, &host); + else if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + rc = ata_pci_bmdma_prepare_host(pdev, ppi1, &host); + else + rc = -EINVAL; + + if (rc) + return rc; + + *r_host = host; + + /* 9002 hosts four sata ports as M/S of the two channels */ + /* 9003 hosts two sata ports as M/S of the one channel */ + for (i = 0; i < host->n_ports; i++) + ata_slave_link_init(host->ports[i]); + + return 0; +} + +static void zx_configure(struct pci_dev *pdev, int board_id) +{ + u8 tmp8; + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + dev_info(&pdev->dev, "routed to hard irq line %d\n", + (int) (tmp8 & 0xf0) == 0xf0 ? 0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channels (0x%x)\n", + (int)tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & ALL_PORTS) != ALL_PORTS) { + dev_dbg(&pdev->dev, "enabling SATA channel interrupts (0x%x)\n", + (int) tmp8); + tmp8 |= ALL_PORTS; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + dev_dbg(&pdev->dev, + "enabling SATA channel native mode (0x%x)\n", + (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } +} + +static int zx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + unsigned int i; + int rc; + struct ata_host *host = NULL; + int board_id = (int) ent->driver_data; + const unsigned int *bar_sizes; + int legacy_mode = 0; + + ata_print_version_once(&pdev->dev, DRV_VERSION); + + if (pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL || + pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) { + if ((pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { + u8 tmp8, mask; + + /* TODO: What if one channel is in native mode ... 
*/ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = 1; + } + if (legacy_mode) + return -EINVAL; + } + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_DUAL_CHANNEL) + bar_sizes = &zx100s_bar_sizes0[0]; + else if (board_id == zx100s && pdev->device == PCI_DEVICE_ID_ZHAOXIN_SING_CHANNEL) + bar_sizes = &zx100s_bar_sizes1[0]; + else + bar_sizes = &zx_bar_sizes[0]; + + for (i = 0; i < ARRAY_SIZE(zx_bar_sizes); i++) { + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < bar_sizes[i])) { + if (bar_sizes[i] == 0) + continue; + + dev_err(&pdev->dev, + "invalid PCI BAR %u (sz 0x%llx, val 0x%llx)\n", + i, + (unsigned long long)pci_resource_start(pdev, i), + (unsigned long long)pci_resource_len(pdev, i)); + + return -ENODEV; + } + } + + switch (board_id) { + case zx100s: + rc = zx_prepare_host(pdev, &host); + break; + default: + rc = -EINVAL; + } + if (rc) + return rc; + + zx_configure(pdev, board_id); + + pci_set_master(pdev); + return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, + IRQF_SHARED, &zx_sht); +} + +module_pci_driver(zx_pci_driver); + +MODULE_AUTHOR("Yanchen:YanchenSun@zhaoxin.com"); +MODULE_DESCRIPTION("SCSI low-level driver for ZX SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, zx_pci_tbl); +MODULE_VERSION(DRV_VERSION); diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c index cbae8be1fe52005553e3d9dded77a21595d2cafc..6f9087c6aef742b113bf4d328386e015dbdeca22 100644 --- a/drivers/base/cacheinfo.c +++ b/drivers/base/cacheinfo.c @@ -183,6 +183,38 @@ static bool cache_node_is_unified(struct cacheinfo *this_leaf, return of_property_read_bool(np, "cache-unified"); } +unsigned long cache_of_get_id(struct device_node *np) +{ + struct device_node *cpu; + unsigned long min_id = ~0UL; + + for_each_of_cpu_node(cpu) { + struct device_node *cache_node = cpu; + u64 id = of_get_cpu_hwid(cache_node, 0); + + while ((cache_node = of_find_next_cache_node(cache_node))) { + if ((cache_node == np) && (id < min_id)) { + min_id = id; + of_node_put(cache_node); + break; + } + of_node_put(cache_node); + } + } + + return min_id; +} + +static void cache_of_set_id(struct cacheinfo *this_leaf, struct device_node *np) +{ + unsigned long id = cache_of_get_id(np); + + if (id != ~0UL) { + this_leaf->id = id; + this_leaf->attributes |= CACHE_ID; + } +} + static void cache_of_set_props(struct cacheinfo *this_leaf, struct device_node *np) { @@ -198,6 +230,7 @@ static void cache_of_set_props(struct cacheinfo *this_leaf, cache_get_line_size(this_leaf, np); cache_nr_sets(this_leaf, np); cache_associativity(this_leaf); + cache_of_set_id(this_leaf, np); } static int cache_setup_of_node(unsigned int cpu) @@ -620,13 +653,19 @@ static ssize_t file_name##_show(struct device *dev, \ return sysfs_emit(buf, "%u\n", this_leaf->object); \ } -show_one(id, id); show_one(level, level); show_one(coherency_line_size, coherency_line_size); show_one(number_of_sets, number_of_sets); show_one(physical_line_partition, physical_line_partition); show_one(ways_of_associativity, ways_of_associativity); +static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct cacheinfo *this_leaf = dev_get_drvdata(dev); + + return sysfs_emit(buf, "%lu\n", this_leaf->id); +} + static ssize_t size_show(struct device *dev, struct device_attribute *attr, char *buf) { diff --git a/drivers/base/cpu.c 
b/drivers/base/cpu.c index 548491de818ef126f598eba06287bf7899418398..1b9d3afe6af4d27bc9229c4c1f7efd6280534871 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include "base.h" @@ -216,8 +218,28 @@ static ssize_t show_cpus_attr(struct device *dev, char *buf) { struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); + struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; + bool rich_container; - return cpumap_print_to_pagebuf(true, buf, ca->map); + rcu_read_lock(); + rich_container = in_rich_container(current); + rcu_read_unlock(); + + if (rich_container && !strcmp(attr->attr.name, "online")) { + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + + rich_container_get_cpus(scenario, &cpuset_allowed); + + put_task_struct(scenario); + } + else + cpumask_copy(&cpuset_allowed, ca->map); + + return cpumap_print_to_pagebuf(true, buf, &cpuset_allowed); } #define _CPU_ATTR(name, map) \ diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index a1474fb67db9b2f09812b293127730efc6655243..525574c312d3aeb7d063341487699ba91e81738d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "power.h" /* @@ -108,7 +109,19 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr, static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { + struct pci_dev *pdev = (!dev || !dev_is_pci(dev)) ? NULL : to_pci_dev(dev); + device_lock(dev); + + /* Zhaoxin sata controller may occur error when resume from runtime pm, so disable it */ + if (pdev && + pdev->vendor == PCI_VENDOR_ID_ZHAOXIN && + pdev->device == 0x9083 && + pdev->revision <= 0x20) { + device_unlock(dev); + return -EPERM; + } + if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); else if (sysfs_streq(buf, ctrl_on)) diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig index 8de74dcfa18cf53bf9ad623264bd3d3d83669fe4..7c486989dd04d58abb0055528c375fc7d5299727 100644 --- a/drivers/char/hw_random/Kconfig +++ b/drivers/char/hw_random/Kconfig @@ -152,6 +152,19 @@ config HW_RANDOM_VIA If unsure, say Y. +config HW_RANDOM_ZHAOXIN + tristate "Zhaoxin HW Random Number Generator support" + depends on X86 + default HW_RANDOM + help + This driver provides kernel-side support for the Random Number + Generator hardware found on Zhaoxin based motherboards. + + To compile this driver as a module, choose M here: the + module will be called zhaoxin-rng. + + If unsure, say Y. 
+
 config HW_RANDOM_IXP4XX
 	tristate "Intel IXP4xx NPU HW Pseudo-Random Number Generator support"
 	depends on ARCH_IXP4XX || COMPILE_TEST
diff --git a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile
index 32549a1186dc57a26250d117a70820b328af62ac..ef5b3ae0794dd87089709a5b9548fc427ebfd253 100644
--- a/drivers/char/hw_random/Makefile
+++ b/drivers/char/hw_random/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_HW_RANDOM_GEODE) += geode-rng.o
 obj-$(CONFIG_HW_RANDOM_N2RNG) += n2-rng.o
 n2-rng-y := n2-drv.o n2-asm.o
 obj-$(CONFIG_HW_RANDOM_VIA) += via-rng.o
+obj-$(CONFIG_HW_RANDOM_ZHAOXIN) += zhaoxin-rng.o
 obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos-trng.o
 obj-$(CONFIG_HW_RANDOM_IXP4XX) += ixp4xx-rng.o
 obj-$(CONFIG_HW_RANDOM_OMAP) += omap-rng.o
diff --git a/drivers/char/hw_random/via-rng.c b/drivers/char/hw_random/via-rng.c
index a9a0a3b09c8bdddda0b029b56147a435ed3a07bf..4288e1114fc96e2a02fae47484ac127f4b0ae683 100644
--- a/drivers/char/hw_random/via-rng.c
+++ b/drivers/char/hw_random/via-rng.c
@@ -35,7 +35,7 @@
 #include
 #include
 
-
+static struct x86_cpu_id via_rng_cpu_id[];
 
 enum {
@@ -135,7 +135,7 @@ static int via_rng_init(struct hwrng *rng)
 	 * is always enabled if CPUID rng_en is set. There is no
 	 * RNG configuration like it used to be the case in this
 	 * register */
-	if (((c->x86 == 6) && (c->x86_model >= 0x0f)) || (c->x86 > 6)){
+	if ((c->x86 == 6) && (c->x86_model >= 0x0f)) {
 		if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
 			pr_err(PFX "can't enable hardware RNG "
 				   "if XSTORE is not enabled\n");
@@ -196,7 +196,7 @@ static int __init via_rng_mod_init(void)
 {
 	int err;
 
-	if (!boot_cpu_has(X86_FEATURE_XSTORE))
+	if (!x86_match_cpu(via_rng_cpu_id))
 		return -ENODEV;
 
 	pr_info("VIA RNG detected\n");
@@ -217,8 +217,8 @@ static void __exit via_rng_mod_exit(void)
 }
 module_exit(via_rng_mod_exit);
 
-static struct x86_cpu_id __maybe_unused via_rng_cpu_id[] = {
-	X86_MATCH_FEATURE(X86_FEATURE_XSTORE, NULL),
+static struct x86_cpu_id via_rng_cpu_id[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 6, X86_FEATURE_XSTORE, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, via_rng_cpu_id);
diff --git a/drivers/char/hw_random/zhaoxin-rng.c b/drivers/char/hw_random/zhaoxin-rng.c
new file mode 100644
index 0000000000000000000000000000000000000000..f0bfda78fea14547a6b9b9711ad6a4ea1bd087e5
--- /dev/null
+++ b/drivers/char/hw_random/zhaoxin-rng.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * RNG driver for Zhaoxin RNGs
+ *
+ * Copyright 2023 (c) Zhaoxin Semiconductor Co., Ltd
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define DRIVER_VERSION "2.0.0"
+#define PFX KBUILD_MODNAME ": "
+
+enum {
+	ZHAOXIN_RNG_CHUNK_8 = 0x00,	/* 64 rand bits, 64 stored bits */
+	ZHAOXIN_RNG_CHUNK_4 = 0x01,	/* 32 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_CHUNK_2 = 0x02,	/* 16 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_CHUNK_1 = 0x03,	/* 8 rand bits, 32 stored bits */
+	ZHAOXIN_RNG_MAX_SIZE = (128 * 1024),
+};
+
+static int zhaoxin_rng_init(struct hwrng *rng)
+{
+	if (!boot_cpu_has(X86_FEATURE_XSTORE_EN)) {
+		pr_err(PFX "can't enable hardware RNG if XSTORE is not enabled\n");
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/* REP XSTORE: store "size" bytes of random data at "result" */
+static inline int rep_xstore(size_t size, size_t factor, void *result)
+{
+	asm volatile(".byte 0xf3, 0x0f, 0xa7, 0xc0"
+		     : "=m"(*(size_t *)result), "+c"(size), "+d"(factor),
+		       "+D"(result)
+		     : : "memory");
+
+	return 0;
+}
+
+static int zhaoxin_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
+{
+	if (max > ZHAOXIN_RNG_MAX_SIZE)
+		max = ZHAOXIN_RNG_MAX_SIZE;
+
+	rep_xstore(max, ZHAOXIN_RNG_CHUNK_1, data);
+
+	return max;
+}
+
+static struct hwrng zhaoxin_rng = {
+	.name = "zhaoxin",
+	.init = zhaoxin_rng_init,
+	.read = zhaoxin_rng_read,
+};
+
+static struct x86_cpu_id zhaoxin_rng_cpu_ids[] = {
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 6, X86_FEATURE_XSTORE, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(ZHAOXIN, 7, X86_FEATURE_XSTORE, NULL),
+	X86_MATCH_VENDOR_FAM_FEATURE(CENTAUR, 7, X86_FEATURE_XSTORE, NULL),
+	{}
+};
+MODULE_DEVICE_TABLE(x86cpu, zhaoxin_rng_cpu_ids);
+
+static int __init zhaoxin_rng_mod_init(void)
+{
+	int err;
+
+	if (!x86_match_cpu(zhaoxin_rng_cpu_ids)) {
+		pr_err(PFX "CPU does not support XSTORE\n");
+		return -ENODEV;
+	}
+
+	pr_info("Zhaoxin RNG detected\n");
+
+	err = hwrng_register(&zhaoxin_rng);
+	if (err)
+		pr_err(PFX "RNG registering failed (%d)\n", err);
+
+	return err;
+}
+module_init(zhaoxin_rng_mod_init);
+
+static void __exit zhaoxin_rng_mod_exit(void)
+{
+	hwrng_unregister(&zhaoxin_rng);
+}
+module_exit(zhaoxin_rng_mod_exit);
+
+MODULE_DESCRIPTION("H/W RNG driver for Zhaoxin CPUs");
+MODULE_AUTHOR("YunShen@zhaoxin.com");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRIVER_VERSION);
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index cb6138b8ded90548dea6bee34fc10fd805e618e8..bc9c6506fd59e6225eb4e76d9da022c78493bbe4 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -13,6 +13,10 @@ ifdef CONFIG_PARISC
 ipmi_si-y += ipmi_si_parisc.o
 endif
 
+ifdef CONFIG_LOONGARCH
+ipmi_si-y += ipmi_si_ls2k500.o
+endif
+
 obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
 obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
 obj-$(CONFIG_IPMI_SI) += ipmi_si.o
diff --git a/drivers/char/ipmi/btlock.h b/drivers/char/ipmi/btlock.h
new file mode 100644
index 0000000000000000000000000000000000000000..cf585e42d42d4ba5bda34ca52c476aaff40af0bb
--- /dev/null
+++ b/drivers/char/ipmi/btlock.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BTLOCK_H__
+#define __BTLOCK_H__
+
+#include
+#include
+
+union btlock {
+	char b[2];
+	unsigned int u;
+};
+
+/*
+ * Wait "delay" microseconds if taking the lock fails.
+ * Taking the lock fails if the other side already holds it, or if both
+ * sides try to take it at the same time.
+ * The compiler must access b with byte granularity.
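+ *
+ * The lock is a two-party byte lock (Dekker-style): each side owns one
+ * byte of the union. To take the lock a side writes its byte, then reads
+ * the whole word back; if only its own byte is set it holds the lock,
+ * otherwise it clears its byte and backs off. Bit 7 of a byte marks it
+ * as "held", the low 7 bits carry the delay hint the other side uses
+ * for its backoff (see the ndelay() calls below).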
+ */
+static inline int btlock_lock(volatile union btlock *p, int n, unsigned char delay)
+{
+	union btlock t, t1;
+	unsigned long flags;
+	unsigned long c0 = get_cycles(), c1;
+
+	if (n > 1)
+		return -1;
+	delay |= 0x80;
+	t1.u = 0;
+	t1.b[n] = delay;
+
+	while (1) {
+		local_irq_save(flags);
+		p->b[n] = delay;
+		t.u = p->u;
+		if (t.u == t1.u) {
+			wmb(); /* flush write out immediately */
+			local_irq_restore(flags);
+			return 0;
+		}
+		p->b[n] = 0;
+		t.u = p->u;
+		wmb(); /* flush write out immediately */
+		local_irq_restore(flags);
+		c1 = get_cycles();
+		/* mscycles (cycles per microsecond) is provided by the includer */
+		if (c1 - c0 > *mscycles * 1000)
+			return -1;
+		ndelay(((t.b[1 - n] & 0x7f) + (c1 & 1)) * 100);
+	}
+}
+
+static inline int btlock_trylock(volatile union btlock *p, int n, unsigned char delay)
+{
+	union btlock t, t1;
+	unsigned long flags;
+
+	if (n > 1)
+		return -1;
+	delay |= 0x80;
+	t1.u = 0;
+	t1.b[n] = delay;
+
+	local_irq_save(flags);
+	p->b[n] = delay;
+	t.u = p->u;
+	if (t.u == t1.u) {
+		wmb(); /* flush write out immediately */
+		local_irq_restore(flags);
+		return 0;
+	}
+	p->b[n] = 0;
+	t.u = p->u;
+	wmb(); /* flush write out immediately */
+	local_irq_restore(flags);
+	ndelay(((t.b[1 - n] & 0x7f) + (get_cycles() & 1)) * 100);
+	return -1;
+}
+
+static inline int btlock_unlock(volatile union btlock *p, int n)
+{
+	p->b[n] = 0;
+	wmb(); /* flush write out immediately */
+	return p->u;
+}
+
+static inline int btlock_islocked(volatile union btlock *p, int n)
+{
+	union btlock t;
+
+	t.u = p->u;
+	return t.b[n] && !t.b[1 - n];
+}
+#endif
diff --git a/drivers/char/ipmi/ipmi_si.h b/drivers/char/ipmi/ipmi_si.h
index a7ead2a4c753b4a9cca7d919c0ebf0dcf638b777..aa2f81472ce5320ad78fb89902125c7c98ce7455 100644
--- a/drivers/char/ipmi/ipmi_si.h
+++ b/drivers/char/ipmi/ipmi_si.h
@@ -51,6 +51,9 @@ struct si_sm_io {
 	unsigned int regshift;
 	enum ipmi_addr_space addr_space;
 	unsigned long addr_data;
+#ifdef CONFIG_LOONGARCH
+	void *addr_source_data;
+#endif
 	enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
 	union ipmi_smi_info_union addr_info;
 
@@ -101,6 +104,14 @@ static inline void ipmi_si_parisc_init(void) { }
 static inline void ipmi_si_parisc_shutdown(void) { }
 #endif
 
+#ifdef CONFIG_LOONGARCH
+int ipmi_si_ls2k500_init(void);
+void ipmi_si_ls2k500_shutdown(void);
+#else
+static inline int ipmi_si_ls2k500_init(void) { return 0; }
+static inline void ipmi_si_ls2k500_shutdown(void) { }
+#endif
+
 int ipmi_si_port_setup(struct si_sm_io *io);
 int ipmi_si_mem_setup(struct si_sm_io *io);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5cd031f3fc9700f10b1d5bcef0564ece1928df69..373ee71811e35fdd64f8791c6763fdd88f3574ad 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2104,6 +2104,8 @@ static int __init init_ipmi_si(void)
 
 	ipmi_si_platform_init();
 
+	ipmi_si_ls2k500_init();
+
 	ipmi_si_pci_init();
 
 	ipmi_si_parisc_init();
@@ -2289,6 +2291,8 @@ static void cleanup_ipmi_si(void)
 
 	ipmi_si_parisc_shutdown();
 
+	ipmi_si_ls2k500_shutdown();
+
 	ipmi_si_platform_shutdown();
 
 	mutex_lock(&smi_infos_lock);
diff --git a/drivers/char/ipmi/ipmi_si_ls2k500.c b/drivers/char/ipmi/ipmi_si_ls2k500.c
new file mode 100644
index 0000000000000000000000000000000000000000..7e259d85729f5375c7e9def353f5730b40b32ae2
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_si_ls2k500.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * ipmi_si_ls2k500.c
+ *
+ * Handling for the IPMI KCS interface of Loongson-2K500 BMCs.
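+ *
+ * The LS2K500 BMC does not expose KCS through real I/O ports; the KCS
+ * registers are simulated in a shared memory window. intf_sim_inb()
+ * and intf_sim_outb() below operate on an ioremap()ed IPMIKCS
+ * structure and take the btlock in it to serialize against the BMC
+ * side.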
+ */ + +#define pr_fmt(fmt) "ipmi_pci: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ipmi_si.h" +static unsigned long *mscycles; +static unsigned long *event_jiffies; +#include "kcs_bmc_ls2k500.h" +static int resetbootwait = 60; +module_param(resetbootwait, int, 0664); + +#define KCS_STATUS_CMD_DAT BIT(3) + +static int pcie_busy(void) +{ + if (time_before(jiffies, *event_jiffies + resetbootwait*HZ)) + return -1; + return 0; +} + +static unsigned char intf_sim_inb(const struct si_sm_io *io, + unsigned int offset) +{ + IPMIKCS *ik = io->addr_source_data; + uint32_t ret; + + if (pcie_busy()) + return 0; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return 0; + switch (offset & 1) { + case 0: + ret = ik->data_out_reg; + IPMI_KCS_SET_OBF(ik->status_reg, 0); + break; + case 1: + ret = ik->status_reg; + break; + } + btlock_unlock(&ik->lock, 0); + return ret; +} + +static void intf_sim_outb(const struct si_sm_io *io, unsigned int offset, + unsigned char val) +{ + IPMIKCS *ik = io->addr_source_data; + + if (pcie_busy()) + return; + if (btlock_lock(&ik->lock, 0, 1) < 0) + return; + if (IPMI_KCS_GET_IBF(ik->status_reg)) + goto out; + + switch (offset & 1) { + case 0: + ik->data_in_reg = val; + ik->status_reg &= ~KCS_STATUS_CMD_DAT; + break; + + case 1: + ik->cmd_reg = val; + ik->status_reg |= KCS_STATUS_CMD_DAT; + break; + } + IPMI_KCS_SET_IBF(ik->status_reg, 1); + ik->write_req++; +out: + btlock_unlock(&ik->lock, 0); +} + +static void ipmi_ls2k500_cleanup(struct si_sm_io *io) +{ +} + +int ipmi_si_sim_setup(struct si_sm_io *io) +{ + io->inputb = intf_sim_inb; + io->outputb = intf_sim_outb; + io->io_cleanup = ipmi_ls2k500_cleanup; + return 0; +} + +#define platform_resource_start(dev, bar) ((dev)->resource[(bar)].start) +#define platform_resource_end(dev, bar) ((dev)->resource[(bar)].end) +static int of_ipmi_ls2k500_probe(struct platform_device *pdev) +{ + int rv; + struct si_sm_io io; + void **kcs_data; + + memset(&io, 0, sizeof(io)); + io.addr_source = SI_PLATFORM; + dev_info(&pdev->dev, "probing via ls2k500 platform"); + io.si_type = SI_KCS; + + io.addr_space = IPMI_MEM_ADDR_SPACE; + io.io_setup = ipmi_si_sim_setup; + io.addr_data = pdev->resource[0].start; + io.addr_source_data = ioremap(pdev->resource[0].start, + pdev->resource[0].end - + pdev->resource[0].start + 1); + kcs_data = dev_get_platdata(&pdev->dev); + event_jiffies = kcs_data[0]; + mscycles = kcs_data[1]; + io.dev = &pdev->dev; + io.regspacing = 4; + io.regsize = DEFAULT_REGSIZE; + io.regshift = 0; + io.irq = 0; + if (io.irq) + io.irq_setup = ipmi_std_irq_setup; + + dev_info(&pdev->dev, "%pR regsize %d spacing %d irq %d\n", + &pdev->resource[0], io.regsize, io.regspacing, io.irq); + + rv = ipmi_si_add_smi(&io); + if (rv) + ipmi_si_remove_by_dev(&pdev->dev); + + return rv; +} + +static int ipmi_ls2k500_remove(struct platform_device *pdev) +{ + ipmi_si_remove_by_dev(&pdev->dev); + + return 0; +} + +#define LS2K500_SI_DEVICE_NAME "ipmi_ls2k500_si" +struct platform_driver ipmi_ls2k500_platform_driver = { + .driver = { + .name = LS2K500_SI_DEVICE_NAME, + }, + .probe = of_ipmi_ls2k500_probe, + .remove = ipmi_ls2k500_remove, +}; + +static bool platform_registered; +int ipmi_si_ls2k500_init(void) +{ + int rv; + + rv = platform_driver_register(&ipmi_ls2k500_platform_driver); + if (rv) + pr_err("Unable to register driver: %d\n", rv); + else + platform_registered = true; + return rv; +} + +void ipmi_si_ls2k500_shutdown(void) +{ + if (platform_registered) + 
platform_driver_unregister(&ipmi_ls2k500_platform_driver); +} diff --git a/drivers/char/ipmi/ipmi_si_mem_io.c b/drivers/char/ipmi/ipmi_si_mem_io.c index 86b92e93a70d52731635a43792eb61110582e2a2..dc6cf7d89fea4c8a696922a5d6808c5c5e78e422 100644 --- a/drivers/char/ipmi/ipmi_si_mem_io.c +++ b/drivers/char/ipmi/ipmi_si_mem_io.c @@ -3,9 +3,77 @@ #include #include "ipmi_si.h" +#ifdef CONFIG_ARCH_PHYTIUM +#include + +#define CTL_RST_FUNC_ID 0xC2000011 + +static bool apply_phytium2500_workaround; + +struct ipmi_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; +}; + +#ifdef CONFIG_ACPI +static struct ipmi_workaround_oem_info wa_info[] = { + { + .oem_id = "KPSVVJ", + } +}; +#endif + +static void ipmi_check_phytium_workaround(void) +{ +#ifdef CONFIG_ACPI + struct acpi_table_header tbl; + int i; + + if (ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_DSDT, 0, &tbl))) + return; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (strncmp(wa_info[i].oem_id, tbl.oem_id, ACPI_OEM_ID_SIZE)) + continue; + + apply_phytium2500_workaround = true; + break; + } +#endif +} + +static void ctl_smc(unsigned long arg0, unsigned long arg1, + unsigned long arg2, unsigned long arg3) +{ + struct arm_smccc_res res; + + arm_smccc_smc(arg0, arg1, arg2, arg3, 0, 0, 0, 0, &res); + if (res.a0 != 0) + pr_err("Error: Firmware call SMC reset Failed: %d, addr: 0x%lx\n", + (int)res.a0, arg2); +} + +static void ctl_timeout_reset(void) +{ + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x28100208, 0x1); + ctl_smc(CTL_RST_FUNC_ID, 0x1, 0x2810020C, 0x1); +} + +static inline void ipmi_phytium_workaround(void) +{ + if (apply_phytium2500_workaround) + ctl_timeout_reset(); +} + +#else +static inline void ipmi_check_phytium_workaround(void) {} +static inline void ipmi_phytium_workaround(void) {} +#endif + static unsigned char intf_mem_inb(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return readb((io->addr)+(offset * io->regspacing)); } @@ -18,6 +86,8 @@ static void intf_mem_outb(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inw(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readw((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -31,6 +101,8 @@ static void intf_mem_outw(const struct si_sm_io *io, unsigned int offset, static unsigned char intf_mem_inl(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readl((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -44,6 +116,8 @@ static void intf_mem_outl(const struct si_sm_io *io, unsigned int offset, #ifdef readq static unsigned char mem_inq(const struct si_sm_io *io, unsigned int offset) { + ipmi_phytium_workaround(); + return (readq((io->addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; } @@ -81,6 +155,8 @@ int ipmi_si_mem_setup(struct si_sm_io *io) if (!addr) return -ENODEV; + ipmi_check_phytium_workaround(); + /* * Figure out the actual readb/readw/readl/etc routine to use based * upon the register size. 
diff --git a/drivers/char/ipmi/kcs_bmc_ls2k500.h b/drivers/char/ipmi/kcs_bmc_ls2k500.h new file mode 100644 index 0000000000000000000000000000000000000000..86e08a08d41a60005d0f126036a3309f1e5ab334 --- /dev/null +++ b/drivers/char/ipmi/kcs_bmc_ls2k500.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KCS_BMC_LS2K500__ +#define __KCS_BMC_LS2K500__ 1 +#include +#include "btlock.h" +#define IPMI_KCS_OBF_BIT 0 +#define IPMI_KCS_IBF_BIT 1 +#define IPMI_KCS_SMS_ATN_BIT 2 +#define IPMI_KCS_CD_BIT 3 + +#define IPMI_KCS_OBF_MASK (1 << IPMI_KCS_OBF_BIT) +#define IPMI_KCS_GET_OBF(d) (((d) >> IPMI_KCS_OBF_BIT) & 0x1) +#define IPMI_KCS_SET_OBF(d, v) ((d) = (((d) & ~IPMI_KCS_OBF_MASK) | \ + (((v) & 1) << IPMI_KCS_OBF_BIT))) +#define IPMI_KCS_IBF_MASK (1 << IPMI_KCS_IBF_BIT) +#define IPMI_KCS_GET_IBF(d) (((d) >> IPMI_KCS_IBF_BIT) & 0x1) +#define IPMI_KCS_SET_IBF(d, v) ((d) = (((d) & ~IPMI_KCS_IBF_MASK) | \ + (((v) & 1) << IPMI_KCS_IBF_BIT))) +#define IPMI_KCS_SMS_ATN_MASK (1 << IPMI_KCS_SMS_ATN_BIT) +#define IPMI_KCS_GET_SMS_ATN(d) (((d) >> IPMI_KCS_SMS_ATN_BIT) & 0x1) +#define IPMI_KCS_SET_SMS_ATN(d, v) ((d) = (((d) & ~IPMI_KCS_SMS_ATN_MASK) | \ + ((v) & 1) << IPMI_KCS_SMS_ATN_BIT)) +#define IPMI_KCS_CD_MASK (1 << IPMI_KCS_CD_BIT) +#define IPMI_KCS_GET_CD(d) (((d) >> IPMI_KCS_CD_BIT) & 0x1) +#define IPMI_KCS_SET_CD(d, v) ((d) = (((d) & ~IPMI_KCS_CD_MASK) | \ + (((v) & 1) << IPMI_KCS_CD_BIT))) + +#define IPMI_KCS_IDLE_STATE 0 +#define IPMI_KCS_READ_STATE 1 +#define IPMI_KCS_WRITE_STATE 2 +#define IPMI_KCS_ERROR_STATE 3 + +#define IPMI_KCS_GET_STATE(d) (((d) >> 6) & 0x3) +#define IPMI_KCS_SET_STATE(d, v) ((d) = ((d) & ~0xc0) | (((v) & 0x3) << 6)) + +#define IPMI_KCS_ABORT_STATUS_CMD 0x60 +#define IPMI_KCS_WRITE_START_CMD 0x61 +#define IPMI_KCS_WRITE_END_CMD 0x62 +#define IPMI_KCS_READ_CMD 0x68 +#define IPMI_KCS_STATUS_NO_ERR 0x00 +#define IPMI_KCS_STATUS_ABORTED_ERR 0x01 +#define IPMI_KCS_STATUS_BAD_CC_ERR 0x02 +#define IPMI_KCS_STATUS_LENGTH_ERR 0x06 +#define KCS_STATUS_CMD_DAT BIT(3) + +typedef struct IPMIKCS { + union btlock lock; + uint8_t status_reg; + uint8_t data_out_reg; + + int16_t data_in_reg; + int16_t cmd_reg; + int16_t reserved2; + + uint32_t write_req; + uint32_t write_ack; + + uint32_t reserved3; + uint32_t reserved4; +} IPMIKCS; + +struct loongson_kcs_bmc { + struct list_head next; + IPMIKCS *kcs; + struct kcs_bmc *bmc; +}; +#endif diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 927088b2c3d3f2c60bd5c3fa9735ba90d2e0bdd3..301284e07603bb353c3c04d1128293f325b5d479 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig @@ -210,5 +210,29 @@ config TCG_FTPM_TEE help This driver proxies for firmware TPM running in TEE. +config TCG_HYGON + tristate "Hygon TPM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TPM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tpm_hygon. + +config TCM_HYGON + tristate "Hygon TCM Interface" + depends on ACPI + depends on CRYPTO_DEV_CCP_DD + depends on CRYPTO_DEV_SP_PSP + default y + help + If you want to make Hygon TCM support available, say Yes and + it will be accessible from within Linux. To compile this + driver as a module, choose M here; the module will be called + tcm_hygon. 
+ source "drivers/char/tpm/st33zp24/Kconfig" endif # TCG_TPM diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile index 0222b1ddb3105e6726fa93e2ea3500d404964cbc..8f868c9b9ce78f53ec54620bec947f099fd13624 100644 --- a/drivers/char/tpm/Makefile +++ b/drivers/char/tpm/Makefile @@ -42,3 +42,5 @@ obj-$(CONFIG_TCG_XEN) += xen-tpmfront.o obj-$(CONFIG_TCG_CRB) += tpm_crb.o obj-$(CONFIG_TCG_VTPM_PROXY) += tpm_vtpm_proxy.o obj-$(CONFIG_TCG_FTPM_TEE) += tpm_ftpm_tee.o +obj-$(CONFIG_TCG_HYGON) += tpm_hygon.o +obj-$(CONFIG_TCM_HYGON) += tcm_hygon.o diff --git a/drivers/char/tpm/tcm_hygon.c b/drivers/char/tpm/tcm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..ef63d1a0a9027c61aed413d04d91ebd9f19f791d --- /dev/null +++ b/drivers/char/tpm/tcm_hygon.c @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TCM2.0 device driver. + * + * Copyright (C) 2023 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TCM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TCM_BUF_LEN 4096 + +struct tcm_hygon_priv { + u8 priv_buf[MAX_TCM_BUF_LEN]; +}; + +struct tcm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tcm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tcm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tcm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tcm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = sizeof(priv->priv_buf); + u32 cmd_size = (u32)count; + u8 *p = priv->priv_buf; + + if (buf_size - sizeof(u32) - sizeof(u32) < count) { + ret = -E2BIG; + goto out; + } + + *(u32 *)p = cpu_to_be32(buf_size); + p += sizeof(buf_size); + *(u32 *)p = cpu_to_be32(cmd_size); + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TCM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: psp do cmd error, %d\n", __func__, error); + ret = -EIO; + } + +out: + return ret; +} + +static const struct tpm_class_ops tcm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tcm_c_recv, + .send = tcm_c_send, +}; + +static void tcm_bios_log_teardown(struct tpm_chip *chip) +{ + int i; + struct inode *inode; + + /* securityfs_remove currently doesn't take care of handling sync + * between removal and opening of pseudo files. To handle this, a + * workaround is added by making i_private = NULL here during removal + * and to check it during open(), both within inode_lock()/unlock(). + * This design ensures that open() either safely gets kref or fails. 
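+ *
+ * The same scheme is used by the core TPM code when tearing down its
+ * event log files; it is duplicated here because the tcm chip is
+ * unregistered by hand in tcm_chip_unregister() below.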
+ */ + for (i = (TPM_NUM_EVENT_LOG_FILES - 1); i >= 0; i--) { + if (chip->bios_dir[i]) { + inode = d_inode(chip->bios_dir[i]); + inode_lock(inode); + inode->i_private = NULL; + inode_unlock(inode); + securityfs_remove(chip->bios_dir[i]); + } + } +} + +static void tcm_chip_unregister(struct tpm_chip *chip) +{ + if (IS_ENABLED(CONFIG_HW_RANDOM_TPM)) + hwrng_unregister(&chip->hwrng); + tcm_bios_log_teardown(chip); + cdev_del(&chip->cdevs); + put_device(&chip->devs); + cdev_device_del(&chip->cdev, &chip->dev); +} + +static int hygon_tcm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tcm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tcm_c_ops); + if (IS_ERR(chip)) { + pr_err("tcmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + ret = dev_set_name(&chip->dev, "tcm%d", chip->dev_num); + if (ret) { + pr_err("tcm device set name fail\n"); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tcm chip_register fail\n"); + goto err; + } + + if (chip->flags & TPM_CHIP_FLAG_TPM2) { + device_del(&chip->devs); + ret = dev_set_name(&chip->devs, "tcmrm%d", chip->dev_num); + if (ret) { + pr_err("tcmrm device set name fail\n"); + goto err_dev; + } + ret = device_add(&chip->devs); + if (ret) { + pr_err("devs add fail\n"); + goto err_dev; + } + } + + pr_info("Hygon TCM2 detected\n"); + + return 0; + +err_dev: + tcm_chip_unregister(chip); + +err: + return ret; +} + +static void hygon_tcm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TCM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tcm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tcm2_device_ids[] = { + {"HYGT0201", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tcm2_device_ids); + +static struct acpi_driver hygon_tcm2_acpi_driver = { + .name = "tcm_hygon", + .ids = hygon_tcm2_device_ids, + .ops = { + .add = hygon_tcm2_acpi_add, + .remove = hygon_tcm2_acpi_remove, + }, + .drv = { + .pm = &tcm_hygon_pm, + }, +}; + +static int __init hygon_tcm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tcm2_acpi_driver); +} + +static void __exit hygon_tcm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tcm2_acpi_driver); +} + +/* + * hygon_tcm2_init must be done after ccp module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tcm2_init); +module_exit(hygon_tcm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TCM2 device driver for Hygon PSP"); diff --git a/drivers/char/tpm/tpm_hygon.c b/drivers/char/tpm/tpm_hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..37e2e1f19c8d0dd0c50ae4fc4f52e4328e31e03e --- /dev/null +++ b/drivers/char/tpm/tpm_hygon.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * The Hygon TPM2.0 device driver. + * + * Copyright (C) 2020 Hygon Info Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tpm.h" + +#define TPM2PSP_CMD(id) (0x100 | (id)) +#define MAX_TPM_BUF_LEN 4096 +#define MAX_CMD_BUF_LEN (MAX_TPM_BUF_LEN + sizeof(u32) + sizeof(u32)) + +struct tpm_hygon_priv { + u8 priv_buf[MAX_CMD_BUF_LEN]; +}; + +/* + * tpm header struct name is different in different kernel versions. + * so redefine it for driver porting. + */ +struct tpm_header_t { + __be16 tag; + __be32 length; + union { + __be32 ordinal; + __be32 return_code; + }; +} __packed; + +static int tpm_c_recv(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret = 0; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + struct tpm_header_t *header = (void *)(priv->priv_buf + sizeof(u32) + sizeof(u32)); + u32 len = be32_to_cpu(header->length); + + if (len > count) { + ret = -E2BIG; + goto out; + } + + if (len > 0) + memmove(buf, (u8 *)header, len); + + ret = len; + +out: + return ret; +} + +static int tpm_c_send(struct tpm_chip *chip, u8 *buf, size_t count) +{ + int ret, error; + struct tpm_hygon_priv *priv = dev_get_drvdata(&chip->dev); + u32 buf_size = cpu_to_be32(sizeof(priv->priv_buf)); + u32 cmd_size = cpu_to_be32((u32)count); + u8 *p = priv->priv_buf; + + *(u32 *)p = buf_size; + p += sizeof(buf_size); + *(u32 *)p = cmd_size; + p += sizeof(cmd_size); + memmove(p, buf, count); + + ret = psp_do_cmd(TPM2PSP_CMD(0), priv->priv_buf, &error); + if (ret) { + pr_err("%s: sev do cmd error, %d\n", __func__, error); + ret = -EIO; + } + + return ret; +} + +static const struct tpm_class_ops tpm_c_ops = { + .flags = TPM_OPS_AUTO_STARTUP, + .recv = tpm_c_recv, + .send = tpm_c_send, +}; + +static int hygon_tpm2_acpi_add(struct acpi_device *device) +{ + int ret; + struct tpm_chip *chip; + struct tpm_hygon_priv *priv; + struct device *dev = &device->dev; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto err; + } + + chip = tpmm_chip_alloc(dev, &tpm_c_ops); + if (IS_ERR(chip)) { + pr_err("tpmm_chip_alloc fail\n"); + ret = PTR_ERR(chip); + goto err; + } + + dev_set_drvdata(&chip->dev, priv); + + chip->flags |= TPM_CHIP_FLAG_TPM2; + chip->flags |= TPM_CHIP_FLAG_IRQ; + + ret = tpm_chip_register(chip); + if (ret) { + pr_err("tpm_chip_register fail\n"); + goto err; + } + + pr_info("Hygon TPM2 detected\n"); + + return 0; + +err: + return ret; +} + +static void hygon_tpm2_acpi_remove(struct acpi_device *device) +{ + struct device *dev = &device->dev; + struct tpm_chip *chip = dev_get_drvdata(dev); + + tpm_chip_unregister(chip); + + pr_info("Hygon TPM2 removed\n"); +} + +static SIMPLE_DEV_PM_OPS(tpm_hygon_pm, tpm_pm_suspend, tpm_pm_resume); + +static const struct acpi_device_id hygon_tpm2_device_ids[] = { + {"HYGT0101", 0}, + {"", 0}, +}; + +MODULE_DEVICE_TABLE(acpi, hygon_tpm2_device_ids); + +static struct acpi_driver hygon_tpm2_acpi_driver = { + .name = "tpm_hygon", + .ids = hygon_tpm2_device_ids, + .ops = { + .add = hygon_tpm2_acpi_add, + .remove = hygon_tpm2_acpi_remove, + }, + .drv = { + .pm = &tpm_hygon_pm, + }, +}; + +static int __init hygon_tpm2_init(void) +{ + return acpi_bus_register_driver(&hygon_tpm2_acpi_driver); +} + +static void __exit hygon_tpm2_exit(void) +{ + acpi_bus_unregister_driver(&hygon_tpm2_acpi_driver); +} + +/* + * hygon_tpm2_init must be done after ccp 
module init, but before + * ima module init. That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(hygon_tpm2_init); +module_exit(hygon_tpm2_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("mayuanchen (mayuanchen@hygon.cn)"); +MODULE_DESCRIPTION("TPM2 device driver for Hygon PSP"); diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 0ba0dc4ecf06201a0532d3af71194841a5167c64..2f25ee00616484af00c93c762f91c230a6e2bd52 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -732,4 +732,7 @@ config GOLDFISH_TIMER help Support for the timer/counter of goldfish-rtc +config SW64_TIMER + bool + endmenu diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index 368c3461dab8146a6f89a111677c413b974e4608..b9ef4c79915e88b4de638548f43dd48d83624346 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -89,3 +89,4 @@ obj-$(CONFIG_MSC313E_TIMER) += timer-msc313e.o obj-$(CONFIG_GOLDFISH_TIMER) += timer-goldfish.o obj-$(CONFIG_GXP_TIMER) += timer-gxp.o obj-$(CONFIG_CLKSRC_LOONGSON1_PWM) += timer-loongson1-pwm.o +obj-$(CONFIG_SW64_TIMER) += timer-sw64.o diff --git a/drivers/clocksource/timer-sw64.c b/drivers/clocksource/timer-sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..a124b6d8fed94d785f94483610ac143a8de13920 --- /dev/null +++ b/drivers/clocksource/timer-sw64.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#define SHTCLK_RATE_KHZ 25000 +#define SHTCLK_RATE (SHTCLK_RATE_KHZ * 1000) + +#if defined(CONFIG_SUBARCH_C4) +static u64 read_longtime(struct clocksource *cs) +{ + return read_csr(CSR_SHTCLOCK); +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 notrace read_sched_clock(void) +{ + return read_csr(CSR_SHTCLOCK); +} + +void __init sw64_setup_clocksource(void) +{ + clocksource_register_khz(&clocksource_longtime, SHTCLK_RATE_KHZ); + sched_clock_register(read_sched_clock, BITS_PER_LONG, SHTCLK_RATE); +} + +void __init setup_sched_clock(void) { } +#elif defined(CONFIG_SUBARCH_C3B) +#ifdef CONFIG_SMP +static u64 read_longtime(struct clocksource *cs) +{ + unsigned long node; + + node = __this_cpu_read(hard_node_id); + return __io_read_longtime(node); +} + +static int longtime_enable(struct clocksource *cs) +{ + switch (cpu_desc.model) { + case CPU_SW3231: + sw64_io_write(0, GPIO_SWPORTA_DR, 0); + sw64_io_write(0, GPIO_SWPORTA_DDR, 0xff); + break; + case CPU_SW831: + __io_write_longtime_start_en(0, 0x1); + break; + default: + break; + } + + return 0; +} + +static struct clocksource clocksource_longtime = { + .name = "longtime", + .rating = 100, + .enable = longtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .mask = CLOCKSOURCE_MASK(64), + .shift = 0, + .mult = 0, + .read = read_longtime, +}; + +static u64 read_vtime(struct clocksource *cs) +{ + unsigned long vtime_addr; + + vtime_addr = IO_BASE | LONG_TIME; + return rdio64(vtime_addr); +} + +static int vtime_enable(struct clocksource *cs) +{ + return 0; +} + +static struct clocksource clocksource_vtime = { + .name = "vtime", + .rating = 100, + .enable = vtime_enable, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + 
.mask = CLOCKSOURCE_MASK(64),
+	.shift = 0,
+	.mult = 0,
+	.read = read_vtime,
+};
+#else /* !SMP */
+static u64 read_tc(struct clocksource *cs)
+{
+	return rdtc();
+}
+
+static struct clocksource clocksource_tc = {
+	.name = "tc",
+	.rating = 300,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,
+	.mult = 0, /* To be filled in */
+	.read = read_tc,
+};
+#endif /* SMP */
+
+#define DEFAULT_MCLK 25 /* MHz */
+
+void __init sw64_setup_clocksource(void)
+{
+	unsigned int mclk = *((unsigned char *)__va(MB_MCLK));
+
+	if (!mclk)
+		mclk = DEFAULT_MCLK;
+
+#ifdef CONFIG_SMP
+	if (is_in_host())
+		clocksource_register_khz(&clocksource_longtime, mclk * 1000);
+	else
+		clocksource_register_khz(&clocksource_vtime, DEFAULT_MCLK * 1000);
+#else
+	clocksource_register_hz(&clocksource_tc, get_cpu_freq());
+	pr_info("Setup clocksource TC, mult = %u\n", clocksource_tc.mult);
+#endif
+}
+
+DECLARE_PER_CPU(u64, tc_offset);
+static u64 sc_start, sc_shift, sc_multi;
+DEFINE_STATIC_KEY_FALSE(use_tc_as_sched_clock);
+
+static int __init sched_clock_setup(char *opt)
+{
+	if (!opt)
+		return -EINVAL;
+
+	if (!strncmp(opt, "on", 2)) {
+		static_branch_enable(&use_tc_as_sched_clock);
+		pr_info("Using TC instead of jiffies as source of sched_clock()\n");
+	}
+
+	return 0;
+}
+early_param("tc_sched_clock", sched_clock_setup);
+
+static void __init calibrate_sched_clock(void)
+{
+	sc_start = rdtc();
+}
+
+void __init setup_sched_clock(void)
+{
+	unsigned long step;
+
+	sc_shift = 7;
+	step = 1UL << sc_shift;
+	sc_multi = step * NSEC_PER_SEC / get_cpu_freq();
+	calibrate_sched_clock();
+
+	pr_info("sched_clock: sc_multi=%llu, sc_shift=%llu\n", sc_multi, sc_shift);
+}
+
+#ifdef CONFIG_GENERIC_SCHED_CLOCK
+static u64 notrace read_sched_clock(void)
+{
+	return (rdtc() - sc_start) >> sc_shift;
+}
+
+void __init sw64_sched_clock_init(void)
+{
+	sched_clock_register(read_sched_clock, BITS_PER_LONG,
+			     get_cpu_freq() >> sc_shift);
+}
+#else /* !CONFIG_GENERIC_SCHED_CLOCK */
+/*
+ * scheduler clock - returns current time in nanoseconds.
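+ *
+ * When the TC source is enabled this computes
+ *   ((rdtc() - sc_start + tc_offset) >> sc_shift) * sc_multi
+ * where sc_multi = 2^sc_shift * NSEC_PER_SEC / cpu_freq (see
+ * setup_sched_clock()), i.e. roughly cycles * NSEC_PER_SEC / cpu_freq;
+ * the shift keeps the multiplier an integer at the cost of sc_shift
+ * bits of resolution.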
+ */ +unsigned long long notrace sched_clock(void) +{ + if (static_branch_likely(&use_tc_as_sched_clock)) + return ((rdtc() - sc_start + __this_cpu_read(tc_offset)) >> sc_shift) * sc_multi; + else + return (jiffies - INITIAL_JIFFIES) * (NSEC_PER_SEC / HZ); +} + +#ifdef CONFIG_DEBUG_FS +static ssize_t sched_clock_status_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + char buf[2]; + + if (static_key_enabled(&use_tc_as_sched_clock)) + buf[0] = 'Y'; + else + buf[0] = 'N'; + buf[1] = '\n'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t sched_clock_status_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + int r; + bool bv; + bool val = static_key_enabled(&use_tc_as_sched_clock); + + r = kstrtobool_from_user(user_buf, count, &bv); + if (!r) { + if (val != bv) { + if (bv) { + static_branch_enable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from jiffies to TC\n"); + } else { + static_branch_disable(&use_tc_as_sched_clock); + pr_info("source of sched_clock() switched from TC to jiffies\n"); + } + } else { + if (val) + pr_info("source of sched_clock() unchanged (using TC)\n"); + else + pr_info("source of sched_clock() unchanged (using jiffies)\n"); + } + } + + return count; +} + +static const struct file_operations sched_clock_status_fops = { + .read = sched_clock_status_read, + .write = sched_clock_status_write, + .open = nonseekable_open, + .llseek = no_llseek, +}; + +static int __init sched_clock_debug_init(void) +{ + struct dentry *sched_clock_status; + + if (!sw64_debugfs_dir) + return -ENODEV; + + sched_clock_status = debugfs_create_file("tc_sched_clock", + 0644, sw64_debugfs_dir, NULL, + &sched_clock_status_fops); + + if (!sched_clock_status) + return -ENOMEM; + + return 0; +} +late_initcall(sched_clock_debug_init); +#endif /* CONFIG_DEBUG_FS */ +#endif /* CONFIG_GENERIC_SCHED_CLOCK */ + +#endif + + + +static int timer_next_event(unsigned long delta, + struct clock_event_device *evt); +static int timer_set_shutdown(struct clock_event_device *evt); +static int timer_set_oneshot(struct clock_event_device *evt); + +/* + * The local apic timer can be used for any function which is CPU local. 
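+ *
+ * On SW64 it is the per-CPU hardware timer programmed through
+ * wrtimer(); only one-shot operation is implemented (see
+ * timer_set_oneshot() below).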
+ */
+static struct clock_event_device timer_clockevent = {
+	.name = "timer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = timer_set_shutdown,
+	.set_state_oneshot = timer_set_oneshot,
+	.set_next_event = timer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static int vtimer_next_event(unsigned long delta,
+			     struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, delta, 0, 0);
+	return 0;
+}
+
+static int vtimer_shutdown(struct clock_event_device *evt)
+{
+	hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0);
+	return 0;
+}
+
+static int vtimer_set_oneshot(struct clock_event_device *evt)
+{
+	return 0;
+}
+
+static struct clock_event_device vtimer_clockevent = {
+	.name = "vtimer",
+	.features = CLOCK_EVT_FEAT_ONESHOT,
+	.shift = 20,
+	.mult = 0,
+	.set_state_shutdown = vtimer_shutdown,
+	.set_state_oneshot = vtimer_set_oneshot,
+	.set_next_event = vtimer_next_event,
+	.rating = 300,
+	.irq = -1,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, timer_events);
+
+/*
+ * Program the next event, relative to now
+ */
+static int timer_next_event(unsigned long delta,
+			    struct clock_event_device *evt)
+{
+	wrtimer(delta);
+	return 0;
+}
+
+static int timer_set_shutdown(struct clock_event_device *evt)
+{
+	wrtimer(0);
+	return 0;
+}
+
+static int timer_set_oneshot(struct clock_event_device *evt)
+{
+	/*
+	 * The SW64 timer supports only CLOCK_EVT_FEAT_ONESHOT, and it is
+	 * one-shot automatically. Unlike the PIT and HPET, which select
+	 * ONESHOT or PERIODIC mode via PIT_MOD or HPET_Tn_CFG, there is
+	 * nothing to do here.
+	 */
+	return 0;
+}
+
+void sw64_update_clockevents(unsigned long cpu, u32 freq)
+{
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	if (cpu == smp_processor_id()) {
+		clockevents_update_freq(swevt, freq);
+	} else {
+		clockevents_calc_mult_shift(swevt, freq, 4);
+		swevt->min_delta_ns = clockevent_delta2ns(swevt->min_delta_ticks, swevt);
+		swevt->max_delta_ns = clockevent_delta2ns(swevt->max_delta_ticks, swevt);
+	}
+}
+
+/*
+ * Setup the local timer for this CPU. Copy the initialized values
+ * of the boot CPU and register the clock event in the framework.
+ */
+void sw64_setup_timer(void)
+{
+	unsigned long min_delta;
+	int cpu = smp_processor_id();
+	struct clock_event_device *swevt = &per_cpu(timer_events, cpu);
+
+	/* min_delta ticks => 100ns */
+	min_delta = get_cpu_freq() / 1000 / 1000 / 10;
+
+	if (is_in_guest()) {
+		memcpy(swevt, &vtimer_clockevent, sizeof(*swevt));
+		/*
+		 * This value is very important. If it is too small, the
+		 * timer may expire before the interrupt enable (IER) has
+		 * been set up.
+		 */
+		min_delta *= 4;
+	} else {
+		memcpy(swevt, &timer_clockevent, sizeof(*swevt));
+	}
+	swevt->cpumask = cpumask_of(cpu);
+	swevt->set_state_shutdown(swevt);
+	clockevents_config_and_register(swevt, get_cpu_freq(), min_delta, ULONG_MAX);
+}
+
+void sw64_timer_interrupt(void)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&timer_events);
+
+	irq_enter();
+	if (!evt->event_handler) {
+		pr_warn("Spurious local timer interrupt on cpu %d\n",
+			smp_processor_id());
+		timer_set_shutdown(evt);
+		goto out;
+	}
+
+	inc_irq_stat(timer_irqs_event);
+
+	evt->event_handler(evt);
+
+out:
+	irq_exit();
+}
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index f429b9b37b76c7e45e0c0cd9eb38980666ff597d..d1fdea27eb0d734b41e5ce6c5832ae3380af735a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -273,6 +273,17 @@ config LOONGSON2_CPUFREQ
 	  If in doubt, say N.
endif +if LOONGARCH +config LOONGSON3_ACPI_CPUFREQ + bool "Loongson3 ACPI cpufreq driver" + depends on ACPI_PROCESSOR + help + This driver adds a CPUFreq driver which utilizes the ACPI + Processor Performance States. + This driver supports Loongson 3A5000 compatible CPUs. + If in doubt, say N. +endif + if SPARC64 config SPARC_US3_CPUFREQ tristate "UltraSPARC-III CPU Frequency driver" @@ -303,6 +314,29 @@ config SH_CPU_FREQ If unsure, say N. endif +if SW64 +config SW64_CPUFREQ + bool "SW64 CPU Frequency interface" + depends on UNCORE_XUELANG + default y + help + This adds the CPUFreq driver for SW64 processor which supports + software configurable cpu frequency. + + For details, take a look at . + + If unsure, say N. + +config SW64_CPUFREQ_DEBUGFS + bool "SW64 CPU Frequency debugfs interface" + depends on SW64_CPUFREQ && DEBUG_FS + default y + help + Turns on the DebugFS interface for CPU Frequency. + + If you don't know what to do here, say N. +endif + config QORIQ_CPUFREQ tristate "CPU frequency scaling driver for Freescale QorIQ SoCs" depends on OF && COMMON_CLK diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index ef8510774913113b19f3ee9ee738bc228154d188..f9c1c9012ce7b20ca229044e1eb7b902ef20f5b7 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -104,6 +104,9 @@ obj-$(CONFIG_POWERNV_CPUFREQ) += powernv-cpufreq.o obj-$(CONFIG_BMIPS_CPUFREQ) += bmips-cpufreq.o obj-$(CONFIG_IA64_ACPI_CPUFREQ) += ia64-acpi-cpufreq.o obj-$(CONFIG_LOONGSON2_CPUFREQ) += loongson2_cpufreq.o +obj-$(CONFIG_LOONGSON3_ACPI_CPUFREQ) += loongson3-acpi-cpufreq.o obj-$(CONFIG_SH_CPU_FREQ) += sh-cpufreq.o obj-$(CONFIG_SPARC_US2E_CPUFREQ) += sparc-us2e-cpufreq.o obj-$(CONFIG_SPARC_US3_CPUFREQ) += sparc-us3-cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ) += sw64_cpufreq.o +obj-$(CONFIG_SW64_CPUFREQ_DEBUGFS) += sw64_cpufreq_debugfs.o diff --git a/drivers/cpufreq/loongson3-acpi-cpufreq.c b/drivers/cpufreq/loongson3-acpi-cpufreq.c new file mode 100644 index 0000000000000000000000000000000000000000..018b529a0cf91a83b6ce9c78321c4bbde69c2877 --- /dev/null +++ b/drivers/cpufreq/loongson3-acpi-cpufreq.c @@ -0,0 +1,1527 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * loongson3-acpi-cpufreq.c - Loongson ACPI Processor P-States Driver + * + * Copyright (C) 2020 lvjianmin + * Yijun + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include "cpufreq_governor.h" + +#include +#define CPU_ID_FIELD 0xf + +#define COMPLETE_STATUS 0x80000000 +#define VOLTAGE_COMMAND 0x21 + +#define DVFS_INFO 0x22 +#define DVFS_INFO_BOOST_LEVEL 0x23 +#define DVFS_INFO_MIN_FREQ 0xf +#define DVFS_INFO_MAX_FREQ 0xf0 +#define DVFS_INFO_BOOST_CORE_FREQ 0xff00 +#define DVFS_INFO_NORMAL_CORE_UPPER_LIMIT 0xf0000 +#define DVFS_INFO_BOOST_CORES 0xf00000 + +#define BOOST_MODE 0x80000 +#define NORMAL_MODE 0x40000 + +MODULE_DESCRIPTION("Loongson 3A5000 ACPI Processor P-States Driver"); + +MODULE_LICENSE("GPL"); + +#define CPUFREQ_SAMPLING_INTERVAL (2 * TICK_NSEC / NSEC_PER_USEC) +#define LOONGSON_CONTROL_MASK (0xFF) +#define FACTOR (0xeac0c6e8) +#define BOOST_THRESHOLD (900) +#define MAX_CORES_PER_PACKAGE 64 +#define CPU_ID_FIELD 0xf +#define VOLTAGE_COMMAND 0x21 +#define MAX_READY_TIMEOUT 300000000 +#define RESERVED_FREQ 3 + +#define LOONGSON_BOOST_FREQ_MASK (0x7 << 8) +#define FREQ_STEP (25) + +static struct mutex 
boost_mutex[MAX_PACKAGES];
+static bool cpufreq_has_boost_freq;
+static int max_boost_cores;
+static int boost_gears;
+static int boost_freqs[NR_CPUS + 1];
+struct package_data;
+struct core_data;
+static struct acpi_processor_performance __percpu *acpi_perf_data;
+static struct cpufreq_driver loongson3_cpufreq_driver;
+static struct freq_attr *loongson3_cpufreq_attr[];
+DECLARE_PER_CPU(struct clock_event_device, stable_clockevent_device);
+static inline struct core_data *get_core_data(int cpu);
+
+static int min_freq_level;
+static int max_freq_level;
+static int max_upper_index;
+static int max_boost_freq;
+
+/* threshold of a core's MSA usage count */
+static int msa_count_threshold = 200;
+/* threshold of a core's LASX usage count */
+static int lasx_count_threshold = 200;
+/* upper load threshold for the other cores when one core boosts with MSA/LASX enabled */
+static int load_threshold = 60;
+
+DEFINE_PER_CPU(unsigned long, msa_count);
+EXPORT_PER_CPU_SYMBOL(msa_count);
+
+#if defined(CONFIG_CPU_HAS_LASX)
+DEFINE_PER_CPU(unsigned long, lasx_count);
+EXPORT_PER_CPU_SYMBOL(lasx_count);
+#endif
+
+struct ce_update_data {
+	struct clock_event_device *cd;
+	unsigned int new_freq;
+};
+
+static struct kthread_worker cpufreq_worker;
+static struct task_struct *cpufreq_thread;
+/**
+ * struct core_data - Per-core state of the driver
+ * @in_boost: whether the core is currently boosted to a boost frequency
+ * @cpu: logical CPU number of the core
+ * @update_util: the update_util_data of @cpu; passed to the callback
+ *	registered with cpufreq_update_util()
+ * @package: the package_data structure the core belongs to
+ * @work_in_progress: @work is busy
+ * @irq_work: used to enqueue callback handling on the irq workqueue
+ * @work: used to enqueue work from the irq workqueue on the system workqueue
+ * @perf: frequency-table information taken from the ACPI tables
+ * @normal_max_freq: maximum normal (non-boost) frequency of the cpu
+ * @boost_freq: maximum boost frequency for each boost gear
+ * @clock_scale: clock scale used to recompute cpu_data[cpu].udelay_val in boost mode
+ * @package_id: package id of the core
+ * @shift: clock shift used to recompute cpu_data[cpu].udelay_val in boost mode
+ * @update_util_set: whether the callback has been set for cpufreq_update_util()
+ * @load: current load of the core
+ * @last_freq_update_time: time of the last frequency update
+ * @freq_update_delay_ns: minimum interval between frequency updates, i.e. the
+ *	transition_latency configured in the ACPI table
+ *
+ * The following members are used to calculate the load of the core:
+ * @prev_update_time
+ * @prev_cpu_idle
+ * @prev_load
+ * @sampling_rate
+ */
+struct core_data {
+	bool in_boost;
+	int cpu;
+	struct update_util_data update_util;
+	struct package_data *package;
+	bool work_in_progress;
+	struct irq_work irq_work;
+	struct kthread_work work;
+	struct acpi_processor_performance *perf;
+	unsigned int normal_max_freq;
+	unsigned int *boost_freq;
+	unsigned int *clock_scale;
+	unsigned int package_id;
+	unsigned int *shift;
+	bool update_util_set;
+	unsigned long long load;
+
+	u64 last_freq_update_time;
+	s64 freq_update_delay_ns;
+	u64 prev_update_time;
+	u64 prev_cpu_idle;
+	u32 prev_load;
+	u32 sampling_rate;
+};
+
+struct package_data {
+	int boost_cores;
+	int max_boost_cores;
+	int nr_cores;
+	char in_boost;
+	int nr_full_load_cores;
+	struct core_data core[MAX_CORES_PER_PACKAGE];
+} all_package_data[MAX_PACKAGES];
+
+static bool boost_supported(void)
+{
+	return loongson3_cpufreq_driver.set_boost;
+}
+
+/*
+ * Check whether target_freq is a boost frequency.
+ *
+ * target_freq must be a frequency from the frequency table when
+ * calling this function.
+ */
+static int boost_level(struct acpi_processor_performance *perf, unsigned int target_freq)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (target_freq == (perf->states[i].core_frequency * 1000))
+			return (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8;
+	}
+	return 0;
+}
+
+#ifdef CONFIG_SMP
+static int loongson3_cpu_freq_notifier(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs;
+	struct clock_event_device __maybe_unused *cd;
+	struct core_data *core;
+	unsigned int __maybe_unused new_freq;
+	unsigned long cpu;
+	struct ce_update_data __maybe_unused ce_data;
+	int cur_boost_level;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		freqs = (struct cpufreq_freqs *)data;
+		cpu = freqs->policy->cpu;
+		core = get_core_data(cpu);
+		cur_boost_level = boost_level(core->perf, freqs->new);
+		if (cur_boost_level != 0) {
+			lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] *
+					cpufreq_scale(loops_per_jiffy, boost_freqs[cur_boost_level] * 1000,
+						freqs->new)) / core->shift[cur_boost_level]);
+		} else {
+			lpj_fine =
+				cpufreq_scale(loops_per_jiffy, core->normal_max_freq * 1000, freqs->new);
+		}
+	}
+
+	return 0;
+}
+#else
+static int loongson3_cpu_freq_notifier(struct notifier_block *nb,
+					unsigned long val, void *data)
+{
+	struct cpufreq_freqs *freqs;
+	struct clock_event_device __maybe_unused *cd;
+	struct core_data *core;
+	unsigned int __maybe_unused new_freq;
+	unsigned long cpu;
+	int cur_boost_level;
+
+	if (val == CPUFREQ_POSTCHANGE) {
+		freqs = (struct cpufreq_freqs *)data;
+		cpu = freqs->policy->cpu;
+		core = get_core_data(cpu);
+		cur_boost_level = boost_level(core->perf, freqs->new);
+
+		if (cur_boost_level != 0) {
+			lpj_fine = (unsigned int) (((int64_t)core->clock_scale[cur_boost_level] *
+					loops_per_jiffy) / core->shift[cur_boost_level]);
+		} else {
+			lpj_fine = loops_per_jiffy;
+		}
+	}
+
+	return 0;
+}
+#endif
+
+static struct notifier_block loongson3_cpufreq_notifier_block = {
+	.notifier_call = loongson3_cpu_freq_notifier
+};
+
+static int cpufreq_perf_find_level(struct acpi_processor_performance *perf,
+					unsigned int target_freq,
+					unsigned int boost_level)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (boost_level) {
+			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) {
+				if (target_freq == (perf->states[i].core_frequency * 1000))
+					return perf->states[i].control & LOONGSON_CONTROL_MASK;
+			}
+		} else {
+			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
+				if (target_freq == (perf->states[i].core_frequency * 1000))
+					return perf->states[i].control;
+		}
+	}
+	return 0;
+}
+
+static int cpufreq_perf_find_freq(struct acpi_processor_performance *perf,
+					unsigned int target_index,
+					unsigned int boost_level)
+{
+	int i;
+
+	for (i = 0; i < perf->state_count; i++) {
+		if (boost_level) {
+			if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK)
+				if (target_index == (perf->states[i].control & LOONGSON_CONTROL_MASK))
+					return perf->states[i].core_frequency;
+		} else {
+			if (!(perf->states[i].control & LOONGSON_BOOST_FREQ_MASK))
+				if (target_index == perf->states[i].control)
+					return perf->states[i].core_frequency;
+		}
+	}
+
+	return 0;
+}
+
+static inline struct core_data *get_core_data(int cpu)
+{
+	int package_id = cpu_data[cpu].package;
+	struct package_data *package = &all_package_data[package_id];
+	int core_id = cpu_logical_map(cpu) % package->nr_cores;
+
+	return &package->core[core_id];
+}
+
+static bool package_boost(struct package_data *package)
+{
+	int i;
+	int cur_full_load = 0;
+
+#if defined(CONFIG_CPU_HAS_LASX)
+	int lasx_enable_count = 0;
+	unsigned long lasx_num;
+	bool clear_lasx = false;
+#endif
+
+	int msa_enable_count = 0;
+	unsigned long msa_num;
+	bool clear_msa = false;
+
+	for (i = 0; i < package->nr_cores; i++) {
+
+#if defined(CONFIG_CPU_HAS_LASX)
+		lasx_num = per_cpu(lasx_count, package->core[i].cpu);
+
+		if (lasx_num)
+			lasx_enable_count++;
+
+		if (lasx_num >= lasx_count_threshold)
+			clear_lasx = true;
+
+		pr_debug("%s: lasx enabled, i %d, cpu %d, lasx_num %lu\n",
+				__func__, i, package->core[i].cpu, lasx_num);
+#endif
+		msa_num = per_cpu(msa_count, package->core[i].cpu);
+
+		if (msa_num)
+			msa_enable_count++;
+
+		if (msa_num >= msa_count_threshold)
+			clear_msa = true;
+
+		pr_debug("%s: msa enabled, i %d, cpu %d, msa_num %lu\n",
+				__func__, i, package->core[i].cpu, msa_num);
+
+		if (package->core[i].prev_load >= load_threshold)
+			cur_full_load++;
+	}
+
+#if defined(CONFIG_CPU_HAS_LASX)
+	if (clear_lasx) {
+		for (i = 0; i < package->nr_cores; i++)
+			per_cpu(lasx_count, package->core[i].cpu) = 0;
+	}
+#endif
+
+	if (clear_msa) {
+		for (i = 0; i < package->nr_cores; i++)
+			per_cpu(msa_count, package->core[i].cpu) = 0;
+	}
+
+#if defined(CONFIG_CPU_HAS_LASX)
+	if (lasx_enable_count > 1
+			|| (lasx_enable_count && package->nr_full_load_cores > 1)
+			|| (lasx_enable_count && cur_full_load > 1)) {
+		return false;
+	}
+#endif
+
+	if (msa_enable_count > 1
+			|| (msa_enable_count && package->nr_full_load_cores > 1)
+			|| (msa_enable_count && cur_full_load > 1)) {
+		return false;
+	}
+
+	if (package->nr_full_load_cores &&
+			package->nr_full_load_cores <= package->max_boost_cores)
+		return true;
+
+	return false;
+}
+
+/*
+ * Check whether the CPU can be boosted.
+ *
+ * Call this only after the core's load has been updated.
+ */ +static bool cpu_can_boost(int cpu) +{ + struct core_data *core = get_core_data(cpu); + struct package_data *package = core->package; + + if (package->boost_cores >= package->max_boost_cores) + return false; + if (core->load > BOOST_THRESHOLD) + return true; + + return false; +} + +static void do_set_freq_level(int cpu, int freq_level) +{ + uint32_t message; + uint32_t val; + + message = (0 << 31) | (VOLTAGE_COMMAND << 24) + | ((uint32_t)freq_level << 4) + | (cpu & CPU_ID_FIELD); + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + + val |= 1 << 10; + iocsr_write32(val, 0x420); +} + +static int wait_for_ready_timeout(int64_t timeout) +{ + int ret; + struct timespec64 prev_ts; + struct timespec64 curr_ts; + ktime_t delay = ktime_set(0, 100); + + ktime_get_ts64(&prev_ts); + ktime_get_ts64(&curr_ts); + + ret = -EPERM; + while (((curr_ts.tv_sec - prev_ts.tv_sec) * 1000000000 + (curr_ts.tv_nsec - prev_ts.tv_nsec)) < timeout) { + ktime_get_ts64(&curr_ts); + + if (iocsr_read32(0x51c) & COMPLETE_STATUS) { + ret = 0; + break; + } + + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_hrtimeout(&delay, HRTIMER_MODE_REL); + } + return ret; +} + +/* Find closest freq to target in a table in ascending order */ +static int cpufreq_table_find_freq_ac(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + if (freq == target_freq) + return freq; + + if (freq < target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found below target_freq, return freq above target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (target_freq - table[best].frequency > freq - target_freq) + return freq; + + return best_freq; + } + + return best_freq; +} + +/* Find closest freq to target in a table in descending order */ +static int cpufreq_table_find_freq_dc(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + struct cpufreq_frequency_table *table = policy->freq_table; + struct cpufreq_frequency_table *pos; + unsigned int freq; + unsigned int best_freq = 0; + int idx, best = -1; + + cpufreq_for_each_valid_entry_idx(pos, table, idx) { + freq = pos->frequency; + + if (pos->driver_data != boost_level) + continue; + if (freq > policy->max || freq < policy->min) + continue; + + if (freq == target_freq) + return freq; + + if (freq > target_freq) { + best = idx; + best_freq = freq; + continue; + } + + /* No freq found above target_freq, return freq below target_freq */ + if (best == -1) + return freq; + + /* Choose the closest freq */ + if (table[best].frequency - target_freq > target_freq - freq) + return freq; + return best_freq; + } + + return best_freq; +} + +/* Works only on sorted freq-tables */ +static int cpufreq_table_find_freq(struct cpufreq_policy *policy, + unsigned int target_freq, + int boost_level) +{ + target_freq = clamp_val(target_freq, policy->min, policy->max); + if (policy->freq_table_sorted == CPUFREQ_TABLE_SORTED_ASCENDING) + return cpufreq_table_find_freq_ac(policy, target_freq, boost_level); + else + return cpufreq_table_find_freq_dc(policy, target_freq, boost_level); +} + +static void transition_end(struct 
cpufreq_policy *policy, + struct cpufreq_freqs *freqs, bool failed) +{ + if (unlikely(!policy->transition_ongoing)) + return; + cpufreq_freq_transition_end(policy, freqs, failed); +} +static void transition_begin(struct cpufreq_policy *policy, + struct cpufreq_freqs *freqs) +{ + if (unlikely(policy->transition_ongoing)) + cpufreq_freq_transition_end(policy, freqs, true); + + cpufreq_freq_transition_begin(policy, freqs); +} + +static void update_core_boost_info(struct core_data *core, bool boost_set) +{ + core->in_boost = boost_set; + if (boost_set) + core->package->boost_cores++; + else + core->package->boost_cores--; +} + +static unsigned int cores_freq_trans_notify(struct package_data *package, + bool before_trans, + bool trans_failed, + int find_level, + int find_freq, + unsigned int skip_cpumask) +{ + int i; + struct cpufreq_policy *policy; + struct cpufreq_freqs freqs; + unsigned int cores_level = 0; + unsigned int core_level; + + for (i = 0; i < package->nr_cores; i++) { + struct core_data *core = &package->core[i]; + + policy = cpufreq_cpu_get_raw(core->cpu); + if (((1 << i) & skip_cpumask) || !policy) + continue; + freqs.old = policy->cur; + freqs.flags = 0; + + /* find level from normal levels */ + core_level = cpufreq_perf_find_level(core->perf, policy->cur, find_level); + if (!core_level) { + pr_debug("cpu%d policy->cur=%d find_level=%d freq=%d skip_cpumask=%x \n", + policy->cpu, policy->cur, + find_level, find_freq, skip_cpumask); + } + freqs.new = cpufreq_perf_find_freq(core->perf, core_level, find_freq) * 1000; + if (!freqs.new) + pr_debug("%s: find freq error\n", __func__); + + pr_debug("%s: cpu %d, old freq %d, new freq %d, find_level %d, find_freq %d\n", + __func__, policy->cpu, freqs.old, freqs.new, find_level, find_freq); + cores_level |= (core_level << (i << 2)); + + if (before_trans) + transition_begin(policy, &freqs); + else + transition_end(policy, &freqs, trans_failed); + } + return cores_level; +} +static int loongson3_set_freq(struct core_data *core, unsigned long freq, int boost_level) +{ + int ret = 0; + int freq_level; + int phy_cpu; + int target_freq; + struct cpufreq_freqs freqs; + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(core->cpu); + + if (!policy) + return -EINVAL; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + phy_cpu = cpu_logical_map(core->cpu); + target_freq = cpufreq_table_find_freq(policy, freq, boost_level); + if (!target_freq) + return -1; + if (target_freq == policy->cur) + return -1; + + freqs.flags = 0; + freqs.old = policy->cur; + freqs.new = target_freq; + freq_level = cpufreq_perf_find_level(core->perf, target_freq, boost_level); + if (!freq_level) { + pr_debug("%s: cpu%d freq=%lu targetfreq=%d boost_level=%d find level error\n", + __func__, core->cpu, freq, target_freq, boost_level); + } + + transition_begin(policy, &freqs); + do_set_freq_level(phy_cpu, freq_level); + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + transition_end(policy, &freqs, !!ret); + + return ret; +} + +int loongson3_set_mode(int mode, int freq_level) +{ + uint32_t val; + int ret = 0; + uint32_t message; + + ret = wait_for_ready_timeout(MAX_READY_TIMEOUT); + if (ret) + return ret; + + message = mode | (VOLTAGE_COMMAND << 24) | freq_level; + iocsr_write32(message, 0x51c); + val = iocsr_read32(0x420); + val |= 1 << 10; + iocsr_write32(val, 0x420); + return wait_for_ready_timeout(MAX_READY_TIMEOUT); +} + +enum freq_adjust_action { + FAA_NORMAL, + FAA_N2B, + FAA_B2N, + FAA_BOOST, +}; + +static int faa_normal(struct 
cpufreq_policy *policy, int load)
+{
+	int ret;
+	unsigned int freq_next, min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+
+	if (!core)
+		return -1;
+
+	min_f = policy->min;
+	max_f = policy->max;
+	freq_next = min_f + load * (max_f - min_f) / 100;
+	ret = loongson3_set_freq(core, freq_next, 0);
+	return ret;
+}
+
+static void handle_boost_cores(struct core_data *core, struct package_data *package,
+		unsigned long target_freq, bool skip_update_and_notify, bool update_core, bool inc_boost)
+{
+	int boost_level;
+	int find_level;
+	int find_freq;
+	int ret;
+	int inc_core = inc_boost ? 1 : -1;
+
+	if (boost_gears == 1) {
+		find_level = 0;
+		boost_level = boost_gears;
+	} else {
+		find_level = package->boost_cores;
+		if (update_core)
+			boost_level = package->boost_cores + inc_core;
+		else
+			boost_level = package->boost_cores;
+	}
+	find_freq = boost_level;
+	ret = loongson3_set_freq(core, target_freq, boost_level);
+	if (ret)
+		return;
+
+	if (skip_update_and_notify) {
+		if (update_core)
+			update_core_boost_info(core, inc_boost);
+		return;
+	}
+
+	if (boost_gears != 1) {
+		cores_freq_trans_notify(package, true, false,
+				find_level, find_freq, 1 << core->cpu);
+		cores_freq_trans_notify(package, false, false,
+				find_level, find_freq, 1 << core->cpu);
+	}
+	if (update_core)
+		update_core_boost_info(core, inc_boost);
+}
+
+static void faa_boost(struct cpufreq_policy *policy, int load)
+{
+	unsigned int min_f, max_f;
+	struct core_data *core = get_core_data(policy->cpu);
+	struct package_data *package = core->package;
+	unsigned long target_freq;
+
+	/* boost cores from n to n + 1 */
+	if (core->load > BOOST_THRESHOLD) {
+		if (package->boost_cores < package->max_boost_cores
+				&& !core->in_boost) {
+			if (boost_gears == 1) {
+				target_freq = policy->max;
+			} else {
+				target_freq = cpufreq_table_find_freq(policy, policy->max,
+						package->boost_cores + 1);
+				if (!target_freq) {
+					pr_debug("%s: find freq error, boost_level %d, cur freq %d\n",
+							__func__, package->boost_cores, policy->max);
+				}
+			}
+			handle_boost_cores(core, package, target_freq, false, true, true);
+		}
+	} else {
+		/* 1. core not in boost, level up but do not change the PLL
+		 * 2.
core in boost, boost cores from n to n - 1 + */ + min_f = policy->min; + max_f = policy->max; + target_freq = min_f + load * (max_f - min_f) / 100; + handle_boost_cores(core, package, target_freq, !core->in_boost, core->in_boost, false); + } + + +} + +static void get_boost_cores(struct package_data *package, int *boost_cores, int *boost_count) +{ + struct core_data *core; + struct cpufreq_policy *policy; + int i; + + /* count boost cores */ + for (i = 0; i < package->nr_cores; i++) { + core = &package->core[i]; + policy = cpufreq_cpu_get_raw(core->cpu); + if (!policy) + continue; + + if (cpu_can_boost(core->cpu)) { + if (boost_cores) + *boost_cores |= (1 << i); + + (*boost_count)++; + } + } +} + +static void faa_n2b(struct package_data *package, struct core_data *core) +{ + int boost_cores = 0; + int boost_count = 0; + int freq_level; + int i; + + get_boost_cores(package, &boost_cores, &boost_count); + + if (boost_gears == 1) + boost_count = 1; + + freq_level = cores_freq_trans_notify(package, true, false, + 0, boost_count, 0); + if (!loongson3_set_mode(BOOST_MODE, freq_level)) { + cores_freq_trans_notify(package, false, false, + 0, boost_count, 0); + package->in_boost = true; + for (i = 0; i < package->nr_cores; i++) { + if (boost_cores & (1 << i)) + update_core_boost_info(&package->core[i], true); + } + } else + cores_freq_trans_notify(package, false, true, + 0, boost_count, 0); +} + +static void faa_b2n(struct package_data *package) +{ + int i; + int boost_count = package->boost_cores; + + if (boost_gears == 1) + boost_count = 1; + + cores_freq_trans_notify(package, true, false, + boost_count, 0, 0); + if (!loongson3_set_mode(NORMAL_MODE, 0)) { + cores_freq_trans_notify(package, false, false, + boost_count, 0, 0); + for (i = 0; i < package->nr_cores; i++) { + if (package->core[i].in_boost) + update_core_boost_info(&package->core[i], false); + } + package->in_boost = false; + } else + cores_freq_trans_notify(package, false, true, + boost_count, 0, 0); +} + + +unsigned int load_update(struct core_data *core) +{ + int i; + u64 update_time, cur_idle_time; + unsigned int idle_time, time_elapsed; + unsigned int load = 0; + struct package_data *package = core->package; + + cur_idle_time = get_cpu_idle_time(core->cpu, &update_time, true); + + time_elapsed = update_time - core->prev_update_time; + core->prev_update_time = update_time; + + idle_time = cur_idle_time - core->prev_cpu_idle; + core->prev_cpu_idle = cur_idle_time; + + if (unlikely(!time_elapsed)) { + /* + * That can only happen when this function is called + * twice in a row with a very short interval between the + * calls, so the previous load value can be used then. + */ + load = core->prev_load; + } else if (unlikely((int)idle_time > 2 * core->sampling_rate && + core->prev_load)) { + + load = core->prev_load; + core->prev_load = 0; + } else { + if (time_elapsed >= idle_time) + load = 100 * (time_elapsed - idle_time) / time_elapsed; + else + load = (int)idle_time < 0 ? 
100 : 0;
+		core->prev_load = load;
+	}
+
+	package->nr_full_load_cores = 0;
+	for (i = 0; i < package->nr_cores; i++) {
+		if (package->core[i].load > BOOST_THRESHOLD)
+			package->nr_full_load_cores++;
+	}
+
+	return load;
+}
+
+static bool cpufreq_should_update_freq(struct core_data *core, u64 time)
+{
+	s64 delta_ns;
+
+	delta_ns = time - core->last_freq_update_time;
+	return delta_ns >= core->freq_update_delay_ns;
+}
+
+static void cpufreq_update(struct cpufreq_policy *policy)
+{
+	int action;
+	struct core_data *core;
+	struct package_data *package;
+	unsigned long load;
+	bool should_be_boost = false;
+
+	core = get_core_data(policy->cpu);
+	package = core->package;
+
+	mutex_lock(&boost_mutex[core->package_id]);
+
+	if (!core->update_util_set) {
+		mutex_unlock(&boost_mutex[core->package_id]);
+		return;
+	}
+
+	load = load_update(core);
+	core->load = (u64)load + ((core->load * FACTOR) >> 32);
+
+	if (cpufreq_boost_enabled()) {
+		should_be_boost = package_boost(package);
+	} else {
+		if (package->in_boost)
+			should_be_boost = false;
+	}
+
+	action = (package->in_boost << 1) | should_be_boost;
+	switch (action) {
+	case FAA_NORMAL:
+		faa_normal(policy, load);
+		break;
+	case FAA_B2N:
+		faa_b2n(package);
+		break;
+	case FAA_N2B:
+		faa_n2b(package, core);
+		break;
+	case FAA_BOOST:
+		faa_boost(policy, load);
+		break;
+	}
+	mutex_unlock(&boost_mutex[core->package_id]);
+}
+
+static void set_max_within_limits(struct cpufreq_policy *policy)
+{
+	struct core_data *core = get_core_data(policy->cpu);
+	/*
+	 * policy->max <= core->normal_max_freq * 1000 indicates that
+	 * boost is disabled, so the max frequency is within the normal
+	 * range.
+	 *
+	 * Skip the performance policy when boost is enabled.
+	 */
+	if (policy->max <= (core->normal_max_freq * 1000)) {
+		mutex_lock(&boost_mutex[core->package_id]);
+		if (!loongson3_set_freq(core, policy->max, 0))
+			pr_debug("Set cpu %d to performance mode under normal range.\n",
+					policy->cpu);
+		mutex_unlock(&boost_mutex[core->package_id]);
+	}
+}
+
+static void clear_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (!core->update_util_set)
+		return;
+
+	cpufreq_remove_update_util_hook(cpu);
+	core->update_util_set = false;
+	synchronize_rcu();
+}
+
+static void update_util_handler(struct update_util_data *data, u64 time,
+				unsigned int flags)
+{
+	struct core_data *core = container_of(data, struct core_data, update_util);
+
+	if (!cpufreq_should_update_freq(core, time))
+		return;
+	if (!core->work_in_progress) {
+		core->last_freq_update_time = time;
+		core->work_in_progress = true;
+		irq_work_queue(&core->irq_work);
+	}
+}
+
+static void set_update_util_hook(unsigned int cpu)
+{
+	struct core_data *core = get_core_data(cpu);
+
+	if (core->update_util_set)
+		return;
+
+	cpufreq_add_update_util_hook(cpu, &core->update_util,
+			update_util_handler);
+	core->update_util_set = true;
+}
+
+static int loongson3_cpufreq_set_policy(struct cpufreq_policy *policy)
+{
+	if (!policy->cpuinfo.max_freq)
+		return -ENODEV;
+
+	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
+		clear_update_util_hook(policy->cpu);
+		set_max_within_limits(policy);
+	} else {
+		set_update_util_hook(policy->cpu);
+	}
+
+	return 0;
+}
+
+static int loongson3_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
+{
+	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, policy->cpuinfo.max_freq);
+
+	return 0;
+}
+
+static void set_boost_freq(bool has)
+{
+	cpufreq_has_boost_freq = has;
+}
+
+static bool has_boost_freq(void)
+{
+	return cpufreq_has_boost_freq;
+}
+
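The ``core->load`` update in ``cpufreq_update()`` above is a fixed-point decaying accumulator: each sample adds the instantaneous load (0..100) to the previous value scaled by FACTOR / 2^32, which is about 0.917, so a constant load l converges to roughly l / (1 - 0.917), i.e. about 12 * l. BOOST_THRESHOLD (900) therefore corresponds to roughly 75% sustained utilization. A minimal standalone sketch of the same arithmetic (user-space C, written for illustration only, not part of the driver)::

    #include <stdint.h>
    #include <stdio.h>

    #define FACTOR 0xeac0c6e8u	/* ~0.917 as a 0.32 fixed-point fraction */

    int main(void)
    {
    	uint64_t acc = 0;
    	int i;

    	/* Feed a constant 75% instantaneous load; the accumulator
    	 * converges to 75 / (1 - FACTOR / 2^32), i.e. just above 900,
    	 * which is exactly where BOOST_THRESHOLD sits. */
    	for (i = 0; i < 200; i++)
    		acc = 75 + ((acc * FACTOR) >> 32);

    	printf("steady-state load: %llu\n", (unsigned long long)acc);
    	return 0;
    }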
+/*
+ * Fixed-point division helper: returns dividend / divisor scaled by
+ * 10^scale_resolution, and multiplies *shift by the same power of ten,
+ * so that the true ratio is result / *shift.
+ */
+static int compute_scale(int *shift, int dividend, int divisor)
+{
+	int i;
+	int result = 0;
+	int remainder = 0;
+	int scale_resolution = 8;
+
+	result = dividend / divisor;
+	remainder = (dividend % divisor) * 10;
+
+	for (i = 0; i < scale_resolution; i++) {
+		result = result * 10 + remainder / divisor;
+		remainder = (remainder % divisor) * 10;
+		*shift *= 10;
+	}
+
+	return result;
+}
+
+static void cpufreq_work_handler(struct kthread_work *work)
+{
+	struct core_data *core;
+	struct cpufreq_policy *policy;
+
+	core = container_of(work, struct core_data, work);
+	policy = cpufreq_cpu_get_raw(core->cpu);
+
+	if (policy) {
+		cpufreq_update(policy);
+		core->work_in_progress = false;
+	}
+}
+
+static void cpufreq_irq_work(struct irq_work *irq_work)
+{
+	struct core_data *core = container_of(irq_work, struct core_data, irq_work);
+
+	kthread_queue_work(&cpufreq_worker, &core->work);
+}
+
+static void cpufreq_kthread_stop(void)
+{
+	kthread_flush_worker(&cpufreq_worker);
+	kthread_stop(cpufreq_thread);
+}
+
+static int cpufreq_kthread_create(void)
+{
+	struct sched_attr attr = {
+		.size		= sizeof(struct sched_attr),
+		.sched_policy	= SCHED_DEADLINE,
+		/* 0x10000000 is SCHED_FLAG_SUGOV: mark this deadline thread
+		 * as a frequency-scaling helper, like schedutil's worker. */
+		.sched_flags	= 0x10000000,
+		.sched_nice	= 0,
+		.sched_priority = 0,
+		.sched_runtime	= 1000000,
+		.sched_deadline = 10000000,
+		.sched_period	= 10000000,
+	};
+	int ret;
+
+	kthread_init_worker(&cpufreq_worker);
+	cpufreq_thread = kthread_create(kthread_worker_fn, &cpufreq_worker, "lsfrq:%d", 0);
+	if (IS_ERR(cpufreq_thread))
+		return PTR_ERR(cpufreq_thread);
+
+	ret = sched_setattr_nocheck(cpufreq_thread, &attr);
+	if (ret) {
+		kthread_stop(cpufreq_thread);
+		pr_warn("%s: failed to set SCHED_DEADLINE\n", __func__);
+		return ret;
+	}
+
+	wake_up_process(cpufreq_thread);
+
+	return 0;
+}
+
+static int init_acpi(struct acpi_processor_performance *perf)
+{
+	int result = 0;
+	int i;
+
+	perf->shared_type = 0;
+	perf->state_count = (max_freq_level - min_freq_level + 1) * (boost_gears + 1);
+
+	perf->states =
+		kmalloc_array(perf->state_count,
+			sizeof(struct acpi_processor_px),
+			GFP_KERNEL);
+
+	if (!perf->states) {
+		result = -ENOMEM;
+		return result;
+	}
+
+	for (i = 0; i < perf->state_count; i++) {
+		perf->states[i].power = 0x3A98;	/* 15000 mW */
+		perf->states[i].transition_latency = 10000;
+		perf->states[i].bus_master_latency = 10000;
+		perf->states[i].status = (RESERVED_FREQ + i / (boost_gears + 1));
+		perf->states[i].control = (RESERVED_FREQ + i / (boost_gears + 1));
+
+		switch (i % (boost_gears + 1)) {
+		case 0:
+			perf->states[i].core_frequency = (cpu_clock_freq / 1000000) * (8 - i / (boost_gears + 1)) / 8;
+			break;
+		case 1:
+		case 2:
+		case 3:
+		case 4:
+			perf->states[i].core_frequency =
+				boost_freqs[i % (boost_gears + 1)] * (8 - i / (boost_gears + 1)) / 8;
+			perf->states[i].control |= ((i % (boost_gears + 1)) << 8);
+			break;
+		default:
+			pr_info("%s: i %d freq table error\n", __func__, i);
+		}
+	}
+
+	return result;
+}
+
+static int loongson3_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+	unsigned int i;
+	struct acpi_processor_performance *perf;
+	struct cpufreq_frequency_table *freq_table;
+	struct core_data *core;
+	int package_id;
+	unsigned int cpu = policy->cpu;
+	unsigned int result = 0;
+
+	perf = per_cpu_ptr(acpi_perf_data, cpu);
+	package_id = cpu_data[cpu].package;
+	core = get_core_data(cpu);
+	all_package_data[package_id].nr_cores = loongson_sysconf.cores_per_package;
+	all_package_data[package_id].max_boost_cores = max_boost_cores;
+	core->normal_max_freq = 0;
+	all_package_data[package_id].nr_full_load_cores = 0;
+	core->cpu = cpu;
+	core->work_in_progress = false;
+	core->last_freq_update_time = 0;
+	core->perf = perf;
+	core->package_id = package_id;
+	core->package = &all_package_data[package_id];
+
+	core->boost_freq = kmalloc_array(boost_gears + 1, sizeof(*core->boost_freq), GFP_KERNEL);
+	core->clock_scale = kmalloc_array(boost_gears + 1, sizeof(*core->clock_scale), GFP_KERNEL);
+	core->shift = kmalloc_array(boost_gears + 1, sizeof(*core->shift), GFP_KERNEL);
+
+	for (i = 0; i < boost_gears + 1; i++) {
+		core->boost_freq[i] = boost_freqs[i];
+		core->shift[i] = 1;
+	}
+
+	if (!acpi_disabled)
+		result = acpi_processor_register_performance(perf, cpu);
+	else {
+		result = init_acpi(perf);
+		policy->shared_type = perf->shared_type;
+	}
+
+	if (result) {
+		pr_info("CPU%d: failed to initialize ACPI performance data.\n", cpu);
+		return result;
+	}
+
+	for (i = 0; i < MAX_PACKAGES; i++)
+		mutex_init(&boost_mutex[i]);
+
+	/* capability check */
+	if (perf->state_count <= 1) {
+		pr_debug("No P-States\n");
+		result = -ENODEV;
+		goto err_unreg;
+	}
+
+	freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table),
+			GFP_KERNEL);
+	if (!freq_table) {
+		result = -ENOMEM;
+		goto err_unreg;
+	}
+
+	/* detect transition latency */
+	policy->cpuinfo.transition_latency = 0;
+	for (i = 0; i < perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) >
+				policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency =
+				perf->states[i].transition_latency * 1000;
+		if (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) {
+			set_boost_freq(true);
+		} else {
+			if (perf->states[i].core_frequency > core->normal_max_freq)
+				core->normal_max_freq = perf->states[i].core_frequency;
+		}
+	}
+
+	core->freq_update_delay_ns = policy->cpuinfo.transition_latency;
+
+	for (i = 0; i < boost_gears + 1; i++) {
+		core->clock_scale[i] = compute_scale(&core->shift[i], boost_freqs[i], core->normal_max_freq);
+		pr_debug("%s: boost_freqs[%d] %d, normal_max_freq %d, scale %d, shift %d\n",
+				__func__, i, boost_freqs[i], core->normal_max_freq,
+				core->clock_scale[i], core->shift[i]);
+	}
+
+	/* table init */
+	for (i = 0; i < perf->state_count; i++) {
+		freq_table[i].driver_data = (perf->states[i].control & LOONGSON_BOOST_FREQ_MASK) >> 8;
+		if (freq_table[i].driver_data)
+			freq_table[i].flags |= CPUFREQ_BOOST_FREQ;
+		freq_table[i].frequency =
+			perf->states[i].core_frequency * 1000;
+	}
+	freq_table[i].frequency = CPUFREQ_TABLE_END;
+	policy->freq_table = freq_table;
+	perf->state = 0;
+
+	/* add the boost attribute if supported */
+	if (has_boost_freq() && boost_supported())
+		loongson3_cpufreq_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs;
+
+	pr_info("CPU%u - ACPI performance management activated.\n", cpu);
+	for (i = 0; i < perf->state_count; i++)
+		pr_debug("     %cP%d: %d MHz, %d mW, %d uS %d level\n",
+				(i == perf->state ? '*' : ' '), i,
+				(u32) perf->states[i].core_frequency,
+				(u32) perf->states[i].power,
+				(u32) perf->states[i].transition_latency,
+				(u32) perf->states[i].control);
+
+	/*
+	 * The first call to ->target() should result in us actually
+	 * writing something to the appropriate registers.
+	 */
+	policy->fast_switch_possible = false;
+
+	init_irq_work(&core->irq_work, cpufreq_irq_work);
+	kthread_init_work(&core->work, cpufreq_work_handler);
+	core->sampling_rate = max_t(unsigned int,
+			CPUFREQ_SAMPLING_INTERVAL,
+			cpufreq_policy_transition_delay_us(policy));
+	return result;
+
+err_unreg:
+	if (!acpi_disabled)
+		acpi_processor_unregister_performance(cpu);
+
+	return result;
+}
+
+static int loongson3_cpufreq_cpu_exit(struct cpufreq_policy *policy)
+{
+	struct core_data *core = get_core_data(policy->cpu);
+
+	clear_update_util_hook(policy->cpu);
+	irq_work_sync(&core->irq_work);
+	kthread_cancel_work_sync(&core->work);
+	core->work_in_progress = false;
+	policy->fast_switch_possible = false;
+	if (!acpi_disabled)
+		acpi_processor_unregister_performance(policy->cpu);
+	kfree(policy->freq_table);
+	kfree(core->boost_freq);
+	kfree(core->clock_scale);
+	kfree(core->shift);
+	return 0;
+}
+
+static struct freq_attr *loongson3_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,	/* extra space for the boost attribute, if supported */
+	NULL,
+};
+
+static struct cpufreq_driver loongson3_cpufreq_driver = {
+	.verify		= loongson3_cpufreq_verify_policy,
+	.setpolicy	= loongson3_cpufreq_set_policy,
+	.init		= loongson3_cpufreq_cpu_init,
+	.exit		= loongson3_cpufreq_cpu_exit,
+	.name		= "acpi-cpufreq",
+	.attr		= loongson3_cpufreq_attr,
+};
+
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
+static int __init loongson3_cpufreq_early_init(void)
+{
+	unsigned int i;
+
+	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
+	if (!acpi_perf_data)
+		return -ENOMEM;
+	for_each_possible_cpu(i) {
+		if (!zalloc_cpumask_var_node(
+				&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+				GFP_KERNEL, cpu_to_node(i))) {
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+static bool support_boost(void)
+{
+	int message;
+	int val;
+	int i;
+
+	if (wait_for_ready_timeout(MAX_READY_TIMEOUT))
+		return false;
+	message = DVFS_INFO << 24;
+	iocsr_write32(message, 0x51c);
+	val = iocsr_read32(0x420);
+
+	val |= 1 << 10;
+	iocsr_write32(val, 0x420);
+	if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) {
+		pr_info("%s: boost not supported\n", __func__);
+		return false;
+	}
+
+	val = iocsr_read32(0x51c);
+
+	min_freq_level = val & DVFS_INFO_MIN_FREQ;
+	max_freq_level = (val & DVFS_INFO_MAX_FREQ) >> 4;
+
+	if ((val & DVFS_INFO_BOOST_CORE_FREQ) && ((val & DVFS_INFO_BOOST_CORES) >> 20)) {
+		max_boost_cores = (val & DVFS_INFO_BOOST_CORES) >> 20;
+		max_boost_freq = ((val & DVFS_INFO_BOOST_CORE_FREQ) >> 8) * 25;
+		max_upper_index = (val & DVFS_INFO_NORMAL_CORE_UPPER_LIMIT) >> 16;
+	} else {
+		boost_gears = 0;
+		return false;
+	}
+
+	/* read the boost levels */
+	if (wait_for_ready_timeout(MAX_READY_TIMEOUT))
+		return false;
+
+	/* version 1 supports a single boost frequency */
+	message = DVFS_INFO_BOOST_LEVEL << 24;
+	iocsr_write32(message, 0x51c);
+	val = iocsr_read32(0x420);
+
+	val |= 1 << 10;
+	iocsr_write32(val, 0x420);
+
+	if (wait_for_ready_timeout(MAX_READY_TIMEOUT)) {
+		pr_info("%s: single boost mode\n", __func__);
+		boost_gears = 1;
+		boost_freqs[0] = calc_const_freq() / 1000000;
+		for (i = 1; i < boost_gears + 1; i++)
+			boost_freqs[i] = max_boost_freq;
+
+		/* set 0x51c complete */
+		iocsr_write32(COMPLETE_STATUS, 0x51c);
+	} else {
+		pr_info("%s: multi boost mode\n", __func__);
+		boost_gears = max_boost_cores;
+		val = iocsr_read32(0x51c);
+
+		boost_freqs[0] = calc_const_freq() / 1000000;
+		boost_freqs[1] = max_boost_freq;
+
+		if (boost_gears > 1) {
+			for (i = 2; i < boost_gears + 1; i++)
+				boost_freqs[i] = max_boost_freq - (((val >> ((i-2) * 4)) & 0xf) * FREQ_STEP);
+		}
+	}
+
+	pr_info("%s: min_freq_level %d, max_freq_level %d, max_boost_cores %d, boost_gears %d\n",
+			__func__, min_freq_level, max_freq_level, max_boost_cores, boost_gears);
+
+	return true;
+}
+
+static int cpufreq_table_cpuinfo(struct cpufreq_policy *policy,
+				struct cpufreq_frequency_table *table,
+				bool boost)
+{
+	struct cpufreq_frequency_table *pos;
+	unsigned int min_freq = ~0;
+	unsigned int max_freq = 0;
+	unsigned int freq;
+
+	cpufreq_for_each_valid_entry(pos, table) {
+		freq = pos->frequency;
+
+		/* skip boost entries unless boost is enabled */
+		if (!boost) {
+			if (pos->driver_data)
+				continue;
+		}
+		if (freq < min_freq)
+			min_freq = freq;
+		if (freq > max_freq)
+			max_freq = freq;
+	}
+
+	policy->min = policy->cpuinfo.min_freq = min_freq;
+	policy->max = policy->cpuinfo.max_freq = max_freq;
+	if (policy->min == ~0)
+		return -EINVAL;
+	else
+		return 0;
+}
+
+static int set_boost(struct cpufreq_policy *policy, int state)
+{
+	if (!has_boost_freq())
+		return -EINVAL;
+
+	if (!policy)
+		return -EINVAL;
+
+	if (!state) {
+		if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
+			cpufreq_update(policy);
+	}
+	if (!policy->freq_table)
+		return -EINVAL;
+
+	cpufreq_table_cpuinfo(policy, policy->freq_table, state);
+	down_write(&policy->rwsem);
+	up_write(&policy->rwsem);
+
+	if (!state)
+		set_max_within_limits(policy);
+
+	return 0;
+}
+
+static void __init loongson3_cpufreq_boost_init(void)
+{
+	if (!support_boost()) {
+		pr_info("Boost capabilities not present in the processor\n");
+		return;
+	}
+
+	loongson3_cpufreq_driver.set_boost = set_boost;
+}
+
+static int cpufreq_supported_detect(void)
+{
+	return wait_for_ready_timeout(MAX_READY_TIMEOUT);
+}
+
+static int __init loongson3_cpufreq_init(void)
+{
+	int ret;
+
+	if (!cpu_has_csr || !cpu_has_scalefreq)
+		return -ENODEV;
+
+	/* don't keep reloading if a cpufreq driver already exists */
+	if (cpufreq_get_current_driver())
+		return -EEXIST;
+
+	if (cpufreq_supported_detect()) {
+		pr_info("%s failed!\n", __func__);
+		return -ENODEV;
+	}
+
+	ret = loongson3_cpufreq_early_init();
+	if (ret)
+		return ret;
+	loongson3_cpufreq_boost_init();
+
+	ret = cpufreq_kthread_create();
+	if (ret) {
+		free_acpi_perf_data();
+		return ret;
+	}
+
+	cpufreq_register_notifier(&loongson3_cpufreq_notifier_block,
+			CPUFREQ_TRANSITION_NOTIFIER);
+	ret = cpufreq_register_driver(&loongson3_cpufreq_driver);
+	if (ret) {
+		cpufreq_kthread_stop();
+		free_acpi_perf_data();
+	}
+
+	return ret;
+}
+
+static void __exit loongson3_cpufreq_exit(void)
+{
+	cpufreq_unregister_driver(&loongson3_cpufreq_driver);
+	free_acpi_perf_data();
+	cpufreq_kthread_stop();
+}
+
+late_initcall(loongson3_cpufreq_init);
+module_exit(loongson3_cpufreq_exit);
+
+static const struct acpi_device_id processor_device_ids[] = {
+	{ACPI_PROCESSOR_OBJECT_HID, },
+	{ACPI_PROCESSOR_DEVICE_HID, },
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, processor_device_ids);
+
+MODULE_ALIAS("acpi");
diff --git a/drivers/cpufreq/sw64_cpufreq.c b/drivers/cpufreq/sw64_cpufreq.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4bf5f3cc550d9ce0788707c2f8cb7eef11d77b2
--- /dev/null
+++ b/drivers/cpufreq/sw64_cpufreq.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/arch/sw/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * Cpufreq driver for the sw64 processors
+ */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include
+#include +#include +#include /* set_cpus_allowed() */ +#include +#include +#include + +#include +#include +#include + +static uint nowait; + +static struct clk *cpuclk; + + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data); + +static struct notifier_block sw64_cpufreq_notifier_block = { + .notifier_call = sw64_cpu_freq_notifier +}; + +static int sw64_cpu_freq_notifier(struct notifier_block *nb, + unsigned long val, void *data) +{ + struct cpufreq_freqs *freqs = (struct cpufreq_freqs *)data; + unsigned long cpu = freqs->policy->cpu; + + if (val == CPUFREQ_POSTCHANGE) + sw64_update_clockevents(cpu, freqs->new * 1000); + + return 0; +} + +static unsigned int sw64_cpufreq_get(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + + if (!policy || IS_ERR(policy->clk)) { + pr_err("%s: No %s associated to cpu: %d\n", + __func__, policy ? "clk" : "policy", cpu); + return 0; + } + + return __sw64_cpufreq_get(policy); +} + +/* + * Here we notify other drivers of the proposed change and the final change. + */ +static int sw64_cpufreq_target(struct cpufreq_policy *policy, + unsigned int index) +{ + unsigned int cpu = policy->cpu; + + if (!cpu_online(cpu)) + return -ENODEV; + + /* setting the cpu frequency */ + sw64_set_rate(index); + update_cpu_freq(freq_table[index].frequency); + + return 0; +} + +static int sw64_cpufreq_cpu_init(struct cpufreq_policy *policy) +{ + cpuclk = sw64_clk_get(NULL, "cpu_clk"); + if (IS_ERR(cpuclk)) { + pr_err("couldn't get CPU clk\n"); + return PTR_ERR(cpuclk); + } + + policy->clk = cpuclk; + + cpufreq_generic_init(policy, freq_table, 0); + + return 0; +} + +static int sw64_cpufreq_verify(struct cpufreq_policy_data *policy) +{ + return cpufreq_frequency_table_verify(policy, freq_table); +} + +static int sw64_cpufreq_exit(struct cpufreq_policy *policy) +{ + return 0; +} + +static struct freq_attr *sw64_table_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, NULL, +}; + +static struct cpufreq_driver sw64_cpufreq_driver = { + .name = "sw64", + .init = sw64_cpufreq_cpu_init, + .verify = sw64_cpufreq_verify, + .target_index = sw64_cpufreq_target, + .get = sw64_cpufreq_get, + .exit = sw64_cpufreq_exit, + .attr = sw64_table_attr, +}; + +static const struct platform_device_id platform_device_ids[] = { + { + .name = "sw64_cpufreq", + }, + {} +}; + +MODULE_DEVICE_TABLE(platform, platform_device_ids); + +static struct platform_driver platform_driver = { + .driver = { + .name = "sw64_cpufreq", + }, + .id_table = platform_device_ids, +}; + + +static int __init cpufreq_init(void) +{ + int ret; + + if (is_in_guest()) { + pr_warn("Now sw_64 CPUFreq does not support virtual machines\n"); + return -ENODEV; + } + + /* Register platform stuff */ + ret = platform_driver_register(&platform_driver); + if (ret) + return ret; + + pr_info("SW-64 CPU frequency driver\n"); + + cpufreq_register_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + return cpufreq_register_driver(&sw64_cpufreq_driver); +} + +static void __exit cpufreq_exit(void) +{ + cpufreq_unregister_driver(&sw64_cpufreq_driver); + cpufreq_unregister_notifier(&sw64_cpufreq_notifier_block, + CPUFREQ_TRANSITION_NOTIFIER); + + platform_driver_unregister(&platform_driver); +} + +module_init(cpufreq_init); +module_exit(cpufreq_exit); + +module_param(nowait, uint, 0644); +MODULE_PARM_DESC(nowait, "Disable SW-64 specific wait"); + +MODULE_DESCRIPTION("cpufreq driver for sw64"); +MODULE_LICENSE("GPL"); diff --git 
a/drivers/cpufreq/sw64_cpufreq_debugfs.c b/drivers/cpufreq/sw64_cpufreq_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..bb4ae26bc22b4a561039433f4e291c9a87d78a32 --- /dev/null +++ b/drivers/cpufreq/sw64_cpufreq_debugfs.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include + +#include +#include +#include + +static int cpufreq_show(struct seq_file *m, void *v) +{ + int i; + u64 val; + int freq; + + val = sw64_io_read(0, CLK_CTL); + val = val >> CORE_PLL2_CFG_SHIFT; + + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (val == i) + seq_printf(m, "[%d] ", freq); + else + seq_printf(m, "%d ", freq); + } + seq_puts(m, "\n"); + + return 0; +} + +static int cpufreq_open(struct inode *inode, struct file *file) +{ + return single_open(file, cpufreq_show, NULL); +} + +static ssize_t cpufreq_set(struct file *file, const char __user *user_buf, + size_t len, loff_t *ppos) +{ + char buf[5]; + size_t size; + int cf, i, err, index, freq; + + size = min(sizeof(buf) - 1, len); + if (copy_from_user(buf, user_buf, size)) + return -EFAULT; + buf[size] = '\0'; + + err = kstrtoint(buf, 10, &cf); + if (err) + return err; + + index = -1; + for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { + if (freq_table[i].frequency != CPUFREQ_ENTRY_INVALID) + freq = freq_table[i].frequency; + else + freq = freq_table[i].driver_data; + + if (cf == freq) { + index = i; + break; + } + } + + if (index < 0) + return -EINVAL; + + sw64_set_rate(index); + update_cpu_freq(freq); + return len; +} + +static const struct file_operations set_cpufreq_fops = { + .open = cpufreq_open, + .read = seq_read, + .write = cpufreq_set, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init cpufreq_debugfs_init(void) +{ + struct dentry *cpufreq_entry; + + if (!sw64_debugfs_dir) + return -ENODEV; + + cpufreq_entry = debugfs_create_file("cpufreq", 0600, + sw64_debugfs_dir, NULL, + &set_cpufreq_fops); + if (!cpufreq_entry) + return -ENOMEM; + + return 0; +} +late_initcall(cpufreq_debugfs_init); diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index c761952f0dc6df92e1ee37ad5707bb7539c2cef3..b84a921d293f0b9f97379ba6d35514570b7ffa29 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -52,6 +52,45 @@ config CRYPTO_DEV_PADLOCK_SHA If unsure say M. The compiled module will be called padlock-sha. +config CRYPTO_DEV_ZHAOXIN + tristate "Support for Zhaoxin ACE" + depends on X86 && !UML + help + Some Zhaoxin processors come with an integrated crypto engine + (so called Zhaoxin ACE, Advanced Cryptography Engine) + that provides instructions for very fast cryptographic + operations with supported algorithms. + + The instructions are used only when the CPU supports them. + Otherwise software encryption is used. + +config CRYPTO_DEV_ZHAOXIN_AES + tristate "Zhaoxin ACE driver for AES algorithm" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_BLKCIPHER + select CRYPTO_AES + help + Use Zhaoxin ACE for AES algorithm. + + Available in Zhaoxin CPUs. + + If unsure say M. The compiled module will be + called zhaoxin-aes. + +config CRYPTO_DEV_ZHAOXIN_SHA + tristate "Zhaoxin ACE driver for SHA1 and SHA256 algorithms" + depends on CRYPTO_DEV_ZHAOXIN + select CRYPTO_HASH + select CRYPTO_SHA1 + select CRYPTO_SHA256 + help + Use Zhaoxin ACE for SHA1/SHA256 algorithms. + + Available in Zhaoxin processors. 
+
+	  If unsure say M. The compiled module will be
+	  called zhaoxin-sha.
+
 config CRYPTO_DEV_GEODE
 	tristate "Support for the Geode LX AES engine"
 	depends on X86_32 && PCI
@@ -796,5 +835,6 @@ config CRYPTO_DEV_SA2UL
 source "drivers/crypto/aspeed/Kconfig"
 source "drivers/crypto/starfive/Kconfig"
+source "drivers/crypto/montage/Kconfig"
 
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index d859d6a5f3a45439c6e14bb19d6240e121c9ac62..5247d2bf09ce63d3d1ec8f293c4c3893b074db1e 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -31,6 +31,8 @@ obj-$(CONFIG_CRYPTO_DEV_OMAP_DES) += omap-des.o
 obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_AES) += padlock-aes.o
 obj-$(CONFIG_CRYPTO_DEV_PADLOCK_SHA) += padlock-sha.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_AES) += zhaoxin-aes.o
+obj-$(CONFIG_CRYPTO_DEV_ZHAOXIN_SHA) += zhaoxin-sha.o
 obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
 obj-$(CONFIG_CRYPTO_DEV_QCOM_RNG) += qcom-rng.o
@@ -51,3 +53,4 @@ obj-y += hisilicon/
 obj-$(CONFIG_CRYPTO_DEV_AMLOGIC_GXL) += amlogic/
 obj-y += intel/
 obj-y += starfive/
+obj-y += montage/
diff --git a/drivers/crypto/ccp/Kconfig b/drivers/crypto/ccp/Kconfig
index 32268e239bf15e49c9749e3fd5e9640ea09056a0..702b4c6761fd52f0fd2f8cdf847d7de43b45734d 100644
--- a/drivers/crypto/ccp/Kconfig
+++ b/drivers/crypto/ccp/Kconfig
@@ -46,6 +46,39 @@ config CRYPTO_DEV_SP_PSP
 	  along with software-based Trusted Execution Environment (TEE) to
 	  enable third-party trusted applications.
+
+config HYGON_GM
+	bool "Hygon GM (sm2/sm3/sm4) Interface"
+	default y
+	depends on CRYPTO_DEV_CCP_CRYPTO && X86_64
+	help
+	  Hygon GM (SM2/SM3/SM4) CCP driver.
+
+config HYGON_PSP2CPU_CMD
+	bool "Hygon PSP2CPU Command Interface"
+	default y
+	depends on CRYPTO_DEV_SP_PSP
+	help
+	  Support for Hygon PSP2CPU commands.
+
+config TDM_DEV_HYGON
+	bool "Hygon TDM Interface"
+	default y
+	depends on CRYPTO_DEV_CCP_DD
+	depends on HYGON_PSP2CPU_CMD
+	help
+	  Hygon TDM driver.
+
+config TDM_KERNEL_GUARD
+	tristate "Hygon TDM kernel guard"
+	default y
+	depends on TDM_DEV_HYGON
+	depends on CRYPTO_DEV_CCP_DD
+	depends on CRYPTO_SM3
+	help
+	  Protects key parts of the kernel with TDM technology. The SCT and
+	  IDT are protected by default; other regions can be added later as
+	  required.
+ config CRYPTO_DEV_CCP_DEBUGFS bool "Enable CCP Internals in DebugFS" default n diff --git a/drivers/crypto/ccp/Makefile b/drivers/crypto/ccp/Makefile index aa0ba2d17e1e2202bf6115e0774bf63a802115cb..088d53009824fa3fa50de08eb5b904d852874a59 100644 --- a/drivers/crypto/ccp/Makefile +++ b/drivers/crypto/ccp/Makefile @@ -12,8 +12,11 @@ ccp-$(CONFIG_CRYPTO_DEV_SP_PSP) += psp-dev.o \ sev-dev.o \ tee-dev.o \ platform-access.o \ - dbc.o + dbc.o \ + psp-ringbuf.o \ + csv-dev.o +ccp-$(CONFIG_TDM_DEV_HYGON) += tdm-dev.o obj-$(CONFIG_CRYPTO_DEV_CCP_CRYPTO) += ccp-crypto.o ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-aes.o \ @@ -23,3 +26,12 @@ ccp-crypto-objs := ccp-crypto-main.o \ ccp-crypto-des3.o \ ccp-crypto-rsa.o \ ccp-crypto-sha.o +obj-$(CONFIG_TDM_KERNEL_GUARD) += tdm-kernel-guard.o + +$(obj)/ccp_sm2_sign.asn1.o: $(obj)/ccp_sm2_sign.asn1.c $(obj)/ccp_sm2_sign.asn1.h +$(obj)/ccp-crypto-sm2-hygon.o: $(obj)/ccp_sm2_sign.asn1.h + +ccp-crypto-$(CONFIG_HYGON_GM) += ccp-crypto-sm2-hygon.o \ + ccp-crypto-sm3-hygon.o \ + ccp-crypto-sm4-hygon.o \ + ccp_sm2_sign.asn1.o diff --git a/drivers/crypto/ccp/ccp-crypto-main.c b/drivers/crypto/ccp/ccp-crypto-main.c index ecd58b38c46eed8fa79e9028f7e2be097f85463b..3d22fbabc815a5d452bcf782617ec58e1db81866 100644 --- a/drivers/crypto/ccp/ccp-crypto-main.c +++ b/drivers/crypto/ccp/ccp-crypto-main.c @@ -39,6 +39,10 @@ static unsigned int rsa_disable; module_param(rsa_disable, uint, 0444); MODULE_PARM_DESC(rsa_disable, "Disable use of RSA - any non-zero value"); +static unsigned int sm_disable; +module_param(sm_disable, uint, 0444); +MODULE_PARM_DESC(sm_disable, "Disable use of SM2/SM3/SM4 - any non-zero value"); + /* List heads for the supported algorithms */ static LIST_HEAD(hash_algs); static LIST_HEAD(skcipher_algs); @@ -322,6 +326,25 @@ static int ccp_register_algs(void) { int ret; +#ifdef CONFIG_HYGON_GM + if (!sm_disable && boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = ccp_register_sm2_hygon_algs(&akcipher_algs); + if (ret) + return ret; + + ret = ccp_register_sm3_hygon_algs(&hash_algs); + if (ret) + return ret; + + ret = ccp_register_sm4_hygon_algs(&skcipher_algs); + if (ret) + return ret; + + /* Return on hygon platform */ + return 0; + } +#endif + if (!aes_disable) { ret = ccp_register_aes_algs(&skcipher_algs); if (ret) diff --git a/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..25c9a49f7d2236059517a5b62f18375dc6a53f8b --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm2-hygon.c @@ -0,0 +1,1155 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM2 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ccp-crypto.h" +#include "ccp_sm2_sign.asn1.h" + +static const u8 sm2_ecc_p[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, +}; + +static const u8 sm2_ecc_a[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, +}; + +static const u8 sm2_ecc_b[CCP_SM2_OPERAND_LEN] = { + 0x28, 0xE9, 0xFA, 0x9E, 0x9D, 0x9F, 0x5E, 0x34, + 0x4D, 0x5A, 0x9E, 0x4B, 0xCF, 0x65, 0x09, 0xA7, + 0xF3, 0x97, 0x89, 0xF5, 0x15, 0xAB, 0x8F, 0x92, + 0xDD, 0xBC, 0xBD, 0x41, 0x4D, 0x94, 0x0E, 0x93, +}; + +static const u8 sm2_ecc_n_sub_1[CCP_SM2_OPERAND_LEN] = { + 0xFF, 0xFF, 0xFF, 0xFE, 0xFF, 0xFF, 0xFF, 0xFF, + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, + 0x72, 0x03, 0xDF, 0x6B, 0x21, 0xC6, 0x05, 0x2B, + 0x53, 0xBB, 0xF4, 0x09, 0x39, 0xD5, 0x41, 0x22, +}; + +static const u8 sm2_ecc_gx[CCP_SM2_OPERAND_LEN] = { + 0x32, 0xC4, 0xAE, 0x2C, 0x1F, 0x19, 0x81, 0x19, + 0x5F, 0x99, 0x04, 0x46, 0x6A, 0x39, 0xC9, 0x94, + 0x8F, 0xE3, 0x0B, 0xBF, 0xF2, 0x66, 0x0B, 0xE1, + 0x71, 0x5A, 0x45, 0x89, 0x33, 0x4C, 0x74, 0xC7, +}; + +static const u8 sm2_ecc_gy[CCP_SM2_OPERAND_LEN] = { + 0xBC, 0x37, 0x36, 0xA2, 0xF4, 0xF6, 0x77, 0x9C, + 0x59, 0xBD, 0xCE, 0xE3, 0x6B, 0x69, 0x21, 0x53, + 0xD0, 0xA9, 0x87, 0x7C, 0xC6, 0x2A, 0x47, 0x40, + 0x02, 0xDF, 0x32, 0xE5, 0x21, 0x39, 0xF0, 0xA0, +}; + +struct ccp_sm2_verify_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* input data r */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* input data s */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_lp_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ + u8 operand_px[CCP_SM2_OPERAND_LEN]; /* x of public key */ + u8 operand_py[CCP_SM2_OPERAND_LEN]; /* y of public key */ +}; + +struct ccp_sm2_kg_src { + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_sign_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* compressed message */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* private key */ + u8 operand_k[CCP_SM2_OPERAND_LEN]; /* random number */ +}; + +struct ccp_sm2_mmul_src { + u8 operand_e[CCP_SM2_OPERAND_LEN]; /* mulplicand */ + u8 operand_d[CCP_SM2_OPERAND_LEN]; /* mulplicator */ +}; + +struct ccp_sm2_dst { + union { + u8 result[CCP_SM2_OPERAND_LEN]; + u32 status; + } u; + u8 result_r[CCP_SM2_OPERAND_LEN]; + u8 result_s[CCP_SM2_OPERAND_LEN]; + u8 result_t[CCP_SM2_OPERAND_LEN]; +}; + +struct sm2_signature_ctx { + const u8 *sig_r; + const u8 *sig_s; + size_t r_len; + size_t s_len; +}; + +int ccp_sm2_get_signature_r(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_r = value; + sig->r_len = vlen; + + if (!sig->sig_r) + return -ENOMEM; + + return 0; +} + +int ccp_sm2_get_signature_s(void *context, size_t hdrlen, unsigned char tag, + const void *value, size_t vlen) +{ + struct sm2_signature_ctx *sig = context; + + if (!value || !vlen) + return -EINVAL; + + sig->sig_s = value; + sig->s_len = vlen; + + if (!sig->sig_s) 
+ return -ENOMEM; + + return 0; +} + +static bool ccp_sm2_is_zero(const u64 *data, u32 count) +{ + u32 i; + + for (i = 0; i < count; i++) { + if (data[i]) + return false; + } + + return true; +} + +/* Return: + * 1: a > b + * -1: a < b + * 0: a = b + */ +static int ccp_sm2_fp_cmp(const u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu; + u32 i; + + for (i = 0; i < count; i++) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + if (a_cpu > b_cpu) + return 1; + else if (a_cpu < b_cpu) + return -1; + } + + return 0; +} + +/* a = a + b */ +static void ccp_sm2_fp_add(u64 *a, const u64 *b, u32 count) +{ + u64 a_cpu, b_cpu, c_cpu, d_cpu; + u32 carry = 0; + s32 i; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + b_cpu = be64_to_cpu(b[i]); + c_cpu = a_cpu + b_cpu; + d_cpu = c_cpu + carry; + a[i] = cpu_to_be64(d_cpu); + + if (c_cpu < a_cpu) + carry = 1; + else if (carry && !d_cpu) + carry = 1; + else + carry = 0; + } +} + +/* a = -a */ +static void ccp_sm2_fp_neg(u64 *a, u32 count) +{ + u64 a_cpu, c_cpu; + s32 i; + + for (i = 0; i <= count - 1; i++) + a[i] = ~a[i]; + + for (i = count - 1; i >= 0; i--) { + a_cpu = be64_to_cpu(a[i]); + c_cpu = a_cpu + 1; + a[i] = cpu_to_be64(c_cpu); + + if (a_cpu < c_cpu) + break; + } +} + +/* a = a - b */ +static void ccp_sm2_fp_sub(u64 *a, u64 *b, u32 count) +{ + ccp_sm2_fp_neg(b, count); + ccp_sm2_fp_add(a, b, count); +} + +/* a and tmp must be 64B, b and c must be 32B + * a = b * c + */ +static void ccp_sm2_fp_mmul32(u8 *a, const u32 *b, const u32 *c, u8 *tmp) +{ + u64 b_cpu, c_cpu, m_cpu; + u32 rem_cpu; + u32 *base, *m_cur; + int i, j, iter; + + memset(a, 0, CCP_SM2_MMUL_LEN); + + iter = 7; + base = (u32 *)(tmp + CCP_SM2_MMUL_LEN - sizeof(u32)); + for (i = iter; i >= 0; i--) { + b_cpu = be32_to_cpu(b[i]); + memset(tmp, 0, CCP_SM2_MMUL_LEN); + + rem_cpu = 0; + m_cur = base; + for (j = iter; j >= 0; j--) { + c_cpu = be32_to_cpu(c[j]); + + m_cpu = b_cpu * c_cpu + rem_cpu; + rem_cpu = (u32)(m_cpu >> 32); + *m_cur = cpu_to_be32((u32)(m_cpu)); + m_cur--; + } + *m_cur = cpu_to_be32(rem_cpu); + ccp_sm2_fp_add((u64 *)a, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + + base--; + } +} + +/* mmul, dst, tmp must be 64B, remainder in mmul[32-63] + * high:low mod p + * = high*2^256+low mod p + * = high*(p+h)+low mod p + * = high*h+low mod p + * = high*(2^224+2^96-2^64+1)+low mod p + * iterating 8 times + */ +static void ccp_sm2_fast_mod_p(u8 *mmul, u8 *dst, u8 *tmp) +{ + u8 *mmul_high, *mmul_low; + u32 count; + int i, iter, ret; + + mmul_high = mmul; + mmul_low = mmul + CCP_SM2_OPERAND_LEN; + count = CCP_SM2_MMUL_LEN / sizeof(u64); + + iter = 8; + for (i = 0; i < iter; i++) { + /* dst = high * 2^224 */ + memset(dst, 0, CCP_SM2_MMUL_LEN); + memcpy(dst + 4, mmul_high, CCP_SM2_OPERAND_LEN); + + /* dst += high * 2^96 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 20, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 2^64 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 24, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_sub((u64 *)dst, (u64 *)tmp, count); + + /* dst += high * 1 */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_high, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* dst += low */ + memset(tmp, 0, CCP_SM2_MMUL_LEN); + memcpy(tmp + 32, mmul_low, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)dst, (u64 *)tmp, count); + + /* copy dst to mmul */ + memcpy(mmul, dst, CCP_SM2_MMUL_LEN); + } + + do { + memset(tmp, 0, CCP_SM2_MMUL_LEN); 
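+ /* compare against p left-padded to the 64-byte operand width,
+ * and keep subtracting p until the remainder drops below p
+ */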
+ memcpy(tmp + 32, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + ret = ccp_sm2_fp_cmp( + (u64 *)mmul, (u64 *)tmp, + CCP_SM2_MMUL_LEN / sizeof(u64)); + if (ret < 0) + break; + + ccp_sm2_fp_sub((u64 *)mmul, (u64 *)tmp, count); + } while (1); +} + +static int ccp_sm2_is_privkey_valid(const u8 *priv_key) +{ + u64 last, last_cpu; + bool zero; + int ret; + + /* private key is satisfied with(1, n-1) */ + zero = ccp_sm2_is_zero((const u64 *)priv_key, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64) - 1); + if (zero) { + last = *(const u64 *) + (priv_key + CCP_SM2_PRIVATE_KEY_LEN - sizeof(u64)); + last_cpu = be64_to_cpu(last); + if (last_cpu <= 1) + return -EINVAL; + } + + ret = ccp_sm2_fp_cmp((const u64 *)priv_key, + (const u64 *)sm2_ecc_n_sub_1, + CCP_SM2_PRIVATE_KEY_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_setprivkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + int ret; + + if (!key || keylen != CCP_SM2_PRIVATE_KEY_LEN) + return -EINVAL; + + ret = ccp_sm2_is_privkey_valid(key); + if (ret < 0) + return ret; + + memcpy(sm2->pri_key, key, CCP_SM2_PRIVATE_KEY_LEN); + sm2->pri_key_len = CCP_SM2_PRIVATE_KEY_LEN; + + return 0; +} + +static int ccp_sm2_post_cmd(struct ccp_sm2_req_ctx *rctx, + u32 src_size, enum ccp_sm2_mode mode, u32 rand) +{ + struct akcipher_request *req = rctx->req; + struct ccp_sm2_engine *sm2 = NULL; + int ret; + + sg_init_one(&rctx->src_sg, rctx->src, src_size); + memset(rctx->dst, 0, CCP_SM2_DST_SIZE); + sg_init_one(&rctx->dst_sg, rctx->dst, CCP_SM2_DST_SIZE); + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM2; + + sm2 = &rctx->cmd.u.sm2; + sm2->mode = mode; + sm2->rand = rand; /* whether read operand_k from trng */ + sm2->src = &rctx->src_sg; + sm2->src_len = src_size; + sm2->dst = &rctx->dst_sg; + sm2->dst_len = CCP_SM2_DST_SIZE; + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; +} + +static int ccp_sm2_pubkey_strict_valid(const u8 *px, const u8 *py) +{ + u64 buf[CCP_SM2_OPERAND_LEN / sizeof(u64)]; + int ret1, ret2; + + /* private key is 1, corresponding public key is invalid */ + ret1 = memcmp(px, sm2_ecc_gx, CCP_SM2_OPERAND_LEN); + ret2 = memcmp(py, sm2_ecc_gy, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + /* private key is n - 1, corresponding public key is invalid */ + memcpy(buf, py, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add(buf, (const u64 *)sm2_ecc_gy, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + ret2 = memcmp(buf, sm2_ecc_p, CCP_SM2_OPERAND_LEN); + if (!ret1 && !ret2) + return -EINVAL; + + return 0; +} + +static int ccp_sm2_is_pubkey_valid(struct ccp_sm2_req_ctx *rctx, bool strict) +{ + const u8 *px, *py; + u8 *tmp; + bool zero; + int ret; + + px = rctx->src + CCP_SM2_LP_SRC_SIZE; + py = px + CCP_SM2_OPERAND_LEN; + + zero = ccp_sm2_is_zero((u64 *)px, CCP_SM2_PUBLIC_KEY_LEN / sizeof(u64)); + if (zero) + return -EINVAL; + + /* x < p */ + ret = ccp_sm2_fp_cmp((u64 *)px, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + /* y < p */ + ret = ccp_sm2_fp_cmp((u64 *)py, (const u64 *)sm2_ecc_p, + CCP_SM2_OPERAND_LEN / sizeof(u64)); + if (ret >= 0) + return -EINVAL; + + if (strict) { + ret = ccp_sm2_pubkey_strict_valid(px, py); + if (ret < 0) + return ret; + } + + /* check whether y^2 = x^3 + ax + b */ + tmp = rctx->dst + CCP_SM2_MMUL_LEN; + /* y * y */ + 
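+ /* each product below is computed with ccp_sm2_fp_mmul32() and
+ * reduced mod p with ccp_sm2_fast_mod_p(); rctx->src and
+ * rctx->dst double as scratch space between the steps
+ */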
ccp_sm2_fp_mmul32(rctx->dst, (u32 *)py, (u32 *)py, tmp); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* x * x + a */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)px, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_a, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + memcpy(rctx->src, rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + /* (x * x + a) * x + b */ + ccp_sm2_fp_mmul32(rctx->dst, (u32 *)px, (u32 *)rctx->src, tmp); + memset(rctx->src, 0, CCP_SM2_MMUL_LEN); + memcpy(rctx->src + CCP_SM2_OPERAND_LEN, sm2_ecc_b, CCP_SM2_OPERAND_LEN); + ccp_sm2_fp_add((u64 *)rctx->dst, (u64 *)rctx->src, + CCP_SM2_MMUL_LEN / sizeof(u64)); + ccp_sm2_fast_mod_p(rctx->dst, rctx->src, tmp); + + ret = memcmp(rctx->src + CCP_SM2_MMUL_LEN, + rctx->dst + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + if (ret) + return -EINVAL; + + /* Because the cofactor of the ECC group is 1, + * the checking that [n]P=O is not required. + */ + + return 0; +} + +static int ccp_sm2_setpubkey(struct crypto_akcipher *tfm, + const void *key, unsigned int keylen) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_ctx *sm2 = &ctx->u.sm2; + struct ccp_sm2_req_ctx *rctx = NULL; + const unsigned char *cflag = (const unsigned char *)key; + int ret; + + if (!key || keylen < CCP_SM2_PUBLIC_KEY_LEN) + return -EINVAL; + + /* When the length of sm2 public key is 65, + * content of key should be 04 || X || Y, from GM/T0009-2012. + */ + if (keylen > CCP_SM2_PUBLIC_KEY_LEN) { + if (*cflag != 0x04) + return -EINVAL; + key = key + 1; + } + + /* check whether public key is valid */ + rctx = kmalloc(sizeof(*rctx), GFP_KERNEL); + if (!rctx) + return -ENOMEM; + + memcpy(rctx->src + CCP_SM2_LP_SRC_SIZE, key, CCP_SM2_PUBLIC_KEY_LEN); + ret = ccp_sm2_is_pubkey_valid(rctx, true); + kfree(rctx); + if (ret < 0) + return ret; + + /* public key is valid */ + memcpy(sm2->pub_key, key, CCP_SM2_PUBLIC_KEY_LEN); + sm2->pub_key_len = CCP_SM2_PUBLIC_KEY_LEN; + + return 0; +} + +static unsigned int ccp_sm2_maxsize(struct crypto_akcipher *tfm) +{ + return CCP_SM2_DST_SIZE; +} + +static int ccp_sm2_compute_c3(struct crypto_shash *shash, + struct scatterlist *sg, u32 mlen, + u8 *c3, const u8 *x2, const u8 *y2) +{ + unsigned int len, remain; + int ret; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret < 0) + return ret; + + /* update X2 */ + ret = crypto_shash_update(sdesc, x2, CCP_SM2_OPERAND_LEN); + if (ret < 0) + return ret; + + /* update M */ + remain = mlen; + while (sg) { + len = sg->length; + if (len > remain) + len = remain; + ret = crypto_shash_update(sdesc, (u8 *)sg_virt(sg), len); + if (ret < 0) + return ret; + + remain -= len; + if (!remain) + break; + + sg = sg_next(sg); + } + + /* ccp_sm2_encrypt should have checked length */ + if (unlikely(!sg)) + return -EINVAL; + + /* update Y2 */ + ret = crypto_shash_finup(sdesc, y2, CCP_SM2_OPERAND_LEN, c3); + + return ret; +} + +static bool ccp_sm2_msg_xor_t(u8 *msg, const u8 *t, u32 len) +{ + u64 *msg_cur, *msg_last, *t_cur; + u32 zero_cnt = 0; + u32 rem; + int i; + + msg_cur = (u64 *)msg; + t_cur = (u64 *)t; + msg_last = msg_cur + (len / sizeof(u64)); + while (msg_cur != msg_last) { + if (likely(*t_cur)) + *msg_cur = *msg_cur ^ *t_cur; + else + zero_cnt += sizeof(u64); + + 
msg_cur++; + t_cur++; + } + + msg = (u8 *)msg_cur; + t = (const u8 *)t_cur; + rem = len % sizeof(u64); + for (i = 0; i < rem; i++) { + if (likely(t[i])) + msg[i] = msg[i] ^ t[i]; + else + zero_cnt++; + } + + return zero_cnt == len; +} + +static int ccp_sm2_kdf_xor(struct crypto_shash *shash, + struct scatterlist *src, u32 src_offset, u32 src_len, + struct scatterlist *dst, u32 dst_offset, + u8 *x2_y2_ct, bool *all_zero, struct ccp_sm2_req_ctx *rctx) +{ + u32 *be_ct = NULL; + u32 ct, len, remain; + bool zero; + int ret = 0; + + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + *all_zero = true; + ct = 1; + be_ct = (u32 *)(x2_y2_ct + CCP_SM2_PUBLIC_KEY_LEN); + remain = src_len; + while (remain) { + len = SM3_DIGEST_SIZE; + if (len > remain) + len = remain; + *be_ct = cpu_to_be32(ct); + ret = crypto_shash_digest(sdesc, x2_y2_ct, + CCP_SM2_PUBLIC_KEY_LEN + sizeof(*be_ct), rctx->src); + if (ret < 0) + break; + + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, src, + src_offset, len, 0); + zero = ccp_sm2_msg_xor_t(rctx->src + SM3_DIGEST_SIZE, + rctx->src, len); + if (zero == false) + *all_zero = false; + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, dst, + dst_offset, len, 1); + + remain -= len; + src_offset += len; + dst_offset += len; + ct++; + } + + return ret; +} + +static void ccp_sm2_enc_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* C2 = M ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, 0, req->src_len, + req->dst, CCP_SM2_ENCRYPT_EXT_LEN, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (unlikely(all_zero)) { + ret = -EAGAIN; + goto e_hash; + } + + /* C3 */ + ret = ccp_sm2_compute_c3(shash, req->src, req->src_len, rctx->src, + dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* save C3 */ + scatterwalk_map_and_copy(rctx->src, req->dst, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 1); + +e_hash: + crypto_free_shash(shash); + +e_complete: + req->base.complete(req->base.data, ret); +} + +static void ccp_sm2_enc_lp(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int ret; + + /* save C1 */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_PUBLIC_KEY_LEN, 1); + /* operand_k used by kg is placed in dst->result_t */ + memcpy(src->operand_k, dst->result_t, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + rctx->phase = CCP_SM2_ENC_PH_LP; + + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + if (ret != -EBUSY && ret != -EINPROGRESS) + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_encrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct 
ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + int nents; + int ret; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (!req->src_len || + req->dst_len < CCP_SM2_ENCRYPT_EXT_LEN + req->src_len) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + rctx->req = req; + rctx->phase = CCP_SM2_ENC_PH_KG; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_KG_SRC_SIZE, CCP_SM2_MODE_KG, 1); + + return ret; +} + +static void ccp_sm2_dec_compute(struct work_struct *work) +{ + struct ccp_sm2_req_ctx *rctx = + container_of(work, struct ccp_sm2_req_ctx, work); + struct akcipher_request *req = rctx->req; + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct crypto_shash *shash = NULL; + bool all_zero = true; + int ret; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + goto e_complete; + } + + /* M' = C2 ^ t */ + ret = ccp_sm2_kdf_xor(shash, req->src, CCP_SM2_ENCRYPT_EXT_LEN, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, req->dst, 0, + dst->result_r, &all_zero, rctx); + if (ret < 0) + goto e_hash; + if (all_zero) { + ret = -EBADMSG; + goto e_hash; + } + + /* u */ + ret = ccp_sm2_compute_c3(shash, req->dst, + req->src_len - CCP_SM2_ENCRYPT_EXT_LEN, + rctx->src, dst->result_r, dst->result_s); + if (ret < 0) + goto e_hash; + + /* load and compare C3 */ + scatterwalk_map_and_copy(rctx->src + SM3_DIGEST_SIZE, req->src, + CCP_SM2_PUBLIC_KEY_LEN, SM3_DIGEST_SIZE, 0); + ret = memcmp(rctx->src, rctx->src + SM3_DIGEST_SIZE, SM3_DIGEST_SIZE); + if (ret) + ret = -EBADMSG; + +e_hash: + crypto_free_shash(shash); + +e_complete: + /* clear private key, plain, and dC1 */ + memset(rctx->src, 0, CCP_SM2_OPERAND_LEN * 2); + memset(dst, 0, CCP_SM2_DST_SIZE); + req->base.complete(req->base.data, ret); +} + +static int ccp_sm2_decrypt(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_lp_src *src = (struct ccp_sm2_lp_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len <= (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE)) + return -EINVAL; + + if (req->dst_len < req->src_len - CCP_SM2_ENCRYPT_EXT_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, req->src_len); + if (nents < 0) + return -EINVAL; + + /* load C1 */ + scatterwalk_map_and_copy(rctx->src + CCP_SM2_LP_SRC_SIZE, + req->src, 0, CCP_SM2_PUBLIC_KEY_LEN, 0); + ret = ccp_sm2_is_pubkey_valid(rctx, false); + if (ret < 0) + return -EBADMSG; + + /* do kP */ + memcpy(src->operand_k, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + memcpy(src->operand_px, rctx->src + CCP_SM2_LP_SRC_SIZE, + CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, rctx->src + CCP_SM2_LP_SRC_SIZE + + CCP_SM2_OPERAND_LEN, CCP_SM2_OPERAND_LEN); + rctx->req = req; + rctx->phase = CCP_SM2_DEC_PH_LP; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_LP_SRC_SIZE, CCP_SM2_MODE_LP, 0); + + return ret; +} + +static int ccp_sm2_sign(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + int nents; + int ret; + + if (!ctx->u.sm2.pri_key_len) + return -ENOKEY; + + if (req->src_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + + nents = sg_nents_for_len(req->src, 
CCP_SM2_OPERAND_LEN); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN, 0); + memcpy(src->operand_d, ctx->u.sm2.pri_key, CCP_SM2_PRIVATE_KEY_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_SIGN_PH_SIGN; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_SIGN_SRC_SIZE, + CCP_SM2_MODE_SIGN, 1); + + return ret; +} + +static int ccp_sm2_verify(struct akcipher_request *req) +{ + struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + struct ccp_sm2_verify_src *src = (struct ccp_sm2_verify_src *)rctx->src; + int siglen; + int nents; + int ret; + struct sm2_signature_ctx sig; + unsigned char *buffer; + + if (!ctx->u.sm2.pub_key_len) + return -ENOKEY; + + if (req->src_len == CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with non-encoded signature from user space */ + nents = sg_nents_for_len(req->src, CCP_SM2_OPERAND_LEN * 3); + if (nents < 0) + return -EINVAL; + + scatterwalk_map_and_copy(src->operand_e, req->src, 0, + CCP_SM2_OPERAND_LEN * 3, 0); + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + + return ret; + } else if (req->src_len < CCP_SM2_OPERAND_LEN * 3) { + /* Compatible with usage like sm2 test of testmgr */ + siglen = req->src_len; + if (req->dst_len != CCP_SM2_OPERAND_LEN) + return -EINVAL; + } else { + /* deal with der encoding signature from user space */ + siglen = req->src_len - CCP_SM2_OPERAND_LEN; + } + + buffer = kmalloc(siglen + CCP_SM2_OPERAND_LEN, GFP_KERNEL); + if (!buffer) + return -ENOMEM; + + sg_pcopy_to_buffer(req->src, + sg_nents_for_len(req->src, siglen + CCP_SM2_OPERAND_LEN), + buffer, siglen + CCP_SM2_OPERAND_LEN, 0); + + sig.sig_r = NULL; + sig.sig_s = NULL; + ret = asn1_ber_decoder(&ccp_sm2_sign_decoder, &sig, + buffer, siglen); + + if (ret) + goto error; + + memcpy(src->operand_e, buffer + siglen, CCP_SM2_OPERAND_LEN); + + if (sig.r_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_d, sig.sig_r + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_d, sig.sig_r, CCP_SM2_OPERAND_LEN); + + if (sig.s_len > CCP_SM2_OPERAND_LEN) + memcpy(src->operand_k, sig.sig_s + 1, CCP_SM2_OPERAND_LEN); + else + memcpy(src->operand_k, sig.sig_s, CCP_SM2_OPERAND_LEN); + + memcpy(src->operand_px, ctx->u.sm2.pub_key, CCP_SM2_OPERAND_LEN); + memcpy(src->operand_py, ctx->u.sm2.pub_key + CCP_SM2_OPERAND_LEN, + CCP_SM2_OPERAND_LEN); + + rctx->req = req; + rctx->phase = CCP_SM2_VERIFY_PH_VERIFY; + ret = ccp_sm2_post_cmd(rctx, CCP_SM2_VERIFY_SRC_SIZE, + CCP_SM2_MODE_VERIFY, 0); + +error: + kfree(buffer); + return ret; +} + +static int ccp_sm2_verify_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (dst->u.status) + return -EBADMSG; + + return 0; +} + +static int ccp_sm2_sign_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + struct ccp_sm2_sign_src *src = (struct ccp_sm2_sign_src *)rctx->src; + struct akcipher_request *req = rctx->req; + + if (unlikely(dst->u.status)) + return -EAGAIN; + + /* save signature */ + scatterwalk_map_and_copy(dst->result_r, req->dst, 0, + CCP_SM2_OPERAND_LEN * 2, 1); + /* clear private key */ + memset(src->operand_d, 0, 
CCP_SM2_PRIVATE_KEY_LEN); + + return 0; +} + +static int ccp_sm2_enc_kg_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + /* random operand_k is not satisfied with[1, n-1], try again */ + if (unlikely(dst->u.status)) + return -EAGAIN; + + INIT_WORK(&rctx->work, ccp_sm2_enc_lp); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_enc_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_enc_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_dec_lp_handle(struct ccp_sm2_req_ctx *rctx) +{ + struct ccp_sm2_dst *dst = (struct ccp_sm2_dst *)rctx->dst; + + if (unlikely(dst->u.status)) + return -EIO; + + INIT_WORK(&rctx->work, ccp_sm2_dec_compute); + schedule_work(&rctx->work); + + return -EINPROGRESS; +} + +static int ccp_sm2_complete(struct crypto_async_request *async_req, int ret) +{ + struct akcipher_request *req = + container_of(async_req, struct akcipher_request, base); + struct ccp_sm2_req_ctx *rctx = akcipher_request_ctx(req); + + if (ret) + return ret; + + switch (rctx->phase) { + case CCP_SM2_SIGN_PH_SIGN: + ret = ccp_sm2_sign_handle(rctx); + break; + case CCP_SM2_VERIFY_PH_VERIFY: + ret = ccp_sm2_verify_handle(rctx); + break; + case CCP_SM2_ENC_PH_KG: + ret = ccp_sm2_enc_kg_handle(rctx); + break; + case CCP_SM2_ENC_PH_LP: + ret = ccp_sm2_enc_lp_handle(rctx); + break; + case CCP_SM2_DEC_PH_LP: + ret = ccp_sm2_dec_lp_handle(rctx); + break; + } + + return ret; +} + +static int ccp_sm2_init_tfm(struct crypto_akcipher *tfm) +{ + struct ccp_ctx *ctx = akcipher_tfm_ctx(tfm); + + akcipher_set_reqsize(tfm, sizeof(struct ccp_sm2_req_ctx)); + ctx->complete = ccp_sm2_complete; + + return 0; +} + +static void ccp_sm2_exit_tfm(struct crypto_akcipher *tfm) +{ +} + +static struct akcipher_alg ccp_sm2_defaults = { + .sign = ccp_sm2_sign, + .verify = ccp_sm2_verify, + .encrypt = ccp_sm2_encrypt, + .decrypt = ccp_sm2_decrypt, + .set_pub_key = ccp_sm2_setpubkey, + .set_priv_key = ccp_sm2_setprivkey, + .max_size = ccp_sm2_maxsize, + .init = ccp_sm2_init_tfm, + .exit = ccp_sm2_exit_tfm, + .base = { + .cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY, + .cra_ctxsize = sizeof(struct ccp_ctx), + .cra_priority = CCP_CRA_PRIORITY, + .cra_module = THIS_MODULE, + }, +}; + +struct ccp_sm2_def { + unsigned int version; + const char *name; + const char *driver_name; + struct akcipher_alg *alg_defaults; +}; + +static struct ccp_sm2_def sm2_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm2", + .driver_name = "sm2-ccp", + .alg_defaults = &ccp_sm2_defaults, + } +}; + +static int ccp_register_sm2_hygon_alg(struct list_head *head, + const struct ccp_sm2_def *def) +{ + struct ccp_crypto_akcipher_alg *ccp_alg; + struct akcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + + ret = crypto_register_akcipher(alg); + if (ret) { + pr_err("%s akcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm2_hygon_algs(struct 
list_head *head)
+{
+ int i, ret;
+ unsigned int ccpversion = ccp_version();
+
+ for (i = 0; i < ARRAY_SIZE(sm2_algs); i++) {
+ if (sm2_algs[i].version > ccpversion)
+ continue;
+ ret = ccp_register_sm2_hygon_alg(head, &sm2_algs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
diff --git a/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
new file mode 100644
index 0000000000000000000000000000000000000000..46ddbc1f14a8ec48df3b4e447d99a9c4788272db
--- /dev/null
+++ b/drivers/crypto/ccp/ccp-crypto-sm3-hygon.c
@@ -0,0 +1,488 @@
+/*
+ * Hygon Cryptographic Coprocessor (CCP) SM3 crypto API support
+ *
+ * Copyright (C) 2022 Hygon Info Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ccp-crypto.h"
+
+static int ccp_sm3_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct ahash_request *req = ahash_request_cast(async_req);
+ struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+
+ if (ret)
+ goto e_free;
+
+ rctx->msg_bits += (rctx->hash_cnt << 3);
+ if (rctx->hash_rem) {
+ /* save remaining data to buffer */
+ unsigned int offset = rctx->nbytes - rctx->hash_rem;
+
+ scatterwalk_map_and_copy(rctx->buf, rctx->src,
+ offset, rctx->hash_rem, 0);
+ rctx->buf_count = rctx->hash_rem;
+ } else {
+ rctx->buf_count = 0;
+ }
+
+ if (rctx->final) {
+ if (req->result)
+ memcpy(req->result, rctx->ctx, SM3_DIGEST_SIZE);
+
+ memset(rctx->ctx, 0, SM3_DIGEST_SIZE);
+ }
+
+e_free:
+ sg_free_table(&rctx->data_sg);
+
+ return ret;
+}
+
+static int ccp_do_sm3_update(struct ahash_request *req, unsigned int nbytes,
+ unsigned int final)
+{
+ struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_ahash_ctx(tfm);
+ struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req);
+ struct scatterlist *sg = req->src;
+ struct ccp_sm3_engine *sm3 = NULL;
+ unsigned int sg_count;
+ gfp_t gfp;
+ u64 len, msg_bits = 0;
+ int nents;
+ int ret;
+
+ /* the length of src must be validated here, otherwise
+ * ccp_sm3_complete may dereference a NULL scatterlist
+ */
+ if (nbytes) {
+ nents = sg_nents_for_len(req->src, nbytes);
+ if (nents < 0)
+ return -EINVAL;
+ }
+
+ len = (u64)rctx->buf_count + (u64)nbytes;
+ if (len <= SM3_BLOCK_SIZE) {
+ scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src,
+ 0, nbytes, 0);
+ rctx->buf_count += nbytes;
+ if (!final)
+ return 0;
+
+ sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count);
+ sg = &rctx->buf_sg;
+ } else {
+ gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
+ GFP_KERNEL : GFP_ATOMIC; + + if (rctx->buf_count) { + /* build the scatterlist table: (buffer and input data) */ + sg_count = sg_nents(req->src) + 1; + ret = sg_alloc_table(&rctx->data_sg, sg_count, gfp); + if (ret) + return ret; + + sg_init_one(&rctx->buf_sg, rctx->buf, rctx->buf_count); + sg = ccp_crypto_sg_table_add( + &rctx->data_sg, &rctx->buf_sg); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg = ccp_crypto_sg_table_add(&rctx->data_sg, + req->src); + if (!sg) { + ret = -EINVAL; + goto e_free; + } + sg_mark_end(sg); + + sg = rctx->data_sg.sgl; + } else { + sg = req->src; + } + } + + rctx->final = final; + if (final) { + rctx->hash_rem = 0; + rctx->hash_cnt = len; + msg_bits = rctx->msg_bits + (len << 3); + } else { + rctx->hash_rem = len & (SM3_BLOCK_SIZE - 1); + rctx->hash_cnt = len - rctx->hash_rem; + rctx->src = req->src; + rctx->nbytes = nbytes; + } + + memset(&rctx->cmd, 0, sizeof(rctx->cmd)); + INIT_LIST_HEAD(&rctx->cmd.entry); + rctx->cmd.engine = CCP_ENGINE_SM3; + + sm3 = &rctx->cmd.u.sm3; + sm3->type = CCP_SM3_TYPE_256; + sm3->ctx = &rctx->ctx_sg; + sm3->ctx_len = SM3_DIGEST_SIZE; + sm3->src = sg; + sm3->src_len = rctx->hash_cnt; + sm3->first = rctx->msg_bits ? 0 : 1; + sm3->final = final; + sm3->msg_bits = msg_bits; + if (final && ctx->u.sm3.key_len) { + sm3->opad = &ctx->u.sm3.opad_sg; + sm3->opad_len = SM3_BLOCK_SIZE; + } + + ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); + + return ret; + +e_free: + sg_free_table(&rctx->data_sg); + + return ret; +} + +static int ccp_sm3_init(struct ahash_request *req) +{ + struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); + struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + + if ((crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) && + (!ctx->u.sm3.key_len)) + return -ENOKEY; + + memset(rctx, 0, sizeof(*rctx)); + if (ctx->u.sm3.key_len) { + /* buffer the HMAC key for first update */ + memcpy(rctx->buf, ctx->u.sm3.ipad, SM3_BLOCK_SIZE); + rctx->buf_count = SM3_BLOCK_SIZE; + } + + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + + return 0; +} + +static int ccp_sm3_update(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 0); +} + +static int ccp_sm3_final(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, 0, 1); +} + +static int ccp_sm3_finup(struct ahash_request *req) +{ + return ccp_do_sm3_update(req, req->nbytes, 1); +} + +static int ccp_sm3_digest(struct ahash_request *req) +{ + int ret; + + ret = ccp_sm3_init(req); + if (unlikely(ret)) + return ret; + + return ccp_sm3_finup(req); +} + +static int ccp_sm3_export(struct ahash_request *req, void *out) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!out) + return -EINVAL; + + /* don't let anything leak to 'out' */ + memset(&state, 0, sizeof(state)); + + state.msg_bits = rctx->msg_bits; + memcpy(state.ctx, rctx->ctx, SM3_DIGEST_SIZE); + state.buf_count = rctx->buf_count; + memcpy(state.buf, rctx->buf, SM3_BLOCK_SIZE); + + /* 'out' may not be aligned so memcpy from local variable */ + memcpy(out, &state, sizeof(state)); + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_import(struct ahash_request *req, const void *in) +{ + struct ccp_sm3_req_ctx *rctx = ahash_request_ctx(req); + struct ccp_sm3_exp_ctx state; + + if (!in) + return -EINVAL; + + /* 'in' may not be aligned so memcpy to local variable */ + memcpy(&state, in, sizeof(state)); + + memset(rctx, 0, sizeof(*rctx)); + rctx->msg_bits = 
state.msg_bits; + memcpy(rctx->ctx, state.ctx, SM3_DIGEST_SIZE); + sg_init_one(&rctx->ctx_sg, rctx->ctx, SM3_DIGEST_SIZE); + rctx->buf_count = state.buf_count; + memcpy(rctx->buf, state.buf, SM3_BLOCK_SIZE); + + memset(&state, 0, sizeof(state)); + + return 0; +} + +static int ccp_sm3_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int key_len) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); + struct crypto_shash *shash = ctx->u.sm3.hmac_tfm; + + SHASH_DESC_ON_STACK(sdesc, shash); + + int i, ret; + + /* set to zero until complete */ + ctx->u.sm3.key_len = 0; + if (!key) + return -EINVAL; + + if (!key_len) { + crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + /* clear key area to provide zero padding for keys smaller + * than the block size + */ + memset(ctx->u.sm3.key, 0, SM3_BLOCK_SIZE); + + if (key_len > SM3_BLOCK_SIZE) { + /* must hash the input key */ + sdesc->tfm = shash; + ret = crypto_shash_digest(sdesc, key, key_len, + ctx->u.sm3.key); + if (ret) { + crypto_ahash_set_flags( + tfm, CRYPTO_TFM_NEED_KEY); + return -EINVAL; + } + + key_len = SM3_DIGEST_SIZE; + } else { + memcpy(ctx->u.sm3.key, key, key_len); + } + + for (i = 0; i < SM3_BLOCK_SIZE; i++) { + ctx->u.sm3.ipad[i] = ctx->u.sm3.key[i] ^ HMAC_IPAD_VALUE; + ctx->u.sm3.opad[i] = ctx->u.sm3.key[i] ^ HMAC_OPAD_VALUE; + } + + sg_init_one(&ctx->u.sm3.opad_sg, ctx->u.sm3.opad, SM3_BLOCK_SIZE); + + ctx->u.sm3.key_len = key_len; + + return 0; +} + +static int ccp_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + + ctx->complete = ccp_sm3_complete; + crypto_ahash_set_reqsize(ahash, sizeof(struct ccp_sm3_req_ctx)); + + return 0; +} + +static void ccp_sm3_cra_exit(struct crypto_tfm *tfm) +{ +} + +static int ccp_hmac_sm3_cra_init(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + struct ccp_crypto_ahash_alg *alg = ccp_crypto_ahash_alg(tfm); + struct crypto_shash *hmac_tfm; + + hmac_tfm = crypto_alloc_shash(alg->child_alg, 0, 0); + if (IS_ERR(hmac_tfm)) { + pr_warn("could not load driver %s need for HMAC support\n", + alg->child_alg); + return PTR_ERR(hmac_tfm); + } + + ctx->u.sm3.hmac_tfm = hmac_tfm; + + return ccp_sm3_cra_init(tfm); +} + +static void ccp_hmac_sm3_cra_exit(struct crypto_tfm *tfm) +{ + struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->u.sm3.hmac_tfm) + crypto_free_shash(ctx->u.sm3.hmac_tfm); + + ccp_sm3_cra_exit(tfm); +} + +struct ccp_sm3_def { + unsigned int version; + const char *name; + const char *drv_name; + enum ccp_sm3_type type; + u32 digest_size; + u32 block_size; +}; + +static struct ccp_sm3_def sm3_algs[] = { + { + .version = CCP_VERSION(5, 0), + .name = "sm3", + .drv_name = "sm3-ccp", + .type = CCP_SM3_TYPE_256, + .digest_size = SM3_DIGEST_SIZE, + .block_size = SM3_BLOCK_SIZE, + }, +}; + +static int ccp_register_hmac_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def, + const struct ccp_crypto_ahash_alg *base_alg) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + /* copy the base algorithm and only change what's necessary */ + *ccp_alg = *base_alg; + INIT_LIST_HEAD(&ccp_alg->entry); + + strscpy(ccp_alg->child_alg, def->name, CRYPTO_MAX_ALG_NAME); + + alg = &ccp_alg->alg; + alg->setkey = ccp_sm3_setkey; + + base = &alg->halg.base; + snprintf(base->cra_name, 
CRYPTO_MAX_ALG_NAME, "hmac(%s)", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s", + def->drv_name); + base->cra_flags |= CRYPTO_ALG_NEED_FALLBACK; + base->cra_init = ccp_hmac_sm3_cra_init; + base->cra_exit = ccp_hmac_sm3_cra_exit; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return ret; +} + +static int ccp_register_sm3_hygon_alg(struct list_head *head, + const struct ccp_sm3_def *def) +{ + struct ccp_crypto_ahash_alg *ccp_alg; + struct ahash_alg *alg; + struct hash_alg_common *halg; + struct crypto_alg *base; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->type = def->type; + + alg = &ccp_alg->alg; + alg->init = ccp_sm3_init; + alg->update = ccp_sm3_update; + alg->final = ccp_sm3_final; + alg->finup = ccp_sm3_finup; + alg->digest = ccp_sm3_digest; + alg->export = ccp_sm3_export; + alg->import = ccp_sm3_import; + + halg = &alg->halg; + halg->digestsize = def->digest_size; + halg->statesize = sizeof(struct ccp_sm3_exp_ctx); + + base = &halg->base; + snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->drv_name); + base->cra_flags = CRYPTO_ALG_ASYNC | + CRYPTO_ALG_KERN_DRIVER_ONLY | + CRYPTO_ALG_NEED_FALLBACK; + base->cra_blocksize = def->block_size; + base->cra_ctxsize = sizeof(struct ccp_ctx); + base->cra_priority = CCP_CRA_PRIORITY; + base->cra_init = ccp_sm3_cra_init; + base->cra_exit = ccp_sm3_cra_exit; + base->cra_module = THIS_MODULE; + + ret = crypto_register_ahash(alg); + if (ret) { + pr_err("%s ahash algorithm registration error (%d)\n", + base->cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + ret = ccp_register_hmac_sm3_hygon_alg(head, def, ccp_alg); + + return ret; +} + +int ccp_register_sm3_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm3_algs); i++) { + if (sm3_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm3_hygon_alg(head, &sm3_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c new file mode 100644 index 0000000000000000000000000000000000000000..0d1c750ff118d99546b3d345465b0354dc3fadfa --- /dev/null +++ b/drivers/crypto/ccp/ccp-crypto-sm4-hygon.c @@ -0,0 +1,324 @@ +/* + * Hygon Cryptographic Coprocessor (CCP) SM4 crypto API support + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "ccp-crypto.h"
+
+enum ccp_sm4_alg_mode {
+ CCP_SM4_ALG_MODE_ECB = CCP_SM4_MODE_ECB,
+ CCP_SM4_ALG_MODE_CBC = CCP_SM4_MODE_CBC,
+ CCP_SM4_ALG_MODE_OFB = CCP_SM4_MODE_OFB,
+ CCP_SM4_ALG_MODE_CFB = CCP_SM4_MODE_CFB,
+ CCP_SM4_ALG_MODE_CTR = CCP_SM4_MODE_CTR,
+ CCP_SM4_ALG_MODE_ECB_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_ECB,
+ CCP_SM4_ALG_MODE_CBC_HS = CCP_SM4_MODE_HS_SEL | CCP_SM4_MODE_CBC,
+ CCP_SM4_ALG_MODE__LAST,
+};
+
+static int ccp_sm4_complete(struct crypto_async_request *async_req, int ret)
+{
+ struct skcipher_request *req = skcipher_request_cast(async_req);
+ struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+ struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);
+
+ if (ret)
+ return ret;
+
+ if ((ctx->u.sm4.mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
+ memcpy(req->iv, rctx->iv, SM4_BLOCK_SIZE);
+ memset(rctx->iv, 0, SM4_BLOCK_SIZE);
+ }
+
+ return 0;
+}
+
+static int ccp_sm4_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int key_len)
+{
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ /* key_len is validated by the skcipher core against
+ * min_keysize/max_keysize, but key itself isn't checked
+ */
+ if (!key)
+ return -EINVAL;
+
+ memcpy(ctx->u.sm4.key, key, SM4_KEY_SIZE);
+ sg_init_one(&ctx->u.sm4.key_sg, ctx->u.sm4.key, SM4_KEY_SIZE);
+
+ ctx->u.sm4.key_len = SM4_KEY_SIZE;
+
+ return 0;
+}
+
+static int ccp_sm4_crypt(struct skcipher_request *req, bool encrypt)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+ struct ccp_sm4_req_ctx *rctx = skcipher_request_ctx(req);
+ struct scatterlist *iv_sg = NULL;
+ struct ccp_cmd *cmd = NULL;
+ enum ccp_sm4_alg_mode mode;
+ enum ccp_sm4_action action;
+ int ret;
+
+ if (!ctx->u.sm4.key_len)
+ return -ENOKEY;
+
+ mode = ctx->u.sm4.mode;
+ if ((mode != CCP_SM4_ALG_MODE_CTR) &&
+ (mode != CCP_SM4_ALG_MODE_OFB) &&
+ (mode != CCP_SM4_ALG_MODE_CFB) &&
+ (req->cryptlen & (SM4_BLOCK_SIZE - 1)))
+ return -EINVAL;
+
+ if ((mode & CCP_SM4_MODE_MASK) != CCP_SM4_ALG_MODE_ECB) {
+ if (!req->iv)
+ return -EINVAL;
+
+ memcpy(rctx->iv, req->iv, SM4_BLOCK_SIZE);
+ iv_sg = &rctx->iv_sg;
+ sg_init_one(iv_sg, rctx->iv, SM4_BLOCK_SIZE);
+ }
+
+ cmd = &rctx->cmd;
+ memset(cmd, 0, sizeof(*cmd));
+ INIT_LIST_HEAD(&cmd->entry);
+ action = encrypt ? CCP_SM4_ACTION_ENCRYPT : CCP_SM4_ACTION_DECRYPT;
+ if (mode == CCP_SM4_ALG_MODE_CTR) {
+ cmd->engine = CCP_ENGINE_SM4_CTR;
+ cmd->u.sm4_ctr.action = action;
+ cmd->u.sm4_ctr.size = 63;
+ cmd->u.sm4_ctr.step = 1;
+
+ cmd->u.sm4_ctr.key = &ctx->u.sm4.key_sg;
+ cmd->u.sm4_ctr.key_len = SM4_KEY_SIZE;
+ cmd->u.sm4_ctr.iv = iv_sg;
+ cmd->u.sm4_ctr.iv_len = SM4_BLOCK_SIZE;
+
+ cmd->u.sm4_ctr.src = req->src;
+ cmd->u.sm4_ctr.dst = req->dst;
+ cmd->u.sm4_ctr.src_len = req->cryptlen;
+
+ } else {
+ cmd->engine = CCP_ENGINE_SM4;
+ cmd->u.sm4.mode = mode & CCP_SM4_MODE_MASK;
+ cmd->u.sm4.action = action;
+ if (mode & CCP_SM4_MODE_HS_SEL)
+ cmd->u.sm4.select = 1;
+
+ cmd->u.sm4.key = &ctx->u.sm4.key_sg;
+ cmd->u.sm4.key_len = SM4_KEY_SIZE;
+ cmd->u.sm4.iv = iv_sg;
+ cmd->u.sm4.iv_len = iv_sg ?
SM4_BLOCK_SIZE : 0;
+
+ cmd->u.sm4.src = req->src;
+ cmd->u.sm4.dst = req->dst;
+ cmd->u.sm4.src_len = req->cryptlen;
+ }
+
+ ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd);
+
+ return ret;
+}
+
+static int ccp_sm4_encrypt(struct skcipher_request *req)
+{
+ return ccp_sm4_crypt(req, true);
+}
+
+static int ccp_sm4_decrypt(struct skcipher_request *req)
+{
+ return ccp_sm4_crypt(req, false);
+}
+
+static int ccp_sm4_init_tfm(struct crypto_skcipher *tfm)
+{
+ struct ccp_crypto_skcipher_alg *alg = ccp_crypto_skcipher_alg(tfm);
+ struct ccp_ctx *ctx = crypto_skcipher_ctx(tfm);
+
+ ctx->complete = ccp_sm4_complete;
+ ctx->u.sm4.mode = alg->mode;
+
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct ccp_sm4_req_ctx));
+
+ return 0;
+}
+
+static const struct skcipher_alg ccp_sm4_defaults = {
+ .setkey = ccp_sm4_setkey,
+ .encrypt = ccp_sm4_encrypt,
+ .decrypt = ccp_sm4_decrypt,
+ .min_keysize = SM4_KEY_SIZE,
+ .max_keysize = SM4_KEY_SIZE,
+ .init = ccp_sm4_init_tfm,
+
+ .base.cra_flags = CRYPTO_ALG_ASYNC |
+ CRYPTO_ALG_KERN_DRIVER_ONLY |
+ CRYPTO_ALG_NEED_FALLBACK,
+ .base.cra_blocksize = SM4_BLOCK_SIZE,
+ .base.cra_ctxsize = sizeof(struct ccp_ctx),
+ .base.cra_priority = CCP_CRA_PRIORITY,
+ .base.cra_module = THIS_MODULE,
+};
+
+struct ccp_sm4_def {
+ enum ccp_sm4_alg_mode mode;
+ unsigned int version;
+ const char *name;
+ const char *driver_name;
+ unsigned int blocksize;
+ unsigned int ivsize;
+ const struct skcipher_alg *alg_defaults;
+};
+
+static struct ccp_sm4_def sm4_algs[] = {
+ {
+ .mode = CCP_SM4_ALG_MODE_ECB,
+ .version = CCP_VERSION(5, 0),
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = 0,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_ECB_HS,
+ .version = CCP_VERSION(5, 0),
+ .name = "ecb(sm4)",
+ .driver_name = "ecb-sm4-hs-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = 0,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_CBC,
+ .version = CCP_VERSION(5, 0),
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_CBC_HS,
+ .version = CCP_VERSION(5, 0),
+ .name = "cbc(sm4)",
+ .driver_name = "cbc-sm4-hs-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_OFB,
+ .version = CCP_VERSION(5, 0),
+ .name = "ofb(sm4)",
+ .driver_name = "ofb-sm4-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_CFB,
+ .version = CCP_VERSION(5, 0),
+ .name = "cfb(sm4)",
+ .driver_name = "cfb-sm4-ccp",
+ .blocksize = SM4_BLOCK_SIZE,
+ .ivsize = SM4_BLOCK_SIZE,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+ {
+ .mode = CCP_SM4_ALG_MODE_CTR,
+ .version = CCP_VERSION(5, 0),
+ .name = "ctr(sm4)",
+ .driver_name = "ctr-sm4-ccp",
+ .blocksize = 1,
+ .ivsize = SM4_BLOCK_SIZE,
+ .alg_defaults = &ccp_sm4_defaults,
+ },
+};
+
+static int ccp_register_sm4_hygon_alg(struct list_head
*head, + const struct ccp_sm4_def *def) +{ + struct ccp_crypto_skcipher_alg *ccp_alg; + struct skcipher_alg *alg; + int ret; + + ccp_alg = kzalloc(sizeof(*ccp_alg), GFP_KERNEL); + if (!ccp_alg) + return -ENOMEM; + + INIT_LIST_HEAD(&ccp_alg->entry); + + ccp_alg->mode = def->mode; + + /* copy the defaults and override as necessary */ + alg = &ccp_alg->alg; + *alg = *def->alg_defaults; + snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", def->name); + snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s", + def->driver_name); + alg->base.cra_blocksize = def->blocksize; + alg->ivsize = def->ivsize; + + ret = crypto_register_skcipher(alg); + if (ret) { + pr_err("%s skcipher algorithm registration error (%d)\n", + alg->base.cra_name, ret); + kfree(ccp_alg); + return ret; + } + + list_add(&ccp_alg->entry, head); + + return 0; +} + +int ccp_register_sm4_hygon_algs(struct list_head *head) +{ + int i, ret; + unsigned int ccpversion = ccp_version(); + + for (i = 0; i < ARRAY_SIZE(sm4_algs); i++) { + if (sm4_algs[i].version > ccpversion) + continue; + ret = ccp_register_sm4_hygon_alg(head, &sm4_algs[i]); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/crypto/ccp/ccp-crypto.h b/drivers/crypto/ccp/ccp-crypto.h index e42450d071680e2ef0432ce704bf4a48f1c35962..58b2950f91001a6b12cfa5c5bae8add6663e7c67 100644 --- a/drivers/crypto/ccp/ccp-crypto.h +++ b/drivers/crypto/ccp/ccp-crypto.h @@ -258,6 +258,105 @@ struct ccp_rsa_req_ctx { #define CCP_RSA_MAXMOD (4 * 1024 / 8) #define CCP5_RSA_MAXMOD (16 * 1024 / 8) +/***** SM2 related defines *****/ +#define CCP_SM2_OPERAND_LEN 32 +#define CCP_SM2_PRIVATE_KEY_LEN CCP_SM2_OPERAND_LEN +#define CCP_SM2_PUBLIC_KEY_LEN (CCP_SM2_OPERAND_LEN * 2) +#define CCP_SM2_ENCRYPT_EXT_LEN (CCP_SM2_PUBLIC_KEY_LEN + SM3_DIGEST_SIZE) +#define CCP_SM2_MMUL_LEN (CCP_SM2_OPERAND_LEN * 2) + +struct ccp_sm2_ctx { + u32 pri_key_len; + u32 pub_key_len; + u8 pri_key[CCP_SM2_PRIVATE_KEY_LEN]; + u8 pub_key[CCP_SM2_PUBLIC_KEY_LEN]; +}; + +enum ccp_sm2_op_phase { + CCP_SM2_SIGN_PH_SIGN, + CCP_SM2_VERIFY_PH_VERIFY, + CCP_SM2_ENC_PH_KG, + CCP_SM2_ENC_PH_LP, + CCP_SM2_DEC_PH_LP +}; + +struct ccp_sm2_req_ctx { + enum ccp_sm2_op_phase phase; + struct akcipher_request *req; + + u8 src[CCP_SM2_VERIFY_SRC_SIZE]; + u8 dst[CCP_SM2_DST_SIZE]; + + struct scatterlist src_sg; + struct scatterlist dst_sg; + + struct work_struct work; + + struct ccp_cmd cmd; +}; + +/***** SM3 related defines *****/ +struct ccp_sm3_ctx { + u32 key_len; + u8 key[SM3_BLOCK_SIZE]; + + u8 ipad[SM3_BLOCK_SIZE]; + + u8 opad[SM3_BLOCK_SIZE]; + struct scatterlist opad_sg; + + struct crypto_shash *hmac_tfm; +}; + +struct ccp_sm3_req_ctx { + u64 msg_bits; + + unsigned int first; + unsigned int final; + + struct scatterlist *src; + u32 nbytes; + + u64 hash_cnt; + u32 hash_rem; + + struct sg_table data_sg; + struct scatterlist *src_sg; + + struct scatterlist ctx_sg; + u8 ctx[SM3_DIGEST_SIZE]; + + struct scatterlist buf_sg; + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + +struct ccp_sm3_exp_ctx { + u64 msg_bits; + + u8 ctx[SM3_DIGEST_SIZE]; + + u32 buf_count; + u8 buf[SM3_BLOCK_SIZE]; +}; + +/***** SM4 related defines *****/ +struct ccp_sm4_ctx { + struct scatterlist key_sg; + u8 key[SM4_KEY_SIZE]; + u32 key_len; + u32 mode; +}; + +struct ccp_sm4_req_ctx { + struct scatterlist iv_sg; + u8 iv[SM4_BLOCK_SIZE]; + + struct ccp_cmd cmd; +}; + /***** Common Context Structure *****/ struct ccp_ctx { int (*complete)(struct crypto_async_request *req, int ret); @@ -267,6 +366,9 @@ struct ccp_ctx { 
struct ccp_rsa_ctx rsa; struct ccp_sha_ctx sha; struct ccp_des3_ctx des3; + struct ccp_sm2_ctx sm2; + struct ccp_sm3_ctx sm3; + struct ccp_sm4_ctx sm4; } u; }; @@ -282,5 +384,8 @@ int ccp_register_aes_aeads(struct list_head *head); int ccp_register_sha_algs(struct list_head *head); int ccp_register_des3_algs(struct list_head *head); int ccp_register_rsa_algs(struct list_head *head); +int ccp_register_sm2_hygon_algs(struct list_head *head); +int ccp_register_sm3_hygon_algs(struct list_head *head); +int ccp_register_sm4_hygon_algs(struct list_head *head); #endif diff --git a/drivers/crypto/ccp/ccp-dev-v5.c b/drivers/crypto/ccp/ccp-dev-v5.c index 7b73332d6aa1abcbe3794d75a6b66522d4031b88..e5c129c3e0497f96ccbb2d454b00aca675e99d7c 100644 --- a/drivers/crypto/ccp/ccp-dev-v5.c +++ b/drivers/crypto/ccp/ccp-dev-v5.c @@ -131,6 +131,28 @@ union ccp_function { u16 type:2; u16 mode:3; } ecc; + struct { + u16 rand:1; + u16 rsvd:11; + u16 mode:3; + } sm2; + struct { + u16 rsvd:10; + u16 type:4; + u16 rsvd2:1; + } sm3; + struct { + u16 rsvd:7; + u16 encrypt:1; + u16 mode:4; + u16 select:1; + u16 rsvd2:2; + } sm4; + struct { + u16 size:7; + u16 encrypt:1; + u16 step:7; + } sm4_ctr; u16 raw; }; @@ -151,6 +173,15 @@ union ccp_function { #define CCP_PT_BITWISE(p) ((p)->pt.bitwise) #define CCP_ECC_MODE(p) ((p)->ecc.mode) #define CCP_ECC_AFFINE(p) ((p)->ecc.one) +#define CCP_SM2_RAND(p) ((p)->sm2.rand) +#define CCP_SM2_MODE(p) ((p)->sm2.mode) +#define CCP_SM3_TYPE(p) ((p)->sm3.type) +#define CCP_SM4_ENCRYPT(p) ((p)->sm4.encrypt) +#define CCP_SM4_MODE(p) ((p)->sm4.mode) +#define CCP_SM4_SELECT(p) ((p)->sm4.select) +#define CCP_SM4_CTR_ENCRYPT(p) ((p)->sm4_ctr.encrypt) +#define CCP_SM4_CTR_STEP(p) ((p)->sm4_ctr.step) +#define CCP_SM4_CTR_SIZE(p) ((p)->sm4_ctr.size) /* Word 0 */ #define CCP5_CMD_DW0(p) ((p)->dw0) @@ -186,6 +217,8 @@ union ccp_function { #define CCP5_CMD_FIX_DST(p) ((p)->dw5.fields.fixed) #define CCP5_CMD_SHA_LO(p) ((p)->dw4.sha_len_lo) #define CCP5_CMD_SHA_HI(p) ((p)->dw5.sha_len_hi) +#define CCP5_CMD_SM3_LO(p) ((p)->dw4.sm3_len_lo) +#define CCP5_CMD_SM3_HI(p) ((p)->dw5.sm3_len_hi) /* Word 6/7 */ #define CCP5_CMD_DW6(p) ((p)->key_lo) @@ -194,6 +227,17 @@ union ccp_function { #define CCP5_CMD_KEY_HI(p) ((p)->dw7.key_hi) #define CCP5_CMD_KEY_MEM(p) ((p)->dw7.key_mem) +static inline unsigned int command_per_queue(void) +{ +#ifdef CONFIG_HYGON_GM + return boot_cpu_data.x86_vendor == X86_VENDOR_HYGON ? 
+ HYGON_COMMANDS_PER_QUEUE : + COMMANDS_PER_QUEUE; +#else + return COMMANDS_PER_QUEUE; +#endif +} + static inline u32 low_address(unsigned long addr) { return (u64)addr & 0x0ffffffff; @@ -207,15 +251,86 @@ static inline u32 high_address(unsigned long addr) static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q) { unsigned int head_idx, n; - u32 head_lo, queue_start; + u32 head_lo, queue_start, command_per_q; + command_per_q = command_per_queue(); queue_start = low_address(cmd_q->qdma_tail); head_lo = ioread32(cmd_q->reg_head_lo); head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc); - n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1; + n = head_idx + command_per_q - cmd_q->qidx - 1; - return n % COMMANDS_PER_QUEUE; /* Always one unused spot */ + return n % command_per_q; /* Always one unused spot */ +} + +static int ccp5_do_multi_cmds(struct ccp5_desc *desc, + struct ccp_cmd_queue *cmd_q) +{ + u32 *mP; + __le32 *dP; + int i; + u32 command_per_q; + + command_per_q = command_per_queue(); + + cmd_q->total_ops++; + + if (CCP5_CMD_SOC(desc)) { + CCP5_CMD_IOC(desc) = 1; + CCP5_CMD_SOC(desc) = 0; + } + + mutex_lock(&cmd_q->q_mutex); + + mP = (u32 *) &cmd_q->qbase[cmd_q->qidx]; + dP = (__le32 *) desc; + for (i = 0; i < 8; i++) + mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ + + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; + + mutex_unlock(&cmd_q->q_mutex); + + return 0; +} + +static int ccp5_do_run_cmd(struct ccp_op *op) +{ + struct ccp_cmd_queue *cmd_q = op->cmd_q; + u32 tail; + int ret = 0; + + mutex_lock(&cmd_q->q_mutex); + + /* The data used by this command must be flushed to memory */ + wmb(); + + /* Write the new tail address back to the queue register */ + tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE); + iowrite32(tail, cmd_q->reg_tail_lo); + + /* Turn the queue back on using our cached control register */ + iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control); + mutex_unlock(&cmd_q->q_mutex); + + if (op->ioc) { + /* Wait for the job to complete */ + ret = wait_event_interruptible(cmd_q->int_queue, + cmd_q->int_rcvd); + if (ret || cmd_q->cmd_error) { + /* Log the error and flush the queue by + * moving the head pointer + */ + if (cmd_q->cmd_error) + ccp_log_error(cmd_q->ccp, cmd_q->cmd_error); + iowrite32(tail, cmd_q->reg_head_lo); + if (!ret) + ret = -EIO; + } + cmd_q->int_rcvd = 0; + } + + return ret; } static int ccp5_do_cmd(struct ccp5_desc *desc, @@ -223,10 +338,11 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, { __le32 *mP; u32 *dP; - u32 tail; + u32 tail, command_per_q; int i; int ret = 0; + command_per_q = command_per_queue(); cmd_q->total_ops++; if (CCP5_CMD_SOC(desc)) { @@ -240,7 +356,7 @@ static int ccp5_do_cmd(struct ccp5_desc *desc, for (i = 0; i < 8; i++) mP[i] = cpu_to_le32(dP[i]); /* handle endianness */ - cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE; + cmd_q->qidx = (cmd_q->qidx + 1) % command_per_q; /* The data used by this command must be flushed to memory */ wmb(); @@ -584,6 +700,163 @@ static int ccp5_perform_ecc(struct ccp_op *op) return ccp5_do_cmd(&desc, op->cmd_q); } +static int ccp5_perform_sm2(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + struct ccp_dma_info *saddr = &op->src.u.dma; + struct ccp_dma_info *daddr = &op->dst.u.dma; + + op->cmd_q->total_sm2_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM2; + + CCP5_CMD_SOC(&desc) = 0; + CCP5_CMD_IOC(&desc) = 1; + CCP5_CMD_INIT(&desc) = 1; + CCP5_CMD_EOM(&desc) = 1; + 
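+ /* SM2 is issued as a single descriptor with INIT and EOM set and
+ * IOC forced on, so each SM2 command completes via interrupt
+ */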
CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM2_RAND(&function) = op->u.sm2.rand; + CCP_SM2_MODE(&function) = op->u.sm2.mode; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + /* Length of source data must match with mode */ + CCP5_CMD_LEN(&desc) = saddr->length; + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(saddr); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(saddr); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(daddr); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(daddr); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + return ccp5_do_cmd(&desc, op->cmd_q); +} + +static int ccp5_perform_sm3(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + + op->cmd_q->total_sm3_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM3; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM3_TYPE(&function) = op->u.sm3.type; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + if (op->eom) { + CCP5_CMD_SM3_LO(&desc) = lower_32_bits(op->u.sm3.msg_bits); + CCP5_CMD_SM3_HI(&desc) = upper_32_bits(op->u.sm3.msg_bits); + } + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_ENCRYPT(&function) = op->u.sm4.action; + CCP_SM4_MODE(&function) = op->u.sm4.mode; + CCP_SM4_SELECT(&function) = op->u.sm4.select; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + +static int ccp5_perform_sm4_ctr(struct ccp_op *op) +{ + struct ccp5_desc desc; + union ccp_function function; + u32 key_addr = op->sb_ctx * LSB_ITEM_SIZE + SM4_BLOCK_SIZE; + + op->cmd_q->total_sm4_ctr_ops++; + + memset(&desc, 0, Q_DESC_SIZE); + + CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SM4_CTR; + + CCP5_CMD_SOC(&desc) = op->soc; + CCP5_CMD_IOC(&desc) = op->ioc; + CCP5_CMD_INIT(&desc) = op->init; + CCP5_CMD_EOM(&desc) = op->eom; + CCP5_CMD_PROT(&desc) = 0; + + function.raw = 0; + CCP_SM4_CTR_SIZE(&function) = op->u.sm4_ctr.size; + CCP_SM4_CTR_ENCRYPT(&function) = op->u.sm4_ctr.action; + CCP_SM4_CTR_STEP(&function) = op->u.sm4_ctr.step; + CCP5_CMD_FUNCTION(&desc) = function.raw; + + CCP5_CMD_LEN(&desc) = op->src.u.dma.length; + + CCP5_CMD_SRC_LO(&desc) = 
ccp_addr_lo(&op->src.u.dma); + CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma); + CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + CCP5_CMD_LSB_ID(&desc) = op->sb_ctx; + + CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma); + CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma); + CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM; + + CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr); + CCP5_CMD_KEY_HI(&desc) = 0; + CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB; + + return ccp5_do_multi_cmds(&desc, op->cmd_q); +} + static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) { int q_mask = 1 << cmd_q->id; @@ -593,6 +866,7 @@ static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status) /* Build a bit mask to know which LSBs this queue has access to. * Don't bother with segment 0 as it has special privileges. */ + status >>= LSB_REGION_WIDTH; for (j = 1; j < MAX_LSB_CNT; j++) { if (status & q_mask) bitmap_set(cmd_q->lsbmask, j, 1); @@ -744,7 +1018,7 @@ static void ccp5_irq_bh(unsigned long data) status = ioread32(cmd_q->reg_interrupt_status); - if (status) { + if (status & SUPPORTED_INTERRUPTS) { cmd_q->int_status = status; cmd_q->q_status = ioread32(cmd_q->reg_status); cmd_q->q_int_status = ioread32(cmd_q->reg_int_status); @@ -753,10 +1027,9 @@ if ((status & INT_ERROR) && !cmd_q->cmd_error) cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status); - cmd_q->int_rcvd = 1; - /* Acknowledge the interrupt and wake the kthread */ iowrite32(status, cmd_q->reg_interrupt_status); + cmd_q->int_rcvd = 1; wake_up_interruptible(&cmd_q->int_queue); } } @@ -784,7 +1057,7 @@ static int ccp5_init(struct ccp_device *ccp) char dma_pool_name[MAX_DMAPOOL_NAME_LEN]; unsigned int qmr, i; u64 status; - u32 status_lo, status_hi; + u32 status_lo, status_hi, command_per_q, queue_size_val; int ret; /* Find available queues */ @@ -801,6 +1074,9 @@ static int ccp5_init(struct ccp_device *ccp) return 1; } + command_per_q = command_per_queue(); + queue_size_val = QUEUE_SIZE_VAL(command_per_q); + for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) { if (!(qmr & (1 << i))) continue; @@ -827,7 +1103,7 @@ /* Page alignment satisfies our needs for N <= 128 */ BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128); - cmd_q->qsize = Q_SIZE(Q_DESC_SIZE); + cmd_q->qsize = Q_SIZE(command_per_q, Q_DESC_SIZE); cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize, &cmd_q->qbase_dma, GFP_KERNEL); @@ -914,7 +1190,7 @@ static int ccp5_init(struct ccp_device *ccp) cmd_q = &ccp->cmd_q[i]; cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT); - cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT; + cmd_q->qcontrol |= queue_size_val << CMD5_Q_SHIFT; cmd_q->qdma_tail = cmd_q->qbase_dma; dma_addr_lo = low_address(cmd_q->qdma_tail); @@ -1062,6 +1338,26 @@ static void ccp5_destroy(struct ccp_device *ccp) } } +static int ccp5_get_trng_mask_param(void) +{ + /* Per the spec, the SM4 high secure module needs 64 bytes of data, + * so the mask register must be initialized with 16 writes, or a + * multiple of 16. + * + * The AES algorithm needs 48 bytes, so the number of writes will + * be 12 or a multiple of 12.
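+ * + * The TRNG output register is read 32 bits at a time, so, as a
 + * worked example, 16 writes seed 16 * 4 = 64 bytes for the SM4 case
 + * and 12 writes seed 12 * 4 = 48 bytes for the AES case.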
+ */ + +#ifdef CONFIG_HYGON_GM + /* for sm4 HS */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + return 16; +#endif + + /* for AES HS */ + return 12; +} + static void ccp5_config(struct ccp_device *ccp) { /* Public side */ @@ -1072,12 +1368,13 @@ static void ccp5other_config(struct ccp_device *ccp) { int i; u32 rnd; + int len = ccp5_get_trng_mask_param(); /* We own all of the queues on the NTB CCP */ iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET); iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET); - for (i = 0; i < 12; i++) { + for (i = 0; i < len; i++) { rnd = ioread32(ccp->io_regs + TRNG_OUT_REG); iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET); } @@ -1103,6 +1400,11 @@ static const struct ccp_actions ccp5_actions = { .rsa = ccp5_perform_rsa, .passthru = ccp5_perform_passthru, .ecc = ccp5_perform_ecc, + .sm2 = ccp5_perform_sm2, + .sm3 = ccp5_perform_sm3, + .sm4 = ccp5_perform_sm4, + .sm4_ctr = ccp5_perform_sm4_ctr, + .run_cmd = ccp5_do_run_cmd, .sballoc = ccp_lsb_alloc, .sbfree = ccp_lsb_free, .init = ccp5_init, diff --git a/drivers/crypto/ccp/ccp-dev.h b/drivers/crypto/ccp/ccp-dev.h index 83350e2d9821e5fd8d76dc4eb26bddf4aa9fc812..e1aa68f4044c84f3f207f966356f61afa7a55386 100644 --- a/drivers/crypto/ccp/ccp-dev.h +++ b/drivers/crypto/ccp/ccp-dev.h @@ -99,12 +99,15 @@ #define CMD5_Q_MEM_LOCATION 0x4 #define CMD5_Q_SIZE 0x1F #define CMD5_Q_SHIFT 3 + #define COMMANDS_PER_QUEUE 16 -#define QUEUE_SIZE_VAL ((ffs(COMMANDS_PER_QUEUE) - 2) & \ - CMD5_Q_SIZE) -#define Q_PTR_MASK (2 << (QUEUE_SIZE_VAL + 5) - 1) +#define HYGON_COMMANDS_PER_QUEUE 8192 + #define Q_DESC_SIZE sizeof(struct ccp5_desc) -#define Q_SIZE(n) (COMMANDS_PER_QUEUE*(n)) + +#define QUEUE_SIZE_VAL(c) ((ffs((c)) - 2) & CMD5_Q_SIZE) +#define Q_PTR_MASK(c) (2 << (QUEUE_SIZE_VAL((c)) + 5) - 1) +#define Q_SIZE(c, n) ((c)*(n)) #define INT_COMPLETION 0x1 #define INT_ERROR 0x2 @@ -334,6 +337,10 @@ struct ccp_cmd_queue { unsigned long total_rsa_ops; unsigned long total_pt_ops; unsigned long total_ecc_ops; + unsigned long total_sm2_ops; + unsigned long total_sm3_ops; + unsigned long total_sm4_ops; + unsigned long total_sm4_ctr_ops; } ____cacheline_aligned; struct ccp_device { @@ -528,6 +535,28 @@ struct ccp_ecc_op { enum ccp_ecc_function function; }; +struct ccp_sm2_op { + u32 rand; + enum ccp_sm2_mode mode; +}; + +struct ccp_sm3_op { + enum ccp_sm3_type type; + u64 msg_bits; +}; + +struct ccp_sm4_op { + enum ccp_sm4_action action; + enum ccp_sm4_mode mode; + u32 select; +}; + +struct ccp_sm4_ctr_op { + u32 size; + enum ccp_sm4_action action; + u32 step; +}; + struct ccp_op { struct ccp_cmd_queue *cmd_q; @@ -551,6 +580,10 @@ struct ccp_op { struct ccp_rsa_op rsa; struct ccp_passthru_op passthru; struct ccp_ecc_op ecc; + struct ccp_sm2_op sm2; + struct ccp_sm3_op sm3; + struct ccp_sm4_op sm4; + struct ccp_sm4_ctr_op sm4_ctr; } u; }; @@ -599,6 +632,7 @@ struct dword3 { union dword4 { u32 dst_lo; /* NON-SHA */ u32 sha_len_lo; /* SHA */ + __le32 sm3_len_lo; /* SM3 */ }; union dword5 { @@ -609,6 +643,7 @@ union dword5 { unsigned int fixed:1; } fields; u32 sha_len_hi; + __le32 sm3_len_hi; }; struct dword7 { @@ -657,6 +692,11 @@ struct ccp_actions { int (*rsa)(struct ccp_op *); int (*passthru)(struct ccp_op *); int (*ecc)(struct ccp_op *); + int (*sm2)(struct ccp_op *op); + int (*sm3)(struct ccp_op *op); + int (*sm4)(struct ccp_op *op); + int (*sm4_ctr)(struct ccp_op *op); + int (*run_cmd)(struct ccp_op *op); u32 (*sballoc)(struct ccp_cmd_queue *, unsigned int); void (*sbfree)(struct ccp_cmd_queue *, unsigned int, 
unsigned int); unsigned int (*get_free_slots)(struct ccp_cmd_queue *); diff --git a/drivers/crypto/ccp/ccp-ops.c b/drivers/crypto/ccp/ccp-ops.c index aa4e1a5006919da78f1e5392c686479da2d579b9..684aa6c55cf169dd66f2a7ab8040325be17acad4 100644 --- a/drivers/crypto/ccp/ccp-ops.c +++ b/drivers/crypto/ccp/ccp-ops.c @@ -2460,6 +2460,520 @@ ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) } } +static int ccp_run_sm2_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm2_engine *sm2 = &cmd->u.sm2; + struct ccp_data src, dst; + struct ccp_op op; + int ret; + + if (!sm2->src || !sm2->dst) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.init = 1; + op.eom = 1; + op.u.sm2.rand = sm2->rand & 0x1; + op.u.sm2.mode = sm2->mode; + + memset(&src, 0, sizeof(src)); + ret = ccp_init_sg_workarea(&src.sg_wa, cmd_q->ccp->dev, + sm2->src, sm2->src_len, DMA_TO_DEVICE); + if (ret) + return ret; + + /* if src isn't contiguous, should copy to a contiguous buffer */ + if (src.sg_wa.dma_count == 1) { + op.src.u.dma.address = sg_dma_address(src.sg_wa.sg); + } else { + ccp_sg_free(&src.sg_wa); + ret = ccp_init_dm_workarea(&src.dm_wa, cmd_q, sm2->src_len, + DMA_TO_DEVICE); + if (ret) + goto e_src; + + ccp_set_dm_area(&src.dm_wa, 0, sm2->src, 0, sm2->src_len); + op.src.u.dma.address = src.dm_wa.dma.address; + } + + op.src.type = CCP_MEMTYPE_SYSTEM; + op.src.u.dma.offset = 0; + op.src.u.dma.length = sm2->src_len; + op.src.u.dma.dir = DMA_TO_DEVICE; + + memset(&dst, 0, sizeof(dst)); + ret = ccp_init_sg_workarea(&dst.sg_wa, cmd_q->ccp->dev, + sm2->dst, sm2->dst_len, DMA_FROM_DEVICE); + if (ret) + goto e_src; + + /* if dst isn't contiguous, should copy to a contiguous buffer */ + if (dst.sg_wa.dma_count == 1) { + op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg); + } else { + ccp_sg_free(&dst.sg_wa); + ret = ccp_init_dm_workarea(&dst.dm_wa, cmd_q, sm2->dst_len, + DMA_FROM_DEVICE); + if (ret) + goto e_dst; + + op.dst.u.dma.address = dst.dm_wa.dma.address; + } + + op.dst.type = CCP_MEMTYPE_SYSTEM; + op.dst.u.dma.offset = 0; + op.dst.u.dma.length = sm2->dst_len; + op.dst.u.dma.dir = DMA_FROM_DEVICE; + + ret = cmd_q->ccp->vdata->perform->sm2(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_dst; + } + + if (dst.dm_wa.address) { + ccp_get_dm_area(&dst.dm_wa, 0, sm2->dst, 0, sm2->dst_len); + memset(dst.dm_wa.address, 0, sm2->dst_len); + } + +e_dst: + ccp_free_data(&dst, cmd_q); + +e_src: + if (src.dm_wa.address) + memset(src.dm_wa.address, 0, sm2->src_len); + + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm3_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm3_engine *sm3 = &cmd->u.sm3; + struct ccp_dm_workarea ctx; + struct ccp_data src; + struct ccp_op op; + int ret; + + u8 sm3_zero_message_hash[SM3_DIGEST_SIZE] = { + 0x1A, 0xB2, 0x1D, 0x83, 0x55, 0xCF, 0xA1, 0x7F, + 0x8e, 0x61, 0x19, 0x48, 0x31, 0xE8, 0x1A, 0x8F, + 0x22, 0xBE, 0xC8, 0xC7, 0x28, 0xFE, 0xFB, 0x74, + 0x7E, 0xD0, 0x35, 0xEB, 0x50, 0x82, 0xAA, 0x2B, + }; + + if ((sm3->ctx == NULL) || (sm3->ctx_len != SM3_DIGEST_SIZE)) + return -EINVAL; + + if (sg_nents_for_len(sm3->ctx, SM3_DIGEST_SIZE) < 0) + return -EINVAL; + + if (sm3->final && sm3->first) { + if (!sm3->src_len) { + scatterwalk_map_and_copy( + (void *)sm3_zero_message_hash, + sm3->ctx, 0, SM3_DIGEST_SIZE, 1); + return 0; + } + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + 
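/* IOC is recomputed per block in the loop below: intermediate
 + * blocks run with it clear, and only the final block (or an SOC
 + * boundary) raises an interrupt before run_cmd() rings the
 + * doorbell. + */ +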
op.sb_ctx = cmd_q->sb_ctx; + op.init = sm3->first & 0x1; + op.u.sm3.type = sm3->type; + op.u.sm3.msg_bits = sm3->msg_bits; + + memset(&ctx, 0, sizeof(ctx)); + ret = ccp_init_dm_workarea(&ctx, cmd_q, SM3_DIGEST_SIZE, + DMA_BIDIRECTIONAL); + if (ret) + return ret; + + if (!sm3->first) { + /* load iv */ + ccp_set_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_ctx; + } + } + + ret = ccp_init_data(&src, cmd_q, sm3->src, sm3->src_len, + SM3_BLOCK_SIZE, DMA_TO_DEVICE); + if (ret) + goto e_ctx; + + /* send data to the CCP SM3 engine */ + if (sm3->src_len) { + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, NULL, &op, SM3_BLOCK_SIZE, + false); + if (!src.sg_wa.bytes_left && sm3->final) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ccp_process_data(&src, NULL, &op); + } + } else { + /* do sm3 padding */ + src.dm_wa.address[0] = 0x80; + *(__be64 *)&src.dm_wa.address[56] = cpu_to_be64(sm3->msg_bits); + + op.soc = 0; + op.ioc = 1; + op.eom = 0; + op.src.u.dma.address = src.dm_wa.dma.address; + op.src.u.dma.offset = 0; + op.src.u.dma.length = SM3_BLOCK_SIZE; + + ret = cmd_q->ccp->vdata->perform->sm3(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + } + + ret = ccp_copy_from_sb(cmd_q, &ctx, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_data; + } + + if (sm3->final && sm3->opad) { + /* HMAC operation, recursively perform final SM3 */ + struct ccp_cmd hmac_cmd; + struct scatterlist sg; + u8 *hmac_buf = NULL; + + hmac_buf = kmalloc( + SM3_BLOCK_SIZE + SM3_DIGEST_SIZE, GFP_KERNEL); + if (!hmac_buf) { + ret = -ENOMEM; + goto e_data; + } + scatterwalk_map_and_copy(hmac_buf, sm3->opad, + 0, SM3_BLOCK_SIZE, 0); + memcpy(hmac_buf + SM3_BLOCK_SIZE, ctx.address, + SM3_DIGEST_SIZE); + sg_init_one(&sg, hmac_buf, SM3_BLOCK_SIZE + SM3_DIGEST_SIZE); + + memset(&hmac_cmd, 0, sizeof(hmac_cmd)); + hmac_cmd.engine = CCP_ENGINE_SM3; + hmac_cmd.u.sm3.type = sm3->type; + hmac_cmd.u.sm3.ctx = sm3->ctx; + hmac_cmd.u.sm3.ctx_len = sm3->ctx_len; + hmac_cmd.u.sm3.src = &sg; + hmac_cmd.u.sm3.src_len = SM3_BLOCK_SIZE + SM3_DIGEST_SIZE; + hmac_cmd.u.sm3.opad = NULL; + hmac_cmd.u.sm3.opad_len = 0; + hmac_cmd.u.sm3.first = 1; + hmac_cmd.u.sm3.final = 1; + hmac_cmd.u.sm3.msg_bits = + (SM3_BLOCK_SIZE + SM3_DIGEST_SIZE) << 3; + + ret = ccp_run_sm3_cmd(cmd_q, &hmac_cmd); + if (ret) + cmd->engine_error = hmac_cmd.engine_error; + + kfree(hmac_buf); + } else { + ccp_get_dm_area(&ctx, 0, sm3->ctx, 0, SM3_DIGEST_SIZE); + } + +e_data: + ccp_free_data(&src, cmd_q); + +e_ctx: + ccp_dm_free(&ctx); + + return ret; +} + +static int ccp_run_sm4_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_engine *sm4 = &cmd->u.sm4; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4->src == NULL || sm4->dst == NULL) + return -EINVAL; + + if (sm4->key == NULL || 
sm4->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4->mode != CCP_SM4_MODE_ECB) { + if (sm4->iv == NULL || sm4->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + } + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4.action = sm4->action; + op.u.sm4.mode = sm4->mode; + op.u.sm4.select = sm4->select; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4->src) == sg_virt(sm4->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4->src, sm4->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4->dst, sm4->src_len, + SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + if (sm4->mode != CCP_SM4_MODE_ECB) + ccp_set_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4 engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, true); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + if (sm4->mode != CCP_SM4_MODE_ECB) { + /* retrieve the SM4 iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4->iv, 0, SM4_BLOCK_SIZE); + } + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + +static int ccp_run_sm4_ctr_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) +{ + struct ccp_sm4_ctr_engine *sm4_ctr = &cmd->u.sm4_ctr; + struct ccp_dm_workarea iv_key; + struct ccp_data src, dst; + struct ccp_op op; + bool in_place = false; + int ret; + + if (sm4_ctr->src == NULL || sm4_ctr->dst == NULL) + return -EINVAL; + + if (sm4_ctr->key == NULL || sm4_ctr->key_len != SM4_KEY_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->key, SM4_KEY_SIZE) < 0) + return -EINVAL; + + if (sm4_ctr->iv == NULL || sm4_ctr->iv_len != SM4_BLOCK_SIZE) + return -EINVAL; + + if (sg_nents_for_len(sm4_ctr->iv, SM4_BLOCK_SIZE) < 0) + return -EINVAL; + + memset(&op, 0, sizeof(op)); + op.cmd_q = cmd_q; + op.jobid = CCP_NEW_JOBID(cmd_q->ccp); + op.ioc = 1; + op.sb_ctx = cmd_q->sb_ctx; + op.u.sm4_ctr.size = sm4_ctr->size; + op.u.sm4_ctr.action = sm4_ctr->action; 
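+ /* size, action and step map one-to-one onto the CCP_SM4_CTR_SIZE,
 + * CCP_SM4_CTR_ENCRYPT and CCP_SM4_CTR_STEP function fields
 + * programmed by ccp5_perform_sm4_ctr(). + */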
+ op.u.sm4_ctr.step = sm4_ctr->step; + + /* Prepare the input and output data workareas. For in-place + * operations we need to set the dma direction to BIDIRECTIONAL + * and copy the src workarea to the dst workarea. + */ + if (sg_virt(sm4_ctr->src) == sg_virt(sm4_ctr->dst)) + in_place = true; + + ret = ccp_init_data(&src, cmd_q, sm4_ctr->src, sm4_ctr->src_len, + SM4_BLOCK_SIZE, in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (ret) + return ret; + + if (in_place) { + dst = src; + } else { + ret = ccp_init_data(&dst, cmd_q, sm4_ctr->dst, + sm4_ctr->src_len, SM4_BLOCK_SIZE, DMA_FROM_DEVICE); + if (ret) + goto e_src; + } + + /* load iv and key */ + ret = ccp_init_dm_workarea(&iv_key, cmd_q, + SM4_BLOCK_SIZE + SM4_KEY_SIZE, DMA_BIDIRECTIONAL); + if (ret) + goto e_dst; + + ccp_set_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + ccp_set_dm_area(&iv_key, SM4_BLOCK_SIZE, sm4_ctr->key, 0, SM4_KEY_SIZE); + + ret = ccp_copy_to_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + /* send data to the CCP SM4_CTR engine */ + while (src.sg_wa.bytes_left) { + ccp_prepare_data(&src, &dst, &op, SM4_BLOCK_SIZE, false); + if (!src.sg_wa.bytes_left) + op.eom = 1; + + if (!src.sg_wa.bytes_left || op.soc) + op.ioc = 1; + else + op.ioc = 0; + + ret = cmd_q->ccp->vdata->perform->sm4_ctr(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + if (!src.sg_wa.bytes_left || op.soc) { + ret = cmd_q->ccp->vdata->perform->run_cmd(&op); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + } + + ccp_process_data(&src, &dst, &op); + } + + /* retrieve the SM4_CTR iv */ + ret = ccp_copy_from_sb(cmd_q, &iv_key, 0, op.sb_ctx, + CCP_PASSTHRU_BYTESWAP_NOOP); + if (ret) { + cmd->engine_error = cmd_q->cmd_error; + goto e_iv_key; + } + + ccp_get_dm_area(&iv_key, 0, sm4_ctr->iv, 0, SM4_BLOCK_SIZE); + +e_iv_key: + memset(iv_key.address, 0, SM4_BLOCK_SIZE + SM4_KEY_SIZE); + ccp_dm_free(&iv_key); + +e_dst: + if (!in_place) + ccp_free_data(&dst, cmd_q); + +e_src: + ccp_free_data(&src, cmd_q); + + return ret; +} + int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) { int ret; @@ -2504,6 +3018,18 @@ int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd) case CCP_ENGINE_ECC: ret = ccp_run_ecc_cmd(cmd_q, cmd); break; + case CCP_ENGINE_SM2: + ret = ccp_run_sm2_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM3: + ret = ccp_run_sm3_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4: + ret = ccp_run_sm4_cmd(cmd_q, cmd); + break; + case CCP_ENGINE_SM4_CTR: + ret = ccp_run_sm4_ctr_cmd(cmd_q, cmd); + break; default: ret = -EINVAL; } diff --git a/drivers/crypto/ccp/ccp_sm2_sign.asn1 b/drivers/crypto/ccp/ccp_sm2_sign.asn1 new file mode 100644 index 0000000000000000000000000000000000000000..7e83e6799cb47574430dbecb06415ea179767adc --- /dev/null +++ b/drivers/crypto/ccp/ccp_sm2_sign.asn1 @@ -0,0 +1,4 @@ +Sm2Signature ::= SEQUENCE { + sig_r INTEGER ({ ccp_sm2_get_signature_r }), + sig_s INTEGER ({ ccp_sm2_get_signature_s }) +} diff --git a/drivers/crypto/ccp/csv-dev.c b/drivers/crypto/ccp/csv-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..b9a9ca4fa3c716226e62e9c9875827fbc3a33950 --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include "sev-dev.h" +#include "csv-dev.h" + +/* Function pointers for hooks */ +struct csv_hooks_table csv_hooks; + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) +{ + int ret = 0; + unsigned int i = 0; + struct csv3_data_set_smr *cmd_set_smr; + struct csv3_data_set_smcr *cmd_set_smcr; + struct csv3_data_memory_region *smr_regions; + + if (!csv_smr || !csv_smr_num) + return -EINVAL; + + cmd_set_smr = kzalloc(sizeof(*cmd_set_smr), GFP_KERNEL); + if (!cmd_set_smr) + return -ENOMEM; + + smr_regions = kcalloc(csv_smr_num, sizeof(*smr_regions), GFP_KERNEL); + if (!smr_regions) { + ret = -ENOMEM; + goto e_free_cmd_set_smr; + } + + for (i = 0; i < csv_smr_num; i++) { + smr_regions[i].base_address = csv_smr[i].start; + smr_regions[i].size = csv_smr[i].size; + } + cmd_set_smr->smr_entry_size = 1 << csv_get_smr_entry_shift(); + cmd_set_smr->regions_paddr = __psp_pa(smr_regions); + cmd_set_smr->nregions = csv_smr_num; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMR, cmd_set_smr, error); + if (ret) { + pr_err("Fail to set SMR, ret %#x, error %#x\n", ret, *error); + goto e_free_smr_area; + } + + cmd_set_smcr = kzalloc(sizeof(*cmd_set_smcr), GFP_KERNEL); + if (!cmd_set_smcr) { + ret = -ENOMEM; + goto e_free_smr_area; + } + + cmd_set_smcr->base_address = csv_alloc_from_contiguous(1UL << CSV_MR_ALIGN_BITS, + &node_online_map, + get_order(1 << CSV_MR_ALIGN_BITS)); + if (!cmd_set_smcr->base_address) { + pr_err("Fail to alloc SMCR memory\n"); + ret = -ENOMEM; + goto e_free_cmd_set_smcr; + } + + cmd_set_smcr->size = 1UL << CSV_MR_ALIGN_BITS; + ret = csv_hooks.sev_do_cmd(CSV3_CMD_SET_SMCR, cmd_set_smcr, error); + if (ret) { + if (*error == SEV_RET_INVALID_COMMAND) + ret = 0; + else + pr_err("set smcr ret %#x, error %#x\n", ret, *error); + + csv_release_to_contiguous(cmd_set_smcr->base_address, + 1UL << CSV_MR_ALIGN_BITS); + } + +e_free_cmd_set_smcr: + kfree((void *)cmd_set_smcr); +e_free_smr_area: + kfree((void *)smr_regions); +e_free_cmd_set_smr: + kfree((void *)cmd_set_smr); + + if (ret) + dev_warn(sev->dev, + "CSV3: fail to set secure memory region, CSV3 support unavailable\n"); + + return ret; +} + +#endif /* CONFIG_HYGON_CSV */ diff --git a/drivers/crypto/ccp/csv-dev.h b/drivers/crypto/ccp/csv-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..8865b945728bf21a8722da6e4c07cb9383a105b8 --- /dev/null +++ b/drivers/crypto/ccp/csv-dev.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ * + * Author: Liyang Han + */ + +#ifndef __CSV_DEV_H__ +#define __CSV_DEV_H__ + +#include + +/* Hooks table: a table of function pointers filled in when psp init */ +extern struct csv_hooks_table { + int (*sev_do_cmd)(int cmd, void *data, int *psp_ret); +} csv_hooks; + +#ifdef CONFIG_HYGON_CSV + +int csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error); + +#else /* !CONFIG_HYGON_CSV */ + +static inline int +csv_platform_cmd_set_secure_memory_region(struct sev_device *sev, int *error) { return 0; } + +#endif /* CONFIG_HYGON_CSV */ + +#endif /* __CSV_DEV_H__ */ diff --git a/drivers/crypto/ccp/psp-dev.c b/drivers/crypto/ccp/psp-dev.c index d42d7bc623523dad25f4665d18405b499be2bee9..1566b955730ed718663349a4baa1d8340f2f01db 100644 --- a/drivers/crypto/ccp/psp-dev.c +++ b/drivers/crypto/ccp/psp-dev.c @@ -9,6 +9,11 @@ #include #include +#include +#include +#include +#include +#include #include "sp-dev.h" #include "psp-dev.h" @@ -16,9 +21,88 @@ #include "tee-dev.h" #include "platform-access.h" #include "dbc.h" +#ifdef CONFIG_TDM_DEV_HYGON +#include "tdm-dev.h" +#endif struct psp_device *psp_master; +struct psp_misc_dev *psp_misc; +int is_hygon_psp; +#define HYGON_PSP_IOC_TYPE 'H' +enum HYGON_PSP_OPCODE { + HYGON_PSP_MUTEX_ENABLE = 1, + HYGON_PSP_MUTEX_DISABLE, + HYGON_VPSP_CTRL_OPT, + HYGON_PSP_OPCODE_MAX_NR, +}; + +enum VPSP_DEV_CTRL_OPCODE { + VPSP_OP_VID_ADD, + VPSP_OP_VID_DEL, + VPSP_OP_SET_DEFAULT_VID_PERMISSION, + VPSP_OP_GET_DEFAULT_VID_PERMISSION, +}; + +struct vpsp_dev_ctrl { + unsigned char op; + union { + unsigned int vid; + // Set or check the permissions for the default VID + unsigned int def_vid_perm; + unsigned char reserved[128]; + } data; +}; + +int psp_mutex_enabled; +extern struct mutex sev_cmd_mutex; + +uint64_t atomic64_exchange(uint64_t *dst, uint64_t val) +{ + return xchg(dst, val); +} + +int psp_mutex_init(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + mutex->locked = 0; + return 0; +} + +int psp_mutex_trylock(struct psp_mutex *mutex) +{ + if (atomic64_exchange(&mutex->locked, 1)) + return 0; + else + return 1; +} + +int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms) +{ + int ret = 0; + unsigned long je; + + je = jiffies + msecs_to_jiffies(ms); + do { + if (psp_mutex_trylock(mutex)) { + ret = 1; + break; + } + } while ((ms == 0) || time_before(jiffies, je)); + + return ret; +} + +int psp_mutex_unlock(struct psp_mutex *mutex) +{ + if (!mutex) + return -1; + + atomic64_exchange(&mutex->locked, 0); + return 0; +} + static struct psp_device *psp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; @@ -56,6 +140,109 @@ static irqreturn_t psp_irq_handler(int irq, void *data) return IRQ_HANDLED; } +#ifdef CONFIG_HYGON_PSP2CPU_CMD +static DEFINE_SPINLOCK(p2c_notifier_lock); +static p2c_notifier_t p2c_notifiers[P2C_NOTIFIERS_MAX] = {NULL}; +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && !p2c_notifiers[cmd_id]) { + p2c_notifiers[cmd_id] = notifier; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_cmd_notifier); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)) +{ + int ret = -ENODEV; + unsigned long flags; + + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (cmd_id < P2C_NOTIFIERS_MAX && p2c_notifiers[cmd_id] == notifier) { + 
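/* Only the notifier that was registered for this cmd_id may
 + * remove it; any other pointer leaves the slot untouched and
 + * -ENODEV is returned. + */ +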
p2c_notifiers[cmd_id] = NULL; + ret = 0; + } + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + + return ret; +} +EXPORT_SYMBOL_GPL(psp_unregister_cmd_notifier); + +#define PSP2CPU_MAX_LOOP 100 +static irqreturn_t psp_irq_handler_hygon(int irq, void *data) +{ + struct psp_device *psp = data; + struct sev_device *sev = psp->sev_irq_data; + unsigned int status; + int reg; + unsigned long flags; + int count = 0; + uint32_t p2c_cmd; + uint32_t p2c_lo_data; + uint32_t p2c_hi_data; + uint64_t p2c_data; + + /* Read the interrupt status: */ + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + + while (status && (count++ < PSP2CPU_MAX_LOOP)) { + /* Clear the interrupt status by writing the same value we read. */ + iowrite32(status, psp->io_regs + psp->vdata->intsts_reg); + + /* Check if it is command completion: */ + if (status & SEV_CMD_COMPLETE) { + /* Check if it is SEV command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->sev->cmdresp_reg); + if (reg & PSP_CMDRESP_RESP) { + sev->int_rcvd = 1; + wake_up(&sev->int_queue); + } + } + + if (status & PSP_X86_CMD) { + /* Check if it is P2C command completion: */ + reg = ioread32(psp->io_regs + psp->vdata->p2c_cmdresp_reg); + if (!(reg & PSP_CMDRESP_RESP)) { + p2c_lo_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_lo_reg); + p2c_hi_data = ioread32(psp->io_regs + + psp->vdata->p2c_cmdbuff_addr_hi_reg); + p2c_data = (((uint64_t)(p2c_hi_data) << 32) + + ((uint64_t)(p2c_lo_data))); + p2c_cmd = (uint32_t)(reg & SEV_CMDRESP_IOC); + if (p2c_cmd < P2C_NOTIFIERS_MAX) { + spin_lock_irqsave(&p2c_notifier_lock, flags); + if (p2c_notifiers[p2c_cmd]) + p2c_notifiers[p2c_cmd](p2c_cmd, p2c_data); + + spin_unlock_irqrestore(&p2c_notifier_lock, flags); + } + + reg |= PSP_CMDRESP_RESP; + iowrite32(reg, psp->io_regs + psp->vdata->p2c_cmdresp_reg); + } + } + status = ioread32(psp->io_regs + psp->vdata->intsts_reg); + } + + return IRQ_HANDLED; +} +#endif + +static void hygon_fixup_psp_caps(struct psp_device *psp) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + psp->capability &= ~(PSP_CAPABILITY_TEE | + PSP_CAPABILITY_PSP_SECURITY_REPORTING); +} + static unsigned int psp_get_capability(struct psp_device *psp) { unsigned int val = ioread32(psp->io_regs + psp->vdata->feature_reg); @@ -73,6 +260,12 @@ static unsigned int psp_get_capability(struct psp_device *psp) } psp->capability = val; + /* + * Fix capability of Hygon psp, the meaning of Hygon psp feature + * register is not exactly the same as AMD. 
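+ * On Hygon parts the TEE and security-reporting bits do not carry
 + * their AMD meaning, so hygon_fixup_psp_caps() masks out
 + * PSP_CAPABILITY_TEE and PSP_CAPABILITY_PSP_SECURITY_REPORTING.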
+ */ + hygon_fixup_psp_caps(psp); + /* Detect if TSME and SME are both enabled */ if (psp->capability & PSP_CAPABILITY_PSP_SECURITY_REPORTING && psp->capability & (PSP_SECURITY_TSME_STATUS << PSP_CAPABILITY_PSP_SECURITY_OFFSET) && @@ -140,12 +333,341 @@ static int psp_init(struct psp_device *psp) if (psp->vdata->platform_access) psp_init_platform_access(psp); +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + ret = tdm_dev_init(); + if (ret) + return ret; + } +#endif + return 0; } +static int mmap_psp(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long page; + + page = virt_to_phys((void *)psp_misc->data_pg_aligned) >> PAGE_SHIFT; + + if (remap_pfn_range(vma, vma->vm_start, page, (vma->vm_end - vma->vm_start), + vma->vm_page_prot)) { + printk(KERN_ERR "remap failed..."); + return -1; + } + vm_flags_mod(vma, VM_DONTDUMP | VM_DONTEXPAND, 0); + printk(KERN_INFO "remap_pfn_rang page:[%lu] ok.\n", page); + return 0; +} + +static ssize_t read_psp(struct file *file, char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_to_user(buf, (char *)psp_misc->data_pg_aligned + *ppos, count); + if (remaining) + return -EFAULT; + + *ppos += count; + + return count; +} + +static ssize_t write_psp(struct file *file, const char __user *buf, size_t count, loff_t *ppos) +{ + ssize_t remaining, written; + + if ((*ppos + count) > PAGE_SIZE) { + printk(KERN_ERR "%s: invalid address range, pos %llx, count %lx\n", + __func__, *ppos, count); + return -EFAULT; + } + + remaining = copy_from_user((char *)psp_misc->data_pg_aligned + *ppos, buf, count); + written = count - remaining; + if (!written) + return -EFAULT; + + *ppos += written; + + return written; +} + +DEFINE_RWLOCK(vpsp_rwlock); + +/* VPSP_VID_MAX_ENTRIES determines the maximum number of vms that can set vid. + * but, the performance of finding vid is determined by g_vpsp_vid_num, + * so VPSP_VID_MAX_ENTRIES can be set larger. + */ +#define VPSP_VID_MAX_ENTRIES 2048 +#define VPSP_VID_NUM_MAX 64 + +struct vpsp_vid_entry { + uint32_t vid; + pid_t pid; +}; +static struct vpsp_vid_entry g_vpsp_vid_array[VPSP_VID_MAX_ENTRIES]; +static uint32_t g_vpsp_vid_num; +static int compare_vid_entries(const void *a, const void *b) +{ + return ((struct vpsp_vid_entry *)a)->pid - ((struct vpsp_vid_entry *)b)->pid; +} +static void swap_vid_entries(void *a, void *b, int size) +{ + struct vpsp_vid_entry entry; + + memcpy(&entry, a, size); + memcpy(a, b, size); + memcpy(b, &entry, size); +} + +/** + * When 'allow_default_vid' is set to 1, + * QEMU is allowed to use 'vid 0' by default + * in the absence of a valid 'vid' setting. + */ +uint32_t allow_default_vid = 1; +void vpsp_set_default_vid_permission(uint32_t is_allow) +{ + allow_default_vid = is_allow; +} + +int vpsp_get_default_vid_permission(void) +{ + return allow_default_vid; +} +EXPORT_SYMBOL_GPL(vpsp_get_default_vid_permission); + +/** + * When the virtual machine executes the 'tkm' command, + * it needs to retrieve the corresponding 'vid' + * by performing a binary search using 'kvm->userspace_pid'. 
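+ * + * A hypothetical caller on that path could look like: + *
 + *	uint32_t vid = 0;
 + *
 + *	if (vpsp_get_vid(&vid, kvm->userspace_pid) &&
 + *	    !vpsp_get_default_vid_permission())
 + *		return -EPERM;	/* no vid bound, default vid forbidden */
 + *	/* vid now holds the bound value, or the default 0 */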
+ */ +int vpsp_get_vid(uint32_t *vid, pid_t pid) +{ + struct vpsp_vid_entry new_entry = {.pid = pid}; + struct vpsp_vid_entry *existing_entry = NULL; + + read_lock(&vpsp_rwlock); + existing_entry = bsearch(&new_entry, g_vpsp_vid_array, g_vpsp_vid_num, + sizeof(struct vpsp_vid_entry), compare_vid_entries); + read_unlock(&vpsp_rwlock); + + if (!existing_entry) + return -ENOENT; + + if (vid) { + *vid = existing_entry->vid; + pr_debug("PSP: %s %d, by pid %d\n", __func__, *vid, pid); + } + return 0; +} +EXPORT_SYMBOL_GPL(vpsp_get_vid); + +/** + * Upon qemu startup, this section checks whether + * the '-device psp,vid' parameter is specified. + * If set, it utilizes the 'vpsp_add_vid' function + * to insert the 'vid' and 'pid' values into the 'g_vpsp_vid_array'. + * The insertion is done in ascending order of 'pid'. + */ +static int vpsp_add_vid(uint32_t vid) +{ + pid_t cur_pid = task_pid_nr(current); + struct vpsp_vid_entry new_entry = {.vid = vid, .pid = cur_pid}; + + if (vpsp_get_vid(NULL, cur_pid) == 0) + return -EEXIST; + if (g_vpsp_vid_num == VPSP_VID_MAX_ENTRIES) + return -ENOMEM; + if (vid >= VPSP_VID_NUM_MAX) + return -EINVAL; + + write_lock(&vpsp_rwlock); + memcpy(&g_vpsp_vid_array[g_vpsp_vid_num++], &new_entry, sizeof(struct vpsp_vid_entry)); + sort(g_vpsp_vid_array, g_vpsp_vid_num, sizeof(struct vpsp_vid_entry), + compare_vid_entries, swap_vid_entries); + pr_info("PSP: add vid %d, by pid %d, total vid num is %d\n", vid, cur_pid, g_vpsp_vid_num); + write_unlock(&vpsp_rwlock); + return 0; +} + +/** + * Upon the virtual machine is shut down, + * the 'vpsp_del_vid' function is employed to remove + * the 'vid' associated with the current 'pid'. + */ +static int vpsp_del_vid(void) +{ + pid_t cur_pid = task_pid_nr(current); + int i, ret = -ENOENT; + + write_lock(&vpsp_rwlock); + for (i = 0; i < g_vpsp_vid_num; ++i) { + if (g_vpsp_vid_array[i].pid == cur_pid) { + --g_vpsp_vid_num; + pr_info("PSP: delete vid %d, by pid %d, total vid num is %d\n", + g_vpsp_vid_array[i].vid, cur_pid, g_vpsp_vid_num); + memcpy(&g_vpsp_vid_array[i], &g_vpsp_vid_array[i + 1], + sizeof(struct vpsp_vid_entry) * (g_vpsp_vid_num - i)); + ret = 0; + goto end; + } + } + +end: + write_unlock(&vpsp_rwlock); + return ret; +} + +static int do_vpsp_op_ioctl(struct vpsp_dev_ctrl *ctrl) +{ + int ret = 0; + unsigned char op = ctrl->op; + + switch (op) { + case VPSP_OP_VID_ADD: + ret = vpsp_add_vid(ctrl->data.vid); + break; + + case VPSP_OP_VID_DEL: + ret = vpsp_del_vid(); + break; + + case VPSP_OP_SET_DEFAULT_VID_PERMISSION: + vpsp_set_default_vid_permission(ctrl->data.def_vid_perm); + break; + + case VPSP_OP_GET_DEFAULT_VID_PERMISSION: + ctrl->data.def_vid_perm = vpsp_get_default_vid_permission(); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} + +static long ioctl_psp(struct file *file, unsigned int ioctl, unsigned long arg) +{ + unsigned int opcode = 0; + struct vpsp_dev_ctrl vpsp_ctrl_op; + int ret = -EFAULT; + + if (_IOC_TYPE(ioctl) != HYGON_PSP_IOC_TYPE) { + printk(KERN_ERR "%s: invalid ioctl type: 0x%x\n", __func__, _IOC_TYPE(ioctl)); + return -EINVAL; + } + opcode = _IOC_NR(ioctl); + switch (opcode) { + case HYGON_PSP_MUTEX_ENABLE: + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + // And get the sev lock to make sure no one is using it now. + mutex_lock(&sev_cmd_mutex); + psp_mutex_enabled = 1; + mutex_unlock(&sev_cmd_mutex); + // Wait 10ms just in case someone is right before getting the psp lock. 
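+ // (a best-effort grace period for a caller that sampled
 + // psp_mutex_enabled just before the flag was flipped)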
+ mdelay(10); + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + ret = 0; + break; + + case HYGON_PSP_MUTEX_DISABLE: + mutex_lock(&sev_cmd_mutex); + // And get the psp lock to make sure no one is using it now. + psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, 0); + psp_mutex_enabled = 0; + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + // Wait 10ms just in case someone is right before getting the sev lock. + mdelay(10); + mutex_unlock(&sev_cmd_mutex); + ret = 0; + break; + + case HYGON_VPSP_CTRL_OPT: + if (copy_from_user(&vpsp_ctrl_op, (void __user *)arg, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + ret = do_vpsp_op_ioctl(&vpsp_ctrl_op); + if (!ret && copy_to_user((void __user *)arg, &vpsp_ctrl_op, + sizeof(struct vpsp_dev_ctrl))) + return -EFAULT; + break; + + default: + printk(KERN_ERR "%s: invalid ioctl number: %d\n", __func__, opcode); + return -EINVAL; + } + return ret; +} + +static const struct file_operations psp_fops = { + .owner = THIS_MODULE, + .mmap = mmap_psp, + .read = read_psp, + .write = write_psp, + .unlocked_ioctl = ioctl_psp, +}; + +static int hygon_psp_additional_setup(struct sp_device *sp) +{ + struct device *dev = sp->dev; + int ret = 0; + + if (!psp_misc) { + struct miscdevice *misc; + + psp_misc = devm_kzalloc(dev, sizeof(*psp_misc), GFP_KERNEL); + if (!psp_misc) + return -ENOMEM; + psp_misc->data_pg_aligned = (struct psp_dev_data *)get_zeroed_page(GFP_KERNEL); + if (!psp_misc->data_pg_aligned) { + dev_err(dev, "alloc psp data page failed\n"); + devm_kfree(dev, psp_misc); + psp_misc = NULL; + return -ENOMEM; + } + SetPageReserved(virt_to_page(psp_misc->data_pg_aligned)); + psp_mutex_init(&psp_misc->data_pg_aligned->mb_mutex); + + *(uint32_t *)((void *)psp_misc->data_pg_aligned + 8) = 0xdeadbeef; + misc = &psp_misc->misc; + misc->minor = MISC_DYNAMIC_MINOR; + misc->name = "hygon_psp_config"; + misc->fops = &psp_fops; + + ret = misc_register(misc); + if (ret) + return ret; + kref_init(&psp_misc->refcount); + } else { + kref_get(&psp_misc->refcount); + } + + return ret; +} + +static void hygon_psp_exit(struct kref *ref) +{ + struct psp_misc_dev *misc_dev = container_of(ref, struct psp_misc_dev, refcount); + + misc_deregister(&misc_dev->misc); + ClearPageReserved(virt_to_page(misc_dev->data_pg_aligned)); + free_page((unsigned long)misc_dev->data_pg_aligned); + psp_misc = NULL; +} + int psp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; + struct pci_dev *pdev = to_pci_dev(dev); struct psp_device *psp; int ret; @@ -173,8 +695,26 @@ int psp_dev_init(struct sp_device *sp) iowrite32(0, psp->io_regs + psp->vdata->inten_reg); iowrite32(-1, psp->io_regs + psp->vdata->intsts_reg); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { + is_hygon_psp = 1; + psp_mutex_enabled = 0; + ret = hygon_psp_additional_setup(sp); + if (ret) { + dev_err(dev, "psp: unable to do additional setup\n"); + goto e_err; + } + } + /* Request an irq */ - ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + if (pdev->vendor == PCI_VENDOR_ID_HYGON) { +#ifdef CONFIG_HYGON_PSP2CPU_CMD + ret = sp_request_psp_irq(psp->sp, psp_irq_handler_hygon, psp->name, psp); +#else + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); +#endif + } else { + ret = sp_request_psp_irq(psp->sp, psp_irq_handler, psp->name, psp); + } if (ret) { dev_err(dev, "psp: unable to allocate an IRQ\n"); goto e_err; @@ -220,10 +760,18 @@ void psp_dev_destroy(struct sp_device *sp) if (!psp) return; +#ifdef CONFIG_TDM_DEV_HYGON + if (boot_cpu_data.x86_vendor == 
X86_VENDOR_HYGON) + tdm_dev_destroy(); +#endif + sev_dev_destroy(psp); tee_dev_destroy(psp); + if (is_hygon_psp && psp_misc) + kref_put(&psp_misc->refcount, hygon_psp_exit); + dbc_dev_destroy(psp); platform_access_dev_destroy(psp); diff --git a/drivers/crypto/ccp/psp-dev.h b/drivers/crypto/ccp/psp-dev.h index 8a4de69399c59abc8271ca346c88c30e0dc2682f..694bb3faf8be44b691642f42a69de5f1a1374616 100644 --- a/drivers/crypto/ccp/psp-dev.h +++ b/drivers/crypto/ccp/psp-dev.h @@ -14,11 +14,30 @@ #include #include #include +#include #include "sp-dev.h" +#define PSP_RBCTL_X86_WRITES BIT(31) +#define PSP_RBCTL_RBMODE_ACT BIT(30) +#define PSP_RBCTL_CLR_INTSTAT BIT(29) +#define PSP_RBTAIL_QHI_TAIL_SHIFT 16 +#define PSP_RBTAIL_QHI_TAIL_MASK 0x7FF0000 +#define PSP_RBTAIL_QLO_TAIL_MASK 0x7FF + +#define PSP_RBHEAD_QHI_HEAD_SHIFT 16 +#define PSP_RBHEAD_QHI_HEAD_MASK 0x7FF0000 +#define PSP_RBHEAD_QLO_HEAD_MASK 0x7FF + +#define PSP_RBHEAD_QPAUSE_INT_STAT BIT(30) + #define MAX_PSP_NAME_LEN 16 +#ifdef CONFIG_HYGON_PSP2CPU_CMD +#define PSP_X86_CMD BIT(2) +#define P2C_NOTIFIERS_MAX 16 +#endif + extern struct psp_device *psp_master; typedef void (*psp_irq_handler_t)(int, void *, unsigned int); @@ -45,6 +64,21 @@ struct psp_device { unsigned int capability; }; +#define PSP_MUTEX_TIMEOUT 600000 +struct psp_mutex { + uint64_t locked; +}; + +struct psp_dev_data { + struct psp_mutex mb_mutex; +}; + +struct psp_misc_dev { + struct kref refcount; + struct psp_dev_data *data_pg_aligned; + struct miscdevice misc; +}; + void psp_set_sev_irq_handler(struct psp_device *psp, psp_irq_handler_t handler, void *data); void psp_clear_sev_irq_handler(struct psp_device *psp); diff --git a/drivers/crypto/ccp/psp-ringbuf.c b/drivers/crypto/ccp/psp-ringbuf.c new file mode 100644 index 0000000000000000000000000000000000000000..9b5f886c0b406cc3ad7d886389a6ab107a39addd --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON Platform Security Processor (PSP) interface + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include "psp-ringbuf.h" + +static void enqueue_data(struct csv_queue *queue, + const void *src, + unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + void *data; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + data = (void *)queue->data_align; + memcpy(data + off, src, l); + memcpy(data, src + l, len - l); + + /* + * Make sure that the data in the ring buffer is up to date before + * incrementing the queue->tail index counter. + */ + smp_wmb(); +} + +static unsigned int queue_avail_size(struct csv_queue *queue) +{ + /* + * According to the nature of unsigned Numbers, it always work + * well even though tail < head. Reserved 1 element to distinguish + * full and empty. 
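+ * + * Worked example with mask = 7 (an 8-entry queue): after the 32-bit
 + * counters wrap, head = 0xfffffffe and tail = 0x00000002 still give
 + * tail - head = 4 entries in flight, leaving 7 - 4 = 3 usable slots.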
+ */ + return queue->mask - (queue->tail - queue->head); +} + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize) +{ + size /= esize; + + queue->head = 0; + queue->tail = 0; + queue->esize = esize; + queue->data = (u64)buffer; + queue->mask = size - 1; + queue->data_align = ALIGN(queue->data, CSV_RING_BUFFER_ALIGN); + + return 0; +} + +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len) +{ + unsigned int size; + + size = queue_avail_size(queue); + if (len > size) + len = size; + + enqueue_data(queue, buf, len, queue->tail); + queue->tail += len; + return len; +} + +static void dequeue_data(struct csv_queue *queue, + void *dst, unsigned int len, unsigned int off) +{ + unsigned int size = queue->mask + 1; + unsigned int esize = queue->esize; + unsigned int l; + + off &= queue->mask; + if (esize != 1) { + off *= esize; + size *= esize; + len *= esize; + } + l = min(len, size - off); + + memcpy(dst, (void *)(queue->data + off), l); + memcpy((void *)((uintptr_t)dst + l), (void *)queue->data, len - l); + + /* + * Make sure that the data is copied before incrementing the + * queue->tail index counter. + */ + smp_wmb(); +} + +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} + +unsigned int csv_dequeue_cmd(struct csv_queue *queue, + void *buf, unsigned int len) +{ + unsigned int size; + + size = queue->tail - queue->head; + if (len > size) + len = size; + + dequeue_data(queue, buf, len, queue->head); + queue->head += len; + return len; +} + +unsigned int csv_cmd_queue_size(struct csv_queue *queue) +{ + unsigned int free_size; + + free_size = queue_avail_size(queue); + return queue->mask - free_size; +} diff --git a/drivers/crypto/ccp/psp-ringbuf.h b/drivers/crypto/ccp/psp-ringbuf.h new file mode 100644 index 0000000000000000000000000000000000000000..336352cc7a66209466f0453af3517d1c9cff1bdd --- /dev/null +++ b/drivers/crypto/ccp/psp-ringbuf.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * HYGON Platform Security Processor (PSP) interface driver + * + * Copyright (C) 2016-2023 Hygon Info Technologies Ltd. 
+ * + * Author: Baoshun Fang + */ + +#ifndef __PSP_RINGBUF_H__ +#define __PSP_RINGBUF_H__ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +int csv_queue_init(struct csv_queue *queue, + void *buffer, unsigned int size, size_t esize); +unsigned int csv_enqueue_cmd(struct csv_queue *queue, + const void *buf, unsigned int len); +unsigned int csv_dequeue_stat(struct csv_queue *queue, + void *buf, unsigned int len); + +unsigned int csv_dequeue_cmd(struct csv_queue *ring_buf, + void *buf, unsigned int len); + +unsigned int csv_cmd_queue_size(struct csv_queue *ring_buf); +#endif /* __PSP_RINGBUF_H__ */ diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c index f97166fba9d93061737f51766f7bb3d1e4f2757f..ab0fadb2ed90a9fab2de6cdf9ea22cd39aa092ce 100644 --- a/drivers/crypto/ccp/sev-dev.c +++ b/drivers/crypto/ccp/sev-dev.c @@ -26,18 +26,21 @@ #include #include #include +#include #include #include #include "psp-dev.h" #include "sev-dev.h" +#include "csv-dev.h" #define DEVICE_NAME "sev" #define SEV_FW_FILE "amd/sev.fw" +#define CSV_FW_FILE "hygon/csv.fw" #define SEV_FW_NAME_SIZE 64 -static DEFINE_MUTEX(sev_cmd_mutex); +DEFINE_MUTEX(sev_cmd_mutex); static struct sev_misc_dev *misc_dev; static int psp_cmd_timeout = 100; @@ -64,6 +67,28 @@ MODULE_FIRMWARE("amd/amd_sev_fam19h_model1xh.sbin"); /* 4th gen EPYC */ static bool psp_dead; static int psp_timeout; +static int csv_comm_mode = CSV_COMM_MAILBOX_ON; +extern int is_hygon_psp; +extern struct psp_misc_dev *psp_misc; +extern int psp_mutex_lock_timeout(struct psp_mutex *mutex, uint64_t ms); +extern int psp_mutex_trylock(struct psp_mutex *mutex); +extern int psp_mutex_unlock(struct psp_mutex *mutex); +extern int psp_mutex_enabled; + +/* definition of variables used by the virtual PSP */ +enum VPSP_RB_CHECK_STATUS { + RB_NOT_CHECK = 0, + RB_CHECKING, + RB_CHECKED, + RB_CHECK_MAX +}; +#define VPSP_RB_IS_SUPPORTED(buildid) (buildid >= 1913) +#define VPSP_CMD_STATUS_RUNNING 0xffff +static DEFINE_MUTEX(vpsp_rb_mutex); +struct csv_ringbuffer_queue vpsp_ring_buffer[CSV_COMMAND_PRIORITY_NUM]; +static uint8_t vpsp_rb_supported; +static atomic_t vpsp_rb_check_status = ATOMIC_INIT(RB_NOT_CHECK); + /* Trusted Memory Region (TMR): * The TMR is a 1MB area that must be 1MB aligned. Use the page allocator * to allocate the memory, which will return aligned memory for the specified @@ -80,6 +105,13 @@ static void *sev_es_tmr; #define NV_LENGTH (32 * 1024) static void *sev_init_ex_buffer; +/* + * Hygon CSV build info: + * Hygon CSV build info is 32 bits in length, rather than 8 bits as + * in AMD SEV.
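+ * It is assembled in sev_get_api_version() as
 + * (status.flags >> 9) | ((u32)status.build << 23), packing the 8-bit
 + * SEV build number above the flag-derived bits.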
+ */ +static u32 hygon_csv_build; + static inline bool sev_version_greater_or_equal(u8 maj, u8 min) { struct sev_device *sev = psp_master->sev_data; @@ -93,6 +125,11 @@ static inline bool sev_version_greater_or_equal(u8 maj, u8 min) return false; } +static inline bool csv_version_greater_or_equal(u32 build) +{ + return hygon_csv_build >= build; +} + static void sev_irq_handler(int irq, void *data, unsigned int status) { struct sev_device *sev = data; @@ -104,7 +141,9 @@ static void sev_irq_handler(int irq, void *data, unsigned int status) /* Check if it is SEV command completion: */ reg = ioread32(sev->io_regs + sev->vdata->cmdresp_reg); - if (FIELD_GET(PSP_CMDRESP_RESP, reg)) { + if (FIELD_GET(PSP_CMDRESP_RESP, reg) || + ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && + (csv_comm_mode == CSV_COMM_RINGBUFFER_ON))) { sev->int_rcvd = 1; wake_up(&sev->int_queue); } @@ -125,8 +164,59 @@ static int sev_wait_cmd_ioc(struct sev_device *sev, return 0; } +static int csv_wait_cmd_ioc_ring_buffer(struct sev_device *sev, + unsigned int *reg, + unsigned int timeout) +{ + int ret; + + ret = wait_event_timeout(sev->int_queue, + sev->int_rcvd, timeout * HZ); + if (!ret) + return -ETIMEDOUT; + + *reg = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + return 0; +} + static int sev_cmd_buffer_len(int cmd) { + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (cmd) { + case CSV_CMD_HGSC_CERT_IMPORT: + return sizeof(struct csv_data_hgsc_cert_import); + case CSV_CMD_RING_BUFFER: + return sizeof(struct csv_data_ring_buffer); + case CSV3_CMD_LAUNCH_ENCRYPT_DATA: + return sizeof(struct csv3_data_launch_encrypt_data); + case CSV3_CMD_LAUNCH_ENCRYPT_VMCB: + return sizeof(struct csv3_data_launch_encrypt_vmcb); + case CSV3_CMD_UPDATE_NPT: + return sizeof(struct csv3_data_update_npt); + case CSV3_CMD_SET_SMR: + return sizeof(struct csv3_data_set_smr); + case CSV3_CMD_SET_SMCR: + return sizeof(struct csv3_data_set_smcr); + case CSV3_CMD_SET_GUEST_PRIVATE_MEMORY: + return sizeof(struct csv3_data_set_guest_private_memory); + case CSV3_CMD_DBG_READ_VMSA: + return sizeof(struct csv3_data_dbg_read_vmsa); + case CSV3_CMD_DBG_READ_MEM: + return sizeof(struct csv3_data_dbg_read_mem); + case CSV3_CMD_SEND_ENCRYPT_DATA: + return sizeof(struct csv3_data_send_encrypt_data); + case CSV3_CMD_SEND_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_send_encrypt_context); + case CSV3_CMD_RECEIVE_ENCRYPT_DATA: + return sizeof(struct csv3_data_receive_encrypt_data); + case CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT: + return sizeof(struct csv3_data_receive_encrypt_context); + default: + break; + } + } + switch (cmd) { case SEV_CMD_INIT: return sizeof(struct sev_data_init); case SEV_CMD_INIT_EX: return sizeof(struct sev_data_init_ex); @@ -389,16 +479,325 @@ static int __sev_do_cmd_locked(int cmd, void *data, int *psp_ret) return ret; } +static int __psp_do_cmd_locked(int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* Get the physical address of the command buffer */ + phys_lsb = data ? lower_32_bits(__psp_pa(data)) : 0; + phys_msb = data ? 
upper_32_bits(__psp_pa(data)) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + +static int __csv_ring_buffer_enter_locked(int *error) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + int ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = __psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int csv_get_cmd_status(struct sev_device *sev, int prio, int index) +{ + struct csv_queue *queue = &sev->ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)queue->data; + + return statval[index].status; +} + +static int __csv_do_ringbuf_cmds_locked(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int rb_tail; + unsigned int rb_ctl; + int last_cmd_index; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (sev->ring_buffer[CSV_COMMAND_PRIORITY_HIGH].cmd_ptr.tail + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= sev->ring_buffer[CSV_COMMAND_PRIORITY_LOW].cmd_ptr.tail; + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + + /* the PSP responds to x86 only when all queues are empty or an error happens */ + rb_ctl = PSP_RBCTL_X86_WRITES | + PSP_RBCTL_RBMODE_ACT | + PSP_RBCTL_CLR_INTSTAT; + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in the ring buffer to complete */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, psp_timeout * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + dev_err(sev->dev, "csv ringbuffer mode command timed out, disabling PSP\n"); + psp_dead = true; + + return ret; + } + + /* a command error happened */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + if (psp_ret) { + last_cmd_index = (reg & PSP_RBHEAD_QHI_HEAD_MASK) + >> PSP_RBHEAD_QHI_HEAD_SHIFT; + *psp_ret = csv_get_cmd_status(sev, CSV_COMMAND_PRIORITY_HIGH, + last_cmd_index); + if (*psp_ret == 0) { + last_cmd_index = reg & PSP_RBHEAD_QLO_HEAD_MASK; + *psp_ret = csv_get_cmd_status(sev, + CSV_COMMAND_PRIORITY_LOW, last_cmd_index); + } + } + + return ret; +} + +static int csv_do_ringbuf_cmds(int *psp_ret) +{ + struct sev_user_data_status data; + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + + rc = __csv_ring_buffer_enter_locked(psp_ret); + if (rc) + goto cmd_unlock; + + rc = __csv_do_ringbuf_cmds_locked(psp_ret); + + /* exit ringbuf mode by sending a command in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +cmd_unlock: + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + static int sev_do_cmd(int cmd, void *data, int *psp_ret) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_do_cmd_locked(cmd, data, psp_ret); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + +static int __vpsp_do_cmd_locked(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + phys_addr_t phys_addr; + unsigned int phys_lsb, phys_msb; + unsigned int reg, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + if (data && WARN_ON_ONCE(!virt_addr_valid(data))) + return -EINVAL; + + /* Get the physical address of the command buffer */ + phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ?
+ phys_addr = PUT_PSP_VID(__psp_pa(data), vid); + phys_lsb = data ? lower_32_bits(phys_addr) : 0; + phys_msb = data ? upper_32_bits(phys_addr) : 0; + + dev_dbg(sev->dev, "sev command id %#x buffer 0x%08x%08x timeout %us\n", + cmd, phys_msb, phys_lsb, psp_timeout); + + print_hex_dump_debug("(in): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + iowrite32(phys_lsb, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + iowrite32(phys_msb, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + sev->int_rcvd = 0; + + reg = FIELD_PREP(SEV_CMDRESP_CMD, cmd) | SEV_CMDRESP_IOC; + iowrite32(reg, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for command completion */ + ret = sev_wait_cmd_ioc(sev, &reg, psp_timeout); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(sev->dev, "sev command %#x timed out, disabling PSP\n", cmd); + psp_dead = true; + + return ret; + } + + psp_timeout = psp_cmd_timeout; + + if (psp_ret) + *psp_ret = FIELD_GET(PSP_CMDRESP_STS, reg); + + if (FIELD_GET(PSP_CMDRESP_STS, reg)) { + dev_dbg(sev->dev, "sev command %#x failed (%#010lx)\n", + cmd, FIELD_GET(PSP_CMDRESP_STS, reg)); + ret = -EIO; + } + + print_hex_dump_debug("(out): ", DUMP_PREFIX_OFFSET, 16, 2, data, + sev_cmd_buffer_len(cmd), false); + + return ret; +} + +int psp_do_cmd(int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + rc = __psp_do_cmd_locked(cmd, data, psp_ret); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } +EXPORT_SYMBOL_GPL(psp_do_cmd); static int __sev_init_locked(int *error) { @@ -500,8 +899,12 @@ static int __sev_platform_init_locked(int *error) dev_dbg(sev->dev, "SEV firmware initialized\n"); - dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, - sev->api_minor, sev->build); + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + dev_info(sev->dev, "CSV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, hygon_csv_build); + else + dev_info(sev->dev, "SEV API:%d.%d build:%d\n", sev->api_major, + sev->api_minor, sev->build); return 0; } @@ -509,10 +912,20 @@ static int __sev_platform_init_locked(int *error) int sev_platform_init(int *error) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_init_locked(error); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -520,16 +933,27 @@ EXPORT_SYMBOL_GPL(sev_platform_init); static int __sev_platform_shutdown_locked(int *error) { - struct sev_device *sev = psp_master->sev_data; + struct psp_device *psp = psp_master; + struct sev_device *sev; int ret; - if (!sev || sev->state == SEV_STATE_UNINIT) + if (!psp || !psp->sev_data) + return 0; + + sev = psp->sev_data; + + if (sev->state == SEV_STATE_UNINIT) + return 0; ret = __sev_do_cmd_locked(SEV_CMD_SHUTDOWN, NULL, error); if (ret) return ret;
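+ /* A firmware SHUTDOWN presumably also invalidates any active CSV ring-buffer session, so on Hygon parts drop back to mailbox mode and free the ring-buffer queues below. */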
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + csv_comm_mode = CSV_COMM_MAILBOX_ON; + csv_ring_buffer_queue_free(); + } + sev->state = SEV_STATE_UNINIT; dev_dbg(sev->dev, "SEV firmware shutdown\n"); @@ -539,10 +963,20 @@ static int __sev_platform_shutdown_locked(int *error) static int sev_platform_shutdown(int *error) { int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } rc = __sev_platform_shutdown_locked(NULL); - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return rc; } @@ -717,6 +1151,10 @@ static int sev_get_api_version(void) sev->build = status.build; sev->state = status.state; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + hygon_csv_build = (status.flags >> 9) | + ((u32)status.build << 23); + return 0; } @@ -726,6 +1164,14 @@ static int sev_get_firmware(struct device *dev, char fw_name_specific[SEV_FW_NAME_SIZE]; char fw_name_subset[SEV_FW_NAME_SIZE]; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + /* Check for CSV FW using the generic name: csv.fw */ + if (firmware_request_nowarn(firmware, CSV_FW_FILE, dev) >= 0) + return 0; + else + return -ENOENT; + } + snprintf(fw_name_specific, sizeof(fw_name_specific), "amd/amd_sev_fam%.2xh_model%.2xh.sbin", boot_cpu_data.x86, boot_cpu_data.x86_model); @@ -764,7 +1210,9 @@ static int sev_update_firmware(struct device *dev) struct page *p; u64 data_size; - if (!sev_version_greater_or_equal(0, 15)) { + if (!sev_version_greater_or_equal(0, 15) && + (boot_cpu_data.x86_vendor != X86_VENDOR_HYGON || + !csv_version_greater_or_equal(1667))) { dev_dbg(dev, "DOWNLOAD_FIRMWARE not supported\n"); return -1; } @@ -810,9 +1258,14 @@ static int sev_update_firmware(struct device *dev) ret = sev_do_cmd(SEV_CMD_DOWNLOAD_FIRMWARE, data, &error); if (ret) - dev_dbg(dev, "Failed to update SEV firmware: %#x\n", error); + dev_dbg(dev, "Failed to update %s firmware: %#x\n", + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + ? "CSV" : "SEV", + error); else - dev_info(dev, "SEV firmware update successful\n"); + dev_info(dev, "%s firmware update successful\n", + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + ? "CSV" : "SEV"); __free_pages(p, order); @@ -1064,15 +1517,127 @@ static int sev_ioctl_do_pdh_export(struct sev_issue_cmd *argp, bool writable) return ret; } -static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +static int csv_ioctl_do_download_firmware(struct sev_issue_cmd *argp) { - void __user *argp = (void __user *)arg; - struct sev_issue_cmd input; - int ret = -EFAULT; - bool writable = file->f_mode & FMODE_WRITE; + struct sev_data_download_firmware *data = NULL; + struct csv_user_data_download_firmware input; + int ret, order; + struct page *p; + u64 data_size; - if (!psp_master || !psp_master->sev_data) - return -ENODEV; + /* Only support DOWNLOAD_FIRMWARE if the build is greater than or equal to 1667 */ + if (!csv_version_greater_or_equal(1667)) { + pr_err("DOWNLOAD_FIRMWARE not supported\n"); + return -EIO; + } + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + if (!input.address) { + argp->error = SEV_RET_INVALID_ADDRESS; + return -EINVAL; + } + + if (!input.length || input.length > CSV_FW_MAX_SIZE) { + argp->error = SEV_RET_INVALID_LEN; + return -EINVAL; + } + + /* + * CSV FW expects the physical address given to it to be 32 + * byte aligned. 
Memory allocated has structure placed at the + * beginning followed by the firmware being passed to the CSV + * FW. Allocate enough memory for data structure + alignment + * padding + CSV FW. + */ + data_size = ALIGN(sizeof(struct sev_data_download_firmware), 32); + + order = get_order(input.length + data_size); + p = alloc_pages(GFP_KERNEL, order); + if (!p) + return -ENOMEM; + + /* + * Copy firmware data to a kernel allocated contiguous + * memory region. + */ + data = page_address(p); + if (copy_from_user((void *)(page_address(p) + data_size), + (void *)input.address, input.length)) { + ret = -EFAULT; + goto err_free_page; + } + + data->address = __psp_pa(page_address(p) + data_size); + data->len = input.length; + + ret = __sev_do_cmd_locked(SEV_CMD_DOWNLOAD_FIRMWARE, data, &argp->error); + if (ret) + pr_err("Failed to update CSV firmware: %#x\n", argp->error); + else + pr_info("CSV firmware update successful\n"); + +err_free_page: + __free_pages(p, order); + + return ret; +} + +static int csv_ioctl_do_hgsc_import(struct sev_issue_cmd *argp) +{ + struct csv_user_data_hgsc_cert_import input; + struct csv_data_hgsc_cert_import *data; + void *hgscsk_blob, *hgsc_blob; + int ret; + + if (copy_from_user(&input, (void __user *)argp->data, sizeof(input))) + return -EFAULT; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* copy HGSCSK certificate blobs from userspace */ + hgscsk_blob = psp_copy_user_blob(input.hgscsk_cert_address, input.hgscsk_cert_len); + if (IS_ERR(hgscsk_blob)) { + ret = PTR_ERR(hgscsk_blob); + goto e_free; + } + + data->hgscsk_cert_address = __psp_pa(hgscsk_blob); + data->hgscsk_cert_len = input.hgscsk_cert_len; + + /* copy HGSC certificate blobs from userspace */ + hgsc_blob = psp_copy_user_blob(input.hgsc_cert_address, input.hgsc_cert_len); + if (IS_ERR(hgsc_blob)) { + ret = PTR_ERR(hgsc_blob); + goto e_free_hgscsk; + } + + data->hgsc_cert_address = __psp_pa(hgsc_blob); + data->hgsc_cert_len = input.hgsc_cert_len; + + ret = __sev_do_cmd_locked(CSV_CMD_HGSC_CERT_IMPORT, data, &argp->error); + + kfree(hgsc_blob); +e_free_hgscsk: + kfree(hgscsk_blob); +e_free: + kfree(data); + return ret; +} + +static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sev_issue_cmd input; + int ret = -EFAULT; + bool writable = file->f_mode & FMODE_WRITE; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (!psp_master || !psp_master->sev_data) + return -ENODEV; if (ioctl != SEV_ISSUE_CMD) return -EINVAL; @@ -1080,10 +1645,40 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) if (copy_from_user(&input, argp, sizeof(struct sev_issue_cmd))) return -EFAULT; - if (input.cmd > SEV_MAX) - return -EINVAL; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (input.cmd > CSV_MAX) + return -EINVAL; + } else { + if (input.cmd > SEV_MAX) + return -EINVAL; + } - mutex_lock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) + return -EBUSY; + } else { + mutex_lock(&sev_cmd_mutex); + } + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + switch (input.cmd) { + case CSV_PLATFORM_INIT: + ret = __sev_platform_init_locked(&input.error); + goto result_to_user; + case CSV_PLATFORM_SHUTDOWN: + ret = __sev_platform_shutdown_locked(&input.error); + goto result_to_user; + case CSV_DOWNLOAD_FIRMWARE: + ret = csv_ioctl_do_download_firmware(&input); + goto 
result_to_user; + case CSV_HGSC_CERT_IMPORT: + ret = csv_ioctl_do_hgsc_import(&input); + goto result_to_user; + default: + break; + } + } switch (input.cmd) { @@ -1120,10 +1715,14 @@ static long sev_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) goto out; } +result_to_user: if (copy_to_user(argp, &input, sizeof(struct sev_issue_cmd))) ret = -EFAULT; out: - mutex_unlock(&sev_cmd_mutex); + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); return ret; } @@ -1163,6 +1762,621 @@ int sev_guest_df_flush(int *error) } EXPORT_SYMBOL_GPL(sev_guest_df_flush); +int csv_ring_buffer_queue_free(void); + +static int __csv_ring_buffer_queue_init(struct csv_ringbuffer_queue *ring_buffer) +{ + int ret = 0; + void *cmd_ptr_buffer = NULL; + void *stat_val_buffer = NULL; + + memset((void *)ring_buffer, 0, sizeof(struct csv_ringbuffer_queue)); + + cmd_ptr_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!cmd_ptr_buffer) + return -ENOMEM; + + csv_queue_init(&ring_buffer->cmd_ptr, cmd_ptr_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + + stat_val_buffer = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL); + if (!stat_val_buffer) { + ret = -ENOMEM; + goto free_cmdptr; + } + + csv_queue_init(&ring_buffer->stat_val, stat_val_buffer, + CSV_RING_BUFFER_SIZE, CSV_RING_BUFFER_ESIZE); + return 0; + +free_cmdptr: + kfree(cmd_ptr_buffer); + + return ret; +} + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_cmdptr_entry cmdptr = { }; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + cmdptr.cmd_buf_ptr = __psp_pa(data); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + if (csv_enqueue_cmd(&sev->ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) + return -EFAULT; + + return 0; +} +EXPORT_SYMBOL_GPL(csv_fill_cmd_queue); + +int csv_check_stat_queue_status(int *psp_ret) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + unsigned int len; + int prio; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (prio = CSV_COMMAND_PRIORITY_HIGH; + prio < CSV_COMMAND_PRIORITY_NUM; prio++) { + do { + struct csv_statval_entry statval; + + len = csv_dequeue_stat(&sev->ring_buffer[prio].stat_val, + &statval, 1); + if (len) { + if (statval.status != 0) { + *psp_ret = statval.status; + return -EFAULT; + } + } + } while (len); + } + + return 0; +} +EXPORT_SYMBOL_GPL(csv_check_stat_queue_status); + +int csv_ring_buffer_queue_init(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + int i, ret = 0; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&sev->ring_buffer[i]); + if (ret) + goto e_free; + } + + return 0; + +e_free: + csv_ring_buffer_queue_free(); + return ret; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_init); + +int csv_ring_buffer_queue_free(void) +{ + struct psp_device *psp = psp_master; + struct sev_device *sev; + struct csv_ringbuffer_queue *ring_buffer; + int i; + + if (!psp || !psp->sev_data) + return -ENODEV; + + sev = psp->sev_data; + + for (i = 0; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ring_buffer = &sev->ring_buffer[i]; + + if (ring_buffer->cmd_ptr.data) { + kfree((void *)ring_buffer->cmd_ptr.data); + ring_buffer->cmd_ptr.data = 0; + } + + if (ring_buffer->stat_val.data) { + kfree((void 
*)ring_buffer->stat_val.data); + ring_buffer->stat_val.data = 0; + } + } + return 0; +} +EXPORT_SYMBOL_GPL(csv_ring_buffer_queue_free); + +static int get_queue_tail(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.tail & ringbuffer->cmd_ptr.mask; +} + +static int get_queue_head(struct csv_ringbuffer_queue *ringbuffer) +{ + return ringbuffer->cmd_ptr.head & ringbuffer->cmd_ptr.mask; +} + +static void vpsp_set_cmd_status(int prio, int index, int status) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + statval[index].status = status; +} + +static int vpsp_get_cmd_status(int prio, int index) +{ + struct csv_queue *ringbuf = &vpsp_ring_buffer[prio].stat_val; + struct csv_statval_entry *statval = (struct csv_statval_entry *)ringbuf->data; + + return statval[index].status; +} + +static unsigned int vpsp_queue_cmd_size(int prio) +{ + return csv_cmd_queue_size(&vpsp_ring_buffer[prio].cmd_ptr); +} + +static int vpsp_dequeue_cmd(int prio, int index, + struct csv_cmdptr_entry *cmd_ptr) +{ + mutex_lock(&vpsp_rb_mutex); + + /* The status update must be before the head update */ + vpsp_set_cmd_status(prio, index, 0); + csv_dequeue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, (void *)cmd_ptr, 1); + + mutex_unlock(&vpsp_rb_mutex); + + return 0; +} + +/* + * Populate the command from the virtual machine to the queue to + * support execution in ringbuffer mode + */ +static int vpsp_fill_cmd_queue(uint32_t vid, int prio, int cmd, void *data, uint16_t flags) +{ + struct csv_cmdptr_entry cmdptr = { }; + int index = -1; + + cmdptr.cmd_buf_ptr = PUT_PSP_VID(__psp_pa(data), vid); + cmdptr.cmd_id = cmd; + cmdptr.cmd_flags = flags; + + mutex_lock(&vpsp_rb_mutex); + index = get_queue_tail(&vpsp_ring_buffer[prio]); + + /* If status is equal to VPSP_CMD_STATUS_RUNNING, then the queue is full */ + if (vpsp_get_cmd_status(prio, index) == VPSP_CMD_STATUS_RUNNING) { + index = -1; + goto out; + } + + /* The status must be written first, and then the cmd can be enqueued */ + vpsp_set_cmd_status(prio, index, VPSP_CMD_STATUS_RUNNING); + if (csv_enqueue_cmd(&vpsp_ring_buffer[prio].cmd_ptr, &cmdptr, 1) != 1) { + vpsp_set_cmd_status(prio, index, 0); + index = -1; + goto out; + } + +out: + mutex_unlock(&vpsp_rb_mutex); + return index; +} + +static void vpsp_ring_update_head(struct csv_ringbuffer_queue *ring_buffer, + uint32_t new_head) +{ + uint32_t orig_head = get_queue_head(ring_buffer); + uint32_t comple_num = 0; + + if (new_head >= orig_head) + comple_num = new_head - orig_head; + else + comple_num = ring_buffer->cmd_ptr.mask - (orig_head - new_head) + + 1; + + ring_buffer->cmd_ptr.head += comple_num; +} + +static int vpsp_ring_buffer_queue_init(void) +{ + int i; + int ret; + + for (i = CSV_COMMAND_PRIORITY_HIGH; i < CSV_COMMAND_PRIORITY_NUM; i++) { + ret = __csv_ring_buffer_queue_init(&vpsp_ring_buffer[i]); + if (ret) + return ret; + } + + return 0; +} + +static int __vpsp_ring_buffer_enter_locked(int *error) +{ + int ret; + struct csv_data_ring_buffer *data; + struct csv_ringbuffer_queue *low_queue; + struct csv_ringbuffer_queue *hi_queue; + struct sev_device *sev = psp_master->sev_data; + + if (csv_comm_mode == CSV_COMM_RINGBUFFER_ON) + return -EEXIST; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + low_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]; + hi_queue = &vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]; + + data->queue_lo_cmdptr_address = 
__psp_pa(low_queue->cmd_ptr.data_align); + data->queue_lo_statval_address = __psp_pa(low_queue->stat_val.data_align); + data->queue_hi_cmdptr_address = __psp_pa(hi_queue->cmd_ptr.data_align); + data->queue_hi_statval_address = __psp_pa(hi_queue->stat_val.data_align); + data->queue_lo_size = 1; + data->queue_hi_size = 1; + data->int_on_empty = 1; + + ret = __sev_do_cmd_locked(CSV_CMD_RING_BUFFER, data, error); + if (!ret) { + iowrite32(0, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + csv_comm_mode = CSV_COMM_RINGBUFFER_ON; + } + + kfree(data); + return ret; +} + +static int __vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct psp_device *psp = psp_master; + unsigned int reg, ret = 0; + unsigned int rb_tail, rb_head; + unsigned int rb_ctl; + struct sev_device *sev; + + if (!psp) + return -ENODEV; + + if (psp_dead) + return -EBUSY; + + sev = psp->sev_data; + + /* update rb tail */ + rb_tail = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + rb_tail &= (~PSP_RBTAIL_QHI_TAIL_MASK); + rb_tail |= (get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBTAIL_QHI_TAIL_SHIFT); + rb_tail &= (~PSP_RBTAIL_QLO_TAIL_MASK); + rb_tail |= get_queue_tail(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_tail, sev->io_regs + sev->vdata->cmdbuff_addr_hi_reg); + + /* update rb head */ + rb_head = ioread32(sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + rb_head &= (~PSP_RBHEAD_QHI_HEAD_MASK); + rb_head |= (get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH]) + << PSP_RBHEAD_QHI_HEAD_SHIFT); + rb_head &= (~PSP_RBHEAD_QLO_HEAD_MASK); + rb_head |= get_queue_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW]); + iowrite32(rb_head, sev->io_regs + sev->vdata->cmdbuff_addr_lo_reg); + + /* update rb ctl to trigger psp irq */ + sev->int_rcvd = 0; + /* PSP responds to x86 only when all queues are empty or an error happens */ + rb_ctl = (PSP_RBCTL_X86_WRITES | PSP_RBCTL_RBMODE_ACT | PSP_RBCTL_CLR_INTSTAT); + iowrite32(rb_ctl, sev->io_regs + sev->vdata->cmdresp_reg); + + /* wait for all commands in the ring buffer to complete */ + ret = csv_wait_cmd_ioc_ring_buffer(sev, &reg, psp_timeout * 10); + if (ret) { + if (psp_ret) + *psp_ret = 0; + + dev_err(psp->dev, "sev command in ringbuffer mode timed out, disabling PSP\n"); + psp_dead = true; + return ret; + } + /* a cmd error happened */ + if (reg & PSP_RBHEAD_QPAUSE_INT_STAT) + ret = -EFAULT; + + /* update head */ + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_HIGH], + (reg & PSP_RBHEAD_QHI_HEAD_MASK) >> PSP_RBHEAD_QHI_HEAD_SHIFT); + vpsp_ring_update_head(&vpsp_ring_buffer[CSV_COMMAND_PRIORITY_LOW], + reg & PSP_RBHEAD_QLO_HEAD_MASK); + + if (psp_ret) + *psp_ret = vpsp_get_cmd_status(prio, index); + + return ret; +} + +static int vpsp_do_ringbuf_cmds_locked(int *psp_ret, uint8_t prio, int index) +{ + struct sev_user_data_status data; + int rc; + + rc = __vpsp_ring_buffer_enter_locked(psp_ret); + if (rc) + goto end; + + rc = __vpsp_do_ringbuf_cmds_locked(psp_ret, prio, index); + + /* exit ringbuf mode by sending a CMD in mailbox mode */ + __sev_do_cmd_locked(SEV_CMD_PLATFORM_STATUS, + &data, NULL); + csv_comm_mode = CSV_COMM_MAILBOX_ON; + +end: + return rc; +}
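+/* Issuing any mailbox-mode command appears to terminate the ring-buffer session; SEV_CMD_PLATFORM_STATUS is used above (and in csv_do_ringbuf_cmds()) seemingly only as a harmless command to flip the firmware back to mailbox mode before csv_comm_mode is reset — an assumption inferred from how both exit paths are written. */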
+ +/** + * struct user_data_status - PLATFORM_STATUS command parameters + * + * @api_major: major API version + * @api_minor: minor API version + * @state: platform state + * @owner: self-owned or externally owned + * @chip_secure: ES or MP chip + * @fw_enc: whether the FW is encrypted + * @fw_sign: whether the FW is signed + * @reserved1: reserved, should be set to zero + * @config_es: platform config flags for csv-es + * @build: Firmware Build ID for this API version + * @guest_count: number of active guests + */ +struct user_data_status { + uint8_t api_major; /* Out */ + uint8_t api_minor; /* Out */ + uint8_t state; /* Out */ + uint8_t owner : 1, /* Out */ + chip_secure : 1, /* Out */ + fw_enc : 1, /* Out */ + fw_sign : 1, /* Out */ + reserved1 : 4; /* reserved */ + uint32_t config_es : 1, /* Out */ + build : 31; /* Out */ + uint32_t guest_count; /* Out */ +} __packed; + +/* + * Check whether the firmware supports ringbuffer mode and parse + * commands from the virtual machine + */ +static int vpsp_rb_check_and_cmd_prio_parse(uint8_t *prio, + struct vpsp_cmd *vcmd) +{ + int ret, error; + int rb_supported; + int rb_check_old = RB_NOT_CHECK; + struct user_data_status *status = NULL; + + if (atomic_try_cmpxchg(&vpsp_rb_check_status, &rb_check_old, + RB_CHECKING)) { + /* get buildid to check if the firmware supports ringbuffer mode */ + status = kzalloc(sizeof(*status), GFP_KERNEL); + if (!status) { + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + ret = sev_platform_status((struct sev_user_data_status *)status, + &error); + if (ret) { + pr_warn("failed to get status[%#x], use default command mode.\n", error); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + + /* check if the firmware supports the ringbuffer mode */ + if (VPSP_RB_IS_SUPPORTED(status->build)) { + if (vpsp_ring_buffer_queue_init()) { + pr_warn("vpsp_ring_buffer_queue_init fail, use default command mode\n"); + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + goto end; + } + WRITE_ONCE(vpsp_rb_supported, 1); + } + + atomic_set(&vpsp_rb_check_status, RB_CHECKED); + } + +end: + rb_supported = READ_ONCE(vpsp_rb_supported); + /* parse prio by vcmd */ + if (rb_supported && vcmd->is_high_rb) + *prio = CSV_COMMAND_PRIORITY_HIGH; + else + *prio = CSV_COMMAND_PRIORITY_LOW; + /* clear rb level bit in vcmd */ + vcmd->is_high_rb = 0; + + kfree(status); + return rb_supported; +}
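+/* The RB_NOT_CHECK/RB_CHECKING/RB_CHECKED states above let exactly one caller probe the firmware build for ring-buffer support via atomic_try_cmpxchg(); every later caller simply reads the cached vpsp_rb_supported flag. */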
+ +/* + * Try to obtain the result again by the command index; this + * interface is used in ringbuffer mode + */ +int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index, void *data, + struct vpsp_ret *psp_ret) +{ + int ret = 0; + struct csv_cmdptr_entry cmd = {0}; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + /* Get the result directly if the command has been executed */ + if (index >= 0 && vpsp_get_cmd_status(prio, index) != + VPSP_CMD_STATUS_RUNNING) { + psp_ret->pret = vpsp_get_cmd_status(prio, index); + psp_ret->status = VPSP_FINISH; + return 0; + } + + if (is_hygon_psp && mutex_enabled) + ret = psp_mutex_trylock(&psp_misc->data_pg_aligned->mb_mutex); + else + ret = mutex_trylock(&sev_cmd_mutex); + + if (ret) { + /* Use mailbox mode to execute a command if there is only one command */ + if (vpsp_queue_cmd_size(prio) == 1) { + /* dequeue command from the queue */ + vpsp_dequeue_cmd(prio, index, &cmd); + ret = __vpsp_do_cmd_locked(vid, cmd.cmd_id, data, + (int *)psp_ret); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + } else { + ret = vpsp_do_ringbuf_cmds_locked((int *)psp_ret, prio, + index); + psp_ret->status = VPSP_FINISH; + if (unlikely(ret)) { + pr_err("[%s]: vpsp_do_ringbuf_cmds_locked failed %d\n", + __func__, ret); + goto end; + } + } + } else { + /* Change the command to the running state if getting the mutex fails */ + psp_ret->index = index; + psp_ret->status = VPSP_RUNNING; + return 0; + } +end: + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_get_result); + +int vpsp_do_cmd(uint32_t vid, int cmd, void *data, int *psp_ret) +{ + int rc; + int mutex_enabled = READ_ONCE(psp_mutex_enabled); + + if (is_hygon_psp && mutex_enabled) { + if (psp_mutex_lock_timeout(&psp_misc->data_pg_aligned->mb_mutex, + PSP_MUTEX_TIMEOUT) != 1) { + return -EBUSY; + } + } else { + mutex_lock(&sev_cmd_mutex); + } + + rc = __vpsp_do_cmd_locked(vid, cmd, data, psp_ret); + + if (is_hygon_psp && mutex_enabled) + psp_mutex_unlock(&psp_misc->data_pg_aligned->mb_mutex); + else + mutex_unlock(&sev_cmd_mutex); + + return rc; +} + +/* + * Send the virtual psp command to the PSP device and try to get the + * execution result. This interface and the vpsp_try_get_result + * interface are executed asynchronously. If the execution succeeds, + * the result is returned to the VM; if it fails, the + * vpsp_try_get_result interface is used to obtain the result + * later again. + */ +int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) +{ + int ret = 0; + int rb_supported; + int index = -1; + uint8_t prio = CSV_COMMAND_PRIORITY_LOW; + + /* ringbuffer mode check and parse command prio */ + rb_supported = vpsp_rb_check_and_cmd_prio_parse(&prio, + (struct vpsp_cmd *)&cmd); + if (rb_supported) { + /* fill command in ringbuffer's queue and get index */ + index = vpsp_fill_cmd_queue(vid, prio, cmd, data, 0); + if (unlikely(index < 0)) { + /* do mailbox command if queuing failed */ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + goto end; + } + + /* try to get result from the ringbuffer command */ + ret = vpsp_try_get_result(vid, prio, index, data, psp_ret); + if (unlikely(ret)) { + pr_err("[%s]: vpsp_try_get_result failed %d\n", __func__, ret); + goto end; + } + } else { + /* mailbox mode */ + ret = vpsp_do_cmd(vid, cmd, data, (int *)psp_ret); + if (unlikely(ret)) { + if (ret == -EIO) { + ret = 0; + } else { + pr_err("[%s]: psp do cmd error, %d\n", + __func__, psp_ret->pret); + ret = -EIO; + goto end; + } + } + psp_ret->status = VPSP_FINISH; + } + +end: + return ret; +} +EXPORT_SYMBOL_GPL(vpsp_try_do_cmd); + static void sev_exit(struct kref *ref) { misc_deregister(&misc_dev->misc); @@ -1170,6 +2384,13 @@ static void sev_exit(struct kref *ref) misc_dev = NULL; } +/* Code to set all of the function pointers for CSV. */ +static inline void csv_install_hooks(void) +{ + /* Install the hook functions for CSV. 
*/ + csv_hooks.sev_do_cmd = sev_do_cmd; +} + static int sev_misc_init(struct sev_device *sev) { struct device *dev = sev->dev; @@ -1199,6 +2420,9 @@ static int sev_misc_init(struct sev_device *sev) return ret; kref_init(&misc_dev->refcount); + + /* Install the hook functions for CSV */ + csv_install_hooks(); } else { kref_get(&misc_dev->refcount); } @@ -1312,6 +2536,15 @@ int sev_issue_cmd_external_user(struct file *filep, unsigned int cmd, } EXPORT_SYMBOL_GPL(sev_issue_cmd_external_user); +int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) +{ + if (!filep || filep->f_op != &sev_fops) + return -EBADF; + + return csv_do_ringbuf_cmds(psp_ret); +} +EXPORT_SYMBOL_GPL(csv_issue_ringbuf_cmds_external_user); + void sev_pci_init(void) { struct sev_device *sev = psp_master->sev_data; @@ -1352,6 +2585,10 @@ void sev_pci_init(void) if (!psp_init_on_probe) return; + /* Set SMR for HYGON CSV3 */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) + csv_platform_cmd_set_secure_memory_region(sev, &error); + /* Initialize the platform */ rc = sev_platform_init(&error); if (rc) diff --git a/drivers/crypto/ccp/sev-dev.h b/drivers/crypto/ccp/sev-dev.h index 778c95155e745becb09ada5f21d83044e6a120e1..372183b8c58f5fcd4017472e4f4c48b3a63b6f81 100644 --- a/drivers/crypto/ccp/sev-dev.h +++ b/drivers/crypto/ccp/sev-dev.h @@ -25,6 +25,8 @@ #include #include +#include "psp-ringbuf.h" + #define SEV_CMDRESP_CMD GENMASK(26, 16) #define SEV_CMD_COMPLETE BIT(1) #define SEV_CMDRESP_IOC BIT(0) @@ -52,6 +54,8 @@ struct sev_device { u8 build; void *cmd_buf; + + struct csv_ringbuffer_queue ring_buffer[CSV_COMMAND_PRIORITY_NUM]; }; int sev_dev_init(struct psp_device *psp); diff --git a/drivers/crypto/ccp/sp-dev.h b/drivers/crypto/ccp/sp-dev.h index 2329ad524b4945b29bac80e1b0843c4de6a72a54..d04d9743b68000cb549d44c33892948209edf2a8 100644 --- a/drivers/crypto/ccp/sp-dev.h +++ b/drivers/crypto/ccp/sp-dev.h @@ -76,6 +76,11 @@ struct psp_vdata { const unsigned int intsts_reg; const unsigned int bootloader_info_reg; const unsigned int platform_features; +#ifdef CONFIG_HYGON_PSP2CPU_CMD + const unsigned int p2c_cmdresp_reg; + const unsigned int p2c_cmdbuff_addr_lo_reg; + const unsigned int p2c_cmdbuff_addr_hi_reg; +#endif }; /* Structure to hold SP device data */ diff --git a/drivers/crypto/ccp/sp-pci.c b/drivers/crypto/ccp/sp-pci.c index b6ab56abeb682f89f913558e957ac364d57fbeec..4f6a0507f7cd00585dad3190f36cc176b3291048 100644 --- a/drivers/crypto/ccp/sp-pci.c +++ b/drivers/crypto/ccp/sp-pci.c @@ -129,9 +129,13 @@ static umode_t psp_firmware_is_visible(struct kobject *kobj, struct attribute *a if (!psp) return 0; - +#ifdef CONFIG_X86 + if (attr == &dev_attr_bootloader_version.attr && + psp->vdata->bootloader_info_reg && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) +#else if (attr == &dev_attr_bootloader_version.attr && psp->vdata->bootloader_info_reg) +#endif val = ioread32(psp->io_regs + psp->vdata->bootloader_info_reg); if (attr == &dev_attr_tee_version.attr && @@ -417,6 +421,12 @@ static const struct sev_vdata sevv2 = { .cmdbuff_addr_hi_reg = 0x109e4, /* C2PMSG_57 */ }; +static const struct sev_vdata csvv1 = { + .cmdresp_reg = 0x10580, + .cmdbuff_addr_lo_reg = 0x105e0, + .cmdbuff_addr_hi_reg = 0x105e4, +}; + static const struct tee_vdata teev1 = { .cmdresp_reg = 0x10544, /* C2PMSG_17 */ .cmdbuff_addr_lo_reg = 0x10548, /* C2PMSG_18 */ @@ -453,6 +463,11 @@ static const struct psp_vdata pspv1 = { .feature_reg = 0x105fc, /* C2PMSG_63 */ .inten_reg = 0x10610, /* P2CMSG_INTEN */ .intsts_reg = 0x10614, /* 
P2CMSG_INTSTS */ +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif }; static const struct psp_vdata pspv2 = { @@ -498,6 +513,18 @@ static const struct psp_vdata pspv6 = { .intsts_reg = 0x10514, /* P2CMSG_INTSTS */ }; +static const struct psp_vdata psp_csvv1 = { + .sev = &csvv1, + .feature_reg = 0x105fc, + .inten_reg = 0x10670, + .intsts_reg = 0x10674, +#ifdef CONFIG_HYGON_PSP2CPU_CMD + .p2c_cmdresp_reg = 0x105e8, + .p2c_cmdbuff_addr_lo_reg = 0x105ec, + .p2c_cmdbuff_addr_hi_reg = 0x105f0, +#endif +}; + #endif static const struct sp_dev_vdata dev_vdata[] = { @@ -562,6 +589,15 @@ static const struct sp_dev_vdata dev_vdata[] = { .bar = 2, #ifdef CONFIG_CRYPTO_DEV_SP_PSP .psp_vdata = &pspv6, +#endif + }, + { /* 9 */ + .bar = 2, +#ifdef CONFIG_CRYPTO_DEV_SP_CCP + .ccp_vdata = &ccpv5a, +#endif +#ifdef CONFIG_CRYPTO_DEV_SP_PSP + .psp_vdata = &psp_csvv1, #endif }, }; @@ -576,6 +612,11 @@ static const struct pci_device_id sp_pci_table[] = { { PCI_VDEVICE(AMD, 0x1649), (kernel_ulong_t)&dev_vdata[6] }, { PCI_VDEVICE(AMD, 0x17E0), (kernel_ulong_t)&dev_vdata[7] }, { PCI_VDEVICE(AMD, 0x156E), (kernel_ulong_t)&dev_vdata[8] }, + { PCI_VDEVICE(HYGON, 0x1456), (kernel_ulong_t)&dev_vdata[1] }, + { PCI_VDEVICE(HYGON, 0x1468), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x1486), (kernel_ulong_t)&dev_vdata[9] }, + { PCI_VDEVICE(HYGON, 0x14b8), (kernel_ulong_t)&dev_vdata[2] }, + { PCI_VDEVICE(HYGON, 0x14a6), (kernel_ulong_t)&dev_vdata[9] }, /* Last entry must be zero */ { 0, } }; diff --git a/drivers/crypto/ccp/tdm-dev.c b/drivers/crypto/ccp/tdm-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..99f6e8f7416d78377b33ef49229ba2c0ed6c950c --- /dev/null +++ b/drivers/crypto/ccp/tdm-dev.c @@ -0,0 +1,1594 @@ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) "tdm: " fmt + +#define TDM_CMD_ID_MAX 16 +#define TDM2PSP_CMD(id) (0x110 | (id)) +#define TDM_P2C_CMD_ID 1 +#define TDM_C2P_CMD_SIZE (3*PAGE_SIZE) +#define TDM_KFIFO_SIZE 1024 + +#define TDM_IOC_TYPE 'D' +#define TDM_CMD_LEN_LIMIT (1U << 12) + +struct context_message { + uint32_t flag; + uint32_t pid; + uint8_t comm[16]; + uint8_t module_name[64]; +}; + +struct tdm_task_head { + struct list_head head; + rwlock_t lock; +}; + +struct tdm_task_ctx { + uint32_t task_id; + uint32_t cmd_ctx_flag; + measure_exception_handler_t handler; + struct list_head list; +}; + +static struct tdm_task_head dyn_head; +static unsigned int p2c_cmd_id = TDM_P2C_CMD_ID; +static struct task_struct *kthread; +static DECLARE_KFIFO(kfifo_error_task, unsigned char, TDM_KFIFO_SIZE); +static spinlock_t kfifo_lock; +static int tdm_support; +static int tdm_init_flag; +static int tdm_destroy_flag; + +static int list_check_exist(uint32_t task_id) +{ + int found = 0; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + found = 1; + break; + } + } + read_unlock(lock); + + return found; +} + +static int list_enqueue(void *entry) +{ + int ret = 0; + struct list_head *head, *entry_list = NULL; + rwlock_t *lock = NULL; + + if (!entry) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + entry_list = &(((struct tdm_task_ctx *)entry)->list); + + write_lock(lock); + if (entry_list) + list_add_tail(entry_list, head); + write_unlock(lock); + +end: + return ret; +} + +static __maybe_unused int list_print(void) +{ + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + pr_info("id: %d ", task_node->task_id); + } + read_unlock(lock); + pr_info("\n"); + + return 0; +}
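+ +/* Error-dispatch pipeline (as wired up below): the PSP raises a P2C interrupt, tdm_interrupt_handler() pushes the failing task id into kfifo_error_task and wakes this kthread; the thread pops each id, re-queries the PSP to confirm the error, and only then calls the registered handler. */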
+ +static int measure_exception_handling_thread(void *data) +{ + int ret = 0; + int copied = 0; + uint32_t error_task_id = 0xffffffff; + struct measure_status task_measure_status; + struct list_head *head = NULL; + rwlock_t *lock = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + + head = &dyn_head.head; + lock = &dyn_head.lock; + + pr_info("Thread started for measurement exception handler dispatching...\n"); + while (!kthread_should_stop()) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + + while (!kfifo_is_empty(&kfifo_error_task)) { + copied = kfifo_out_spinlocked(&kfifo_error_task, + (unsigned char *)&error_task_id, sizeof(uint32_t), &kfifo_lock); + if (copied != sizeof(uint32_t)) { + ret = -DYN_ERR_API; + pr_err("kfifo_out exception, return\n"); + goto end; + } + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == error_task_id) + break; + } + read_unlock(lock); + + if (!task_node) { + ret = -DYN_NULL_POINTER; + pr_err("task_node is null, return\n"); + goto end; + } + + if (task_node->task_id == error_task_id) { + if (task_node->handler) { + pr_info("-----Measurement exception handler dispatching " + "thread------\n"); + pr_info("Measurement exception received for task %d\n", + error_task_id); + pr_info("Step1: Query PSP for task %d status to confirm " + "the error.\n", error_task_id); + pr_info("Step2: Error confirmed, CALL measurement " + "exception handler.\n"); + ret = psp_query_measure_status(error_task_id, + &task_measure_status); + if (ret) { + pr_err("task_id %d status query failed\n", + error_task_id); + goto end; + } + + if (task_measure_status.error == MER_ERR) { + /* error--1, normal--0 */ + pr_info("Error detected for task %d, " + "action TODO!\n", error_task_id); + pr_info("----Measurement exception handler----\n"); + task_node->handler(error_task_id); + pr_info("Exit measurement exception handler.\n"); + } else { + pr_info("No error detected for task %d, please " + "check it again!\n", error_task_id); + } + } else { + pr_err("task %d's callback function is not registered, " + "please check it\n", error_task_id); + } + } + } + } +end: + return ret; +} + +static int tdm_interrupt_handler(uint32_t id, uint64_t data) +{ + if (kthread) { + kfifo_in_spinlocked(&kfifo_error_task, (unsigned char *)&data, sizeof(uint32_t), + &kfifo_lock); + wake_up_process(kthread); + } + + return 0; +} + +static int tdm_do_cmd(unsigned int cmd_id, void *cmd_data, int *error) +{ + if (cmd_id >= TDM_CMD_ID_MAX) { + pr_err("%s cmd_id %u beyond limit\n", __func__, cmd_id); + return -DYN_BEYOND_MAX; + } + + return psp_do_cmd(TDM2PSP_CMD(cmd_id), cmd_data, error); +} + +static int calc_task_context_hash(struct context_message context_msg, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + shash = crypto_alloc_shash("sha256", 0, 0); + if (IS_ERR(shash)) { + pr_err("can't alloc hash\n"); + return -DYN_ERR_API; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + + ret = crypto_shash_init(sdesc); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_init failed\n"); + goto free_shash; + } + + if (context_msg.flag & CONTEXT_CHECK_PID) { + ret = crypto_shash_update(sdesc, (uint8_t *)&context_msg.pid, + sizeof(context_msg.pid)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_COMM) { + ret = crypto_shash_update(sdesc, context_msg.comm, + strlen(context_msg.comm)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + if (context_msg.flag & CONTEXT_CHECK_MODNAME) { + ret = crypto_shash_update(sdesc, context_msg.module_name, + strlen(context_msg.module_name)); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_update failed\n"); + goto free_shash; + } + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + ret = -DYN_ERR_API; + pr_err("crypto_shash_final failed\n"); + goto free_shash; + } + } + +free_shash: + crypto_free_shash(shash); +end: + return ret; +}
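+ +/* The context hash binds a command to its caller: depending on the CONTEXT_CHECK_* bits in flag it is, roughly, SHA-256(pid || comm || module_name), computed with the kernel shash API above; which fields are mixed in is selected per task. */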
+ +static int tdm_get_cmd_context_hash(uint32_t flag, uint8_t *hash) +{ + int ret = 0; + struct context_message ctx_msg = {0}; + unsigned long return_address = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + struct module *p_module = NULL; +#elif IS_ENABLED(CONFIG_KALLSYMS) + char symbol_buf[128] = {0}; + int symbol_len = 0; + char *symbol_begin = NULL; + char *symbol_end = NULL; +#endif + + if (!hash) { + ret = -DYN_NULL_POINTER; + pr_err("Null pointer\n"); + goto end; + } + + ctx_msg.flag = flag; + ctx_msg.pid = current->pid; + memcpy(ctx_msg.comm, current->comm, sizeof(current->comm)); + + return_address = CALLER_ADDR1; + if (return_address) { +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + p_module = __module_address(return_address); + // caller is a module + if (p_module) + memcpy(ctx_msg.module_name, p_module->name, sizeof(p_module->name)); + // caller is built-in + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#elif IS_ENABLED(CONFIG_KALLSYMS) + symbol_len = sprint_symbol((char *)symbol_buf, return_address); + if (!symbol_len) { + ret = -DYN_ERR_API; + pr_err("sprint_symbol failed\n"); + goto end; + } + symbol_begin = strchr((char *)symbol_buf, '['); + if (!symbol_begin) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_end = strchr((char *)symbol_buf, ']'); + if (!symbol_end) { + ret = -DYN_NULL_POINTER; + pr_err("module name does not exist\n"); + goto end; + } + symbol_begin++; + if (symbol_end - symbol_begin) + memcpy(ctx_msg.module_name, symbol_begin, symbol_end - symbol_begin); + else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); +#endif + } else + memset(ctx_msg.module_name, 0, sizeof(ctx_msg.module_name)); + + ret = calc_task_context_hash(ctx_msg, hash); + if (ret) { + pr_err("calc_task_context_hash failed\n"); + goto end; + } + +end: + return ret; +} + +static int tdm_verify_phy_addr_valid(struct addr_range_info *range) +{ + int ret = 0; +#if IS_BUILTIN(CONFIG_CRYPTO_DEV_CCP_DD) + int i; + uint64_t phy_addr_start, phy_addr_end; + + for (i = 0; i < range->count; i++) { + phy_addr_start = __sme_clr(range->addr[i].addr_start); + phy_addr_end = __sme_clr(range->addr[i].addr_start + range->addr[i].length); + + if ((PHYS_PFN(phy_addr_start) >= max_pfn) || (PHYS_PFN(phy_addr_end) >= max_pfn)) { + pr_err("phy_addr or length beyond max_pfn\n"); + ret = -DYN_ERR_MEM; + break; + } + } +#else + pr_warn("TDM: Can't get max_pfn, skip physical address check\n"); +#endif + + return ret; +} + +/* Convert the virtual address to a physical address, then judge whether it is + * physically contiguous memory + */ +static int ptable_virt_to_phy(uint64_t vaddr, struct addr_info *p_addr_info, uint64_t *left_convert) +{ + int ret = 0; + unsigned int level = 0; + pte_t *pte; + uint64_t local_page_mask = 0; + uint64_t local_page_size = 0; + uint64_t now_base = vaddr; + uint64_t last_phy_addr = 0; + uint64_t last_phy_len = 0; + uint64_t now_phy_addr = 0; + + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + local_page_size = page_level_size(level); + local_page_mask = page_level_mask(level); + + switch (level) { + case PG_LEVEL_4K: + p_addr_info->addr_start = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + p_addr_info->addr_start = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + p_addr_info->addr_start = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + if ((p_addr_info->addr_start & ~local_page_mask) == 0) { + /*|--------------page_size-------------------|*/ + /*|-------*left_convert-------|*/ + if (*left_convert < local_page_size) { + p_addr_info->length = *left_convert; + 
*left_convert = 0; + } + /*|--------------page_size-------------------|-----*/ + /*|---------------------*left_convert-----------------------|*/ + else { + p_addr_info->length = local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + } + } else { + /*|--------------page_size-------------------|------*/ + /* |-------*left_convert---------|*/ + if ((p_addr_info->addr_start + *left_convert) < + ((p_addr_info->addr_start & local_page_mask) + local_page_size)) { + p_addr_info->length = *left_convert; + *left_convert = 0; + } + /*|--------------page_size-------------------|........*/ + /* |-----------------*left_convert-----------------|*/ + else { + p_addr_info->length = (p_addr_info->addr_start & local_page_mask) + + local_page_size - p_addr_info->addr_start; + now_base += p_addr_info->length; + *left_convert -= p_addr_info->length; + } + } + + last_phy_len = p_addr_info->length; + last_phy_addr = p_addr_info->addr_start; + + while (*left_convert) { + pte = lookup_address(now_base, &level); + if (!pte) { + ret = -DYN_ERR_MEM; + pr_err("lookup_address failed!\n"); + goto end; + } + + switch (level) { + case PG_LEVEL_4K: + now_phy_addr = (uint64_t)((pte_val(*pte) & local_page_mask & ~_PAGE_NX) + + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_2M: + now_phy_addr = (uint64_t)((pmd_val(*(pmd_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + case PG_LEVEL_1G: + now_phy_addr = (uint64_t)((pud_val(*(pud_t *)pte) & local_page_mask & + ~_PAGE_NX) + (now_base & ~local_page_mask)); + break; + default: + pr_err("page table level is not supported!\n"); + return -DYN_ERR_MEM; + } + + /*not continuous memory*/ + if ((last_phy_addr + last_phy_len) != now_phy_addr) + break; + + if (*left_convert < local_page_size) { + p_addr_info->length += *left_convert; + *left_convert = 0; + } else { + p_addr_info->length += local_page_size; + now_base += local_page_size; + *left_convert -= local_page_size; + last_phy_addr = now_phy_addr; + last_phy_len = local_page_size; + } + } + +end: + return ret; +} + +int psp_check_tdm_support(void) +{ + int ret = 0; + struct tdm_version version; + + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + if (tdm_support) + goto end; + + ret = psp_get_fw_info(&version); + if (ret) { + tdm_support = 0; + goto end; + } + + tdm_support = 1; + } + +end: + return tdm_support; +} +EXPORT_SYMBOL_GPL(psp_check_tdm_support); + +int psp_get_fw_info(struct tdm_version *version) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_fw_cmd *fw_cmd = NULL; + struct tdm_fw_resp *fw_resp = NULL; + + if (!version) { + ret = -DYN_NULL_POINTER; + pr_err("version is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + fw_cmd = (struct tdm_fw_cmd *)tdm_cmdresp_data; + fw_cmd->cmd_type = TDM_FW_VERSION; + + ret = tdm_do_cmd(0, (void *)fw_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + + if (error) { + ret = -error; + pr_warn("get_fw_info exception: 0x%x\n", error); + goto free_cmdresp; + } + + fw_resp = (struct tdm_fw_resp *)tdm_cmdresp_data; + memcpy(version, &fw_resp->version, sizeof(struct tdm_version)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_get_fw_info); + +int 
psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_create_cmd *create_cmd = NULL; + struct tdm_create_resp *create_resp = NULL; + uint32_t addr_range_info_len = 0; + struct addr_range_info *paddr_range_info = NULL; + uint32_t info_index = 0; + uint64_t now_base_vaddr = 0; + uint64_t tf_left_size = 0; + uint32_t count = 0; + + if (!range) { + ret = -DYN_NULL_POINTER; + pr_err("range is null pointer\n"); + goto end; + } + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (range->count > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("range->count %d is beyond RANGE_CNT_MAX %d\n", range->count, RANGE_CNT_MAX); + goto end; + } + if (range->count == 0) { + ret = -DYN_ERR_SIZE_SMALL; + pr_err("range->count is zero!\n"); + goto end; + } + + /* create task by vaddr */ + if (flag & TASK_CREATE_VADDR) { + paddr_range_info = kzalloc(sizeof(struct addr_range_info) + + RANGE_CNT_MAX * sizeof(struct addr_info), GFP_KERNEL); + if (!paddr_range_info) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for paddr_range_info failed\n"); + goto end; + } + + now_base_vaddr = range->addr[0].addr_start; + tf_left_size = range->addr[0].length; + while (tf_left_size && (count++ < RANGE_CNT_MAX + 1)) { + ret = ptable_virt_to_phy(now_base_vaddr, + &paddr_range_info->addr[info_index], &tf_left_size); + if (ret) { + pr_err("address convert failed!\n"); + goto free_paddr_range_info; + } + + now_base_vaddr = now_base_vaddr + + paddr_range_info->addr[info_index++].length; + if (info_index > RANGE_CNT_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("info_index: %d is beyond %d\n", info_index, RANGE_CNT_MAX); + goto free_paddr_range_info; + } + } + + paddr_range_info->count = info_index; + addr_range_info_len = paddr_range_info->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } else { + /* check if the physical address is valid */ + ret = tdm_verify_phy_addr_valid(range); + if (ret) { + pr_err("range address is abnormal!\n"); + goto end; + } + addr_range_info_len = range->count * sizeof(struct addr_info) + + sizeof(struct addr_range_info); + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto free_paddr_range_info; + } + + create_cmd = (struct tdm_create_cmd *)tdm_cmdresp_data; + create_cmd->cmd_type = TDM_TASK_CREATE; + create_cmd->cmd_ctx_flag = flag; + + memcpy(&create_cmd->m_data, data, sizeof(struct measure_data)); + create_cmd->authcode_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : code->len; + + ret = tdm_get_cmd_context_hash(flag, create_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + if (flag & TASK_CREATE_VADDR) + memcpy(&create_cmd->range_info, paddr_range_info, addr_range_info_len); + else + memcpy(&create_cmd->range_info, range, addr_range_info_len); + + ret = tdm_do_cmd(0, (void *)create_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("create_measure_task exception error: 0x%x\n", error); + goto free_cmdresp; + } + + create_resp = (struct tdm_create_resp *)tdm_cmdresp_data; + code->len = create_resp->authcode_len; + code->len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : code->len; + memcpy(&code->val[0], &create_resp->authcode_val[0], code->len); + + head = &dyn_head.head; + task_node = kzalloc(sizeof(struct tdm_task_ctx), GFP_KERNEL); + if (!task_node) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", sizeof(struct tdm_task_ctx)); + goto free_cmdresp; + } + + task_node->task_id = create_resp->task_id; + task_node->handler = NULL; + task_node->cmd_ctx_flag = flag; + + ret = list_enqueue(task_node); + if (ret) { + pr_err("task %d enqueue failed!!!\n", task_node->task_id); + goto free_task_node; + } + + kfree(tdm_cmdresp_data); + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); + + return task_node->task_id; + +free_task_node: + kfree(task_node); +free_cmdresp: + kfree(tdm_cmdresp_data); +free_paddr_range_info: + if (flag & TASK_CREATE_VADDR) + kfree(paddr_range_info); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_create_measure_task); + +int psp_query_measure_status(uint32_t task_id, struct measure_status *status) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_query_cmd *query_cmd = NULL; + struct tdm_query_resp *query_resp = NULL; + + if (!status) { + ret = -DYN_NULL_POINTER; + pr_err("status is null pointer\n"); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + query_cmd = (struct tdm_query_cmd *)tdm_cmdresp_data; + query_cmd->cmd_type = TDM_TASK_QUERY; + query_cmd->task_id = task_id; + + ret = tdm_do_cmd(0, query_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + query_resp = (struct tdm_query_resp *)tdm_cmdresp_data; + memcpy(status, &query_resp->m_status, sizeof(struct measure_status)); +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_query_measure_status); + +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_register_cmd *register_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if 
(code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + /* check if task_id is registered already */ + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) { + if ((handler && task_node->handler)) { + pr_err("task %d is registered already\n", task_id); + read_unlock(lock); + return -DYN_EEXIST; + } + break; + /* task_node will be used for next context */ + } + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + register_cmd = (struct tdm_register_cmd *)tdm_cmdresp_data; + temp_cmd = &register_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_VERIFY_AUTH; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, register_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + write_lock(lock); + task_node->handler = handler; + write_unlock(lock); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_register_measure_exception_handler);
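+ +/* Typical TDM consumer flow, as an illustrative sketch only (error handling omitted, my_handler/mdata are hypothetical names and the values are invented): struct authcode_2b code = { .len = 16 }; int id = psp_create_measure_task(range, &mdata, TASK_CREATE_VADDR, &code); psp_register_measure_exception_handler(id, &code, my_handler); psp_startstop_measure_task(id, &code, true); */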
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, destroy_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + if (task_node->handler) { + write_lock(lock); + task_node->handler = NULL; + write_unlock(lock); + } + + write_lock(lock); + list_del(&task_node->list); + write_unlock(lock); + + kfree(task_node); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_destroy_measure_task); + +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_update_cmd *update_cmd = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!data) { + ret = -DYN_NULL_POINTER; + pr_err("data is null pointer\n"); + goto end; + } + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + if (task_node->cmd_ctx_flag & TASK_ATTR_NO_UPDATE) { + pr_warn("Task %d is not allowed to update!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + update_cmd = (struct tdm_update_cmd *)tdm_cmdresp_data; + temp_cmd = &update_cmd->cmd; + temp_cmd->cmd_type = TDM_TASK_UPDATE; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + memcpy(&update_cmd->update_data, data, sizeof(struct measure_update_data)); + + ret = tdm_do_cmd(0, tdm_cmdresp_data, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_update_measure_task); + +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start) +{ + int ret = 0; + int error; + struct list_head *head = NULL; + struct tdm_task_ctx *task_node = NULL, *tmp_node = NULL; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_startstop_cmd *startstop_cmd = NULL; + struct tdm_startstop_resp *startstop_resp = NULL; + struct tdm_common_cmd *temp_cmd = NULL; + rwlock_t *lock = NULL; + + if (!code) { + ret = -DYN_NULL_POINTER; + pr_err("code is null pointer\n"); + goto end; + } + if (code->len > AUTHCODE_MAX) { + ret = -DYN_BEYOND_MAX; + pr_err("authcode len %d is beyond AUTHCODE_MAX %d\n", code->len, AUTHCODE_MAX); + goto end; + } + + if (!list_check_exist(task_id)) { + pr_err("task %d isn't created\n", task_id); + return -DYN_NOT_EXIST; + } + + head = &dyn_head.head; + lock = &dyn_head.lock; + + read_lock(lock); + list_for_each_entry_safe(task_node, tmp_node, head, list) { + if (task_node->task_id == task_id) + break; + } + read_unlock(lock); + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + startstop_cmd = (struct tdm_startstop_cmd *)tdm_cmdresp_data; + temp_cmd = &startstop_cmd->cmd; + temp_cmd->cmd_type = start ? TDM_TASK_START : TDM_TASK_STOP; + temp_cmd->task_id = task_id; + temp_cmd->code_len = code->len; + temp_cmd->code_len = code->len > AUTHCODE_MAX ? 
AUTHCODE_MAX : temp_cmd->code_len; + memcpy(temp_cmd->code_val, code->val, temp_cmd->code_len); + + if ((temp_cmd->cmd_type == TDM_TASK_STOP) && (task_node->cmd_ctx_flag & + TASK_ATTR_NO_UPDATE)) { + pr_warn("Task %d is not allowed to stop!\n", task_node->task_id); + ret = -DYN_NO_ALLOW_UPDATE; + goto free_cmdresp; + } + + ret = tdm_get_cmd_context_hash(task_node->cmd_ctx_flag, temp_cmd->context_hash); + if (ret) { + pr_err("tdm_get_cmd_context_hash failed\n"); + goto free_cmdresp; + } + + ret = tdm_do_cmd(0, startstop_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + startstop_resp = (struct tdm_startstop_resp *)tdm_cmdresp_data; + + kfree(tdm_cmdresp_data); + + return startstop_resp->m_status.status; + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(psp_startstop_measure_task); + +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_export_cert_cmd *cert_cmd = NULL; + struct tdm_export_cert_resp *cert_resp = NULL; + + if (!cert) { + ret = -DYN_NULL_POINTER; + pr_err("cert is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + cert_cmd = (struct tdm_export_cert_cmd *)tdm_cmdresp_data; + cert_cmd->cmd_type = TDM_EXPORT_CERT; + cert_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)cert_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + cert_resp = (struct tdm_export_cert_resp *)tdm_cmdresp_data; + memcpy(cert, &cert_resp->cert, sizeof(struct tdm_cert)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_export_cert); + +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_report *report_resp = NULL; + uint32_t needed_length = 0; + + if (!user_supplied_data) { + ret = -DYN_NULL_POINTER; + pr_err("user_supplied_data is null pointer\n"); + goto end; + } + if (!report_buffer) { + ret = -DYN_NULL_POINTER; + pr_err("report_buffer is null pointer\n"); + goto end; + } + if (!length) { + ret = -DYN_NULL_POINTER; + pr_err("length is null pointer\n"); + goto end; + } + if ((report_type != TDM_REPORT_SUMMARY) && (report_type != TDM_REPORT_DETAIL)) { + ret = -DYN_ERR_REPORT_TYPE; + pr_err("invalid report_type\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + report_cmd = (struct tdm_get_report_cmd *)tdm_cmdresp_data; + + report_cmd->cmd_type = TDM_GET_REPORT; + report_cmd->task_id = task_id; + if (task_id == TDM_TASK_ALL) { + if (!selection) { + ret = -DYN_NULL_POINTER; + 
pr_err("selection is null pointer\n"); + goto end; + } + report_cmd->selection_len = selection->len; + report_cmd->selection_len = (report_cmd->selection_len > TDM_MAX_TASK_BITMAP) ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + memcpy(&report_cmd->selection_bitmap[0], &selection->bitmap[0], + report_cmd->selection_len); + } + + report_cmd->user_data_len = (user_supplied_data->len > TDM_MAX_NONCE_SIZE) ? + TDM_MAX_NONCE_SIZE : user_supplied_data->len; + memcpy(&report_cmd->user_data_val[0], &user_supplied_data->val[0], + report_cmd->user_data_len); + report_cmd->report_type = report_type; + report_cmd->key_usage_id = key_usage_id; + + ret = tdm_do_cmd(0, (void *)report_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + report_resp = (struct tdm_report *)tdm_cmdresp_data; + if (report_type == TDM_REPORT_SUMMARY) + needed_length = sizeof(struct tdm_report) + sizeof(struct tdm_report_sig); + else + needed_length = sizeof(struct tdm_report) + + report_resp->task_nums * sizeof(struct tdm_detail_task_status) + + sizeof(struct tdm_report_sig); + + if (needed_length > *length) { + pr_warn("needed_length %d is beyond length %d\n", needed_length, *length); + *length = needed_length; + ret = -DYN_ERR_SIZE_SMALL; + } else { + memcpy(report_buffer, report_resp, needed_length); + } + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_report); + +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values) +{ + int ret = 0; + int error; + unsigned char *tdm_cmdresp_data = NULL; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + + if (!digest) { + ret = -DYN_NULL_POINTER; + pr_err("digest is null pointer\n"); + goto end; + } + if (!pcr_values) { + ret = -DYN_NULL_POINTER; + pr_err("pcr_values is null pointer\n"); + goto end; + } + + tdm_cmdresp_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!tdm_cmdresp_data) { + ret = -DYN_ERR_MEM; + pr_err("kzalloc for size %ld failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)tdm_cmdresp_data; + + vpcr_cmd->cmd_type = TDM_VPCR_AUDIT; + memcpy(&vpcr_cmd->pcr, &pcr, sizeof(struct pcr_select)); + + ret = tdm_do_cmd(0, (void *)vpcr_cmd, &error); + if (ret && ret != -EIO) { + pr_err("tdm_do_cmd failed cmd id: 0x%x, error: 0x%x\n", TDM2PSP_CMD(0), error); + goto free_cmdresp; + } + if (error) { + ret = -error; + pr_err("%s exception error: 0x%x\n", __func__, error); + goto free_cmdresp; + } + + vpcr_resp = (struct tdm_get_vpcr_resp *)tdm_cmdresp_data; + memcpy(digest, &vpcr_resp->digest, sizeof(struct tpm2b_digest)); + pcr_values->task_nums = vpcr_resp->pcr_values.task_nums; + memcpy(&pcr_values->task_data[0], &vpcr_resp->pcr_values.task_data[0], + pcr_values->task_nums * sizeof(struct tdm_task_data)); + +free_cmdresp: + kfree(tdm_cmdresp_data); +end: + return ret; +} +EXPORT_SYMBOL_GPL(tdm_get_vpcr_audit); + +static long tdm_ioctl(struct file *file, unsigned int ioctl, unsigned long arg) +{ + int ret = 0; + void __user *argp = (void __user *)arg; + unsigned int tdm_cmd = 0; + unsigned char *temp_cmd_data = NULL; + struct task_selection_2b *selection = NULL; + struct data_2b *data = NULL; + uint32_t data_to_user_len = 0; + uint16_t selection_len = 0; + uint16_t user_data_len = 0; + 
struct tdm_get_report_cmd *report_cmd = NULL; + struct tdm_user_report_cmd *user_report_cmd = NULL; + uint32_t needed_length = 0; + struct tdm_get_vpcr_cmd *vpcr_cmd = NULL; + struct tdm_get_vpcr_resp *vpcr_resp = NULL; + uint32_t pcr_num = 0; + + if (_IOC_TYPE(ioctl) != TDM_IOC_TYPE) { + ret = -EINVAL; + pr_err("ioctl 0x%08x is invalid\n", ioctl); + goto end; + } + + temp_cmd_data = kzalloc(TDM_C2P_CMD_SIZE, GFP_KERNEL); + if (!temp_cmd_data) { + ret = -ENOMEM; + pr_err("kzalloc for size 0x%lx failed\n", TDM_C2P_CMD_SIZE); + goto end; + } + + tdm_cmd = _IOC_NR(ioctl); + + switch (tdm_cmd) { + case USER_EXPORT_CERT: + ret = tdm_export_cert(TDM_AK_USAGE_ID, (struct tdm_cert *)temp_cmd_data); + if (ret) { + pr_err("Execute tdm export cert command failed!\n"); + goto free_mem; + } + data_to_user_len = sizeof(struct tdm_cert); + break; + + case USER_GET_REPORT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_user_report_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + user_report_cmd = (struct tdm_user_report_cmd *)temp_cmd_data; + needed_length = user_report_cmd->needed_length; + report_cmd = &user_report_cmd->report_cmd; + selection_len = report_cmd->selection_len > TDM_MAX_TASK_BITMAP ? + TDM_MAX_TASK_BITMAP : report_cmd->selection_len; + + selection = kzalloc(sizeof(struct task_selection_2b) + + selection_len * sizeof(uint8_t), GFP_KERNEL); + if (!selection) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + selection->len = selection_len; + memcpy(&selection->bitmap[0], &report_cmd->selection_bitmap[0], selection->len); + + user_data_len = report_cmd->user_data_len > TDM_MAX_NONCE_SIZE ? + TDM_MAX_NONCE_SIZE : report_cmd->user_data_len; + data = kzalloc(sizeof(struct data_2b) + + user_data_len * sizeof(uint8_t), GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + pr_err("kzalloc failed\n"); + goto free_mem; + } + + data->len = user_data_len; + memcpy(&data->val[0], &report_cmd->user_data_val[0], data->len); + + ret = tdm_get_report(report_cmd->task_id, selection, data, report_cmd->report_type, + report_cmd->key_usage_id, temp_cmd_data, &needed_length); + if (ret) { + pr_err("Execute tdm report command failed!\n"); + goto free_mem; + } + + data_to_user_len = needed_length; + break; + + case USER_VPCR_AUDIT: + if (copy_from_user(temp_cmd_data, argp, sizeof(struct tdm_get_vpcr_cmd))) { + pr_err("%s copy from user failed\n", __func__); + ret = -EFAULT; + goto end; + } + + vpcr_cmd = (struct tdm_get_vpcr_cmd *)temp_cmd_data; + vpcr_resp = (struct tdm_get_vpcr_resp *)temp_cmd_data; + pcr_num = vpcr_cmd->pcr.pcr; + + ret = tdm_get_vpcr_audit(vpcr_cmd->pcr, &vpcr_resp->digest, &vpcr_resp->pcr_values); + if (ret) { + pr_err("Execute tdm vpcr audit command failed!\n"); + goto free_mem; + } + + vpcr_resp->pcr = pcr_num; + data_to_user_len = sizeof(struct tdm_get_vpcr_resp) + + vpcr_resp->pcr_values.task_nums * sizeof(struct tdm_task_data); + break; + + case USER_SHOW_DEVICE: + ret = psp_get_fw_info(&((struct tdm_show_device *)temp_cmd_data)->version); + if (ret) { + pr_err("firmware version get failed!\n"); + goto free_mem; + } + + data_to_user_len = sizeof(struct tdm_show_device); + break; + + default: + pr_err("invalid tdm_cmd: %d from user\n", tdm_cmd); + ret = -EINVAL; + goto free_mem; + } + + if (copy_to_user(argp, temp_cmd_data, data_to_user_len)) { + pr_err("%s copy to user failed\n", __func__); + ret = -EFAULT; + goto free_mem; + } + +free_mem: + kfree(temp_cmd_data); + kfree(selection); + kfree(data); +end: + 
return ret; +} + +static const struct file_operations tdm_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = tdm_ioctl, +}; + +static struct miscdevice misc = { + .minor = MISC_DYNAMIC_MINOR, + .name = "tdm", + .fops = &tdm_fops, +}; + +int tdm_dev_init(void) +{ + int ret = 0; + + if (tdm_init_flag) + return 0; + + INIT_KFIFO(kfifo_error_task); + INIT_LIST_HEAD(&dyn_head.head); + rwlock_init(&dyn_head.lock); + spin_lock_init(&kfifo_lock); + + ret = psp_register_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + if (ret) { + pr_err("notifier function registration failed\n"); + return ret; + } + + kthread = kthread_create(measure_exception_handling_thread, NULL, + "measure_exception_handling_thread"); + if (IS_ERR(kthread)) { + pr_err("kthread_create fail\n"); + ret = PTR_ERR(kthread); + goto unreg; + } + + wake_up_process(kthread); + + ret = misc_register(&misc); + if (ret) { + pr_err("misc_register for tdm failed\n"); + goto stop_kthread; + } + + tdm_init_flag = 1; + pr_info("TDM driver loaded successfully!\n"); + + return ret; + +stop_kthread: + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } +unreg: + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + return ret; +} + +int tdm_dev_destroy(void) +{ + if (tdm_destroy_flag) + goto end; + + if (kthread) { + kthread_stop(kthread); + kthread = NULL; + } + + psp_unregister_cmd_notifier(p2c_cmd_id, tdm_interrupt_handler); + + misc_deregister(&misc); + tdm_destroy_flag = 1; +end: + return 0; +} + diff --git a/drivers/crypto/ccp/tdm-dev.h b/drivers/crypto/ccp/tdm-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..afc4761a7e81778bb8e0bb9c4984504a540766bc --- /dev/null +++ b/drivers/crypto/ccp/tdm-dev.h @@ -0,0 +1,504 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * The Hygon TDM CPU-to-PSP communication driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +/* Change log: + * Version: 0.7 (fw version 1.4) + * 1.Adjust the TDM driver to accommodate multiple versions of the kernel. + * Version: 0.6 (fw version 1.4) + * 1.remove psp_get_fw_info from hygon_tdm_init, add tdm show device support to ioctl for hag. + * Version: 0.5 (fw version 1.4) + * 1.add support for hanging machine when task exception with special attribute. + * Version: 0.4 (fw version 1.3) + * 1.add vpcr support. + * 2.add task create by vaddr. + * Version: 0.3 (fw version 1.2) + * 1.add remote authentication support. 
+ */ +#ifndef __TDM_DEV_H__ +#define __TDM_DEV_H__ + +#include +#include + +#define MIN_VPCR 10 +#define MAX_VPCR 16 + +/*Macro definition for measurement*/ +#define TDM_MAX_TASK_BITMAP 16 +#define TDM_MAX_NONCE_SIZE 32 + +#define RANGE_CNT_MAX 0x80 +#define MEASURE_TASK_MAX 100 +#define AUTHCODE_MAX 16 +#define AUTH_TRY_DELAY 1 + +#define HASH_ALGO_SM3 0 +#define HASH_ALGO_SHA1 1 +#define HASH_ALGO_SHA256 2 +#define HASH_ALGO_SHA384 3 +#define HASH_ALGO_SHA512 4 + +#define SM3_256_DIGEST_SIZE 32 +#define SHA1_DIGEST_SIZE 20 +#define SHA256_DIGEST_SIZE 32 +#define SHA384_DIGEST_SIZE 48 +#define SHA512_DIGEST_SIZE 64 + +#define CONTEXT_CHECK_PID 0x1 +#define CONTEXT_CHECK_COMM 0x2 +#define CONTEXT_CHECK_MODNAME 0x4 +#define TASK_ATTR_NO_UPDATE 0x10000 +#define TASK_SUPPORT_VPCR 0x20000 +#define TASK_CREATE_VADDR 0x40000 +#define TASK_EXCEPTION_CRASH 0x80000 + +#define MEASURE_UPDATE_ALGO 0x1 +#define MEASURE_UPDATE_EXPECTED_MEASUREMENT 0x2 + +/*Macro definition for tdm certificate*/ +#define TDM_MAX_CHIP_ID_LEN 40 +#define TDM_CURVE_SM2_ID 0x3 +#define TDM_PUBKEY_LEN 32 +#define TDM_MAX_USER_ID_LEN 126 +#define TDM_SIG_LEN 32 +#define TDM_HEADER_AND_PUBKEY_LEN 284 + +/*Macro definition for tdm report*/ +#define TDM_TASK_ALL 0xffffffff +#define TDM_REPORT_SUMMARY 0 +#define TDM_REPORT_DETAIL 1 + +/* CPU to psp command declaration */ +enum C2P_CMD_TYPE { + TDM_TASK_CREATE = 0x0, + TDM_TASK_VERIFY_AUTH, + TDM_TASK_QUERY, + TDM_TASK_DESTROY, + TDM_TASK_UPDATE, + TDM_TASK_STOP, + TDM_TASK_START, + TDM_FW_VERSION, + TDM_EXPORT_CERT, + TDM_GET_REPORT, + TDM_VPCR_AUDIT, + TDM_MAX_CMD +}; + +/* User interaction command declaration */ +enum USER_CMD_TYPE { + USER_EXPORT_CERT = 0x80, + USER_GET_REPORT, + USER_VPCR_AUDIT, + USER_SHOW_DEVICE, + USER_MAX_CMD +}; + +/*Public usage id definition for tdm certificate*/ +enum _tdm_key_usage_id { + TDM_INVALID_USAGE_ID = 0x1000, + TDM_CEK_USAGE_ID = 0x1004, + TDM_AK_USAGE_ID = 0x2001, + TDM_MAX_USAGE_ID +}; + +/*Public status ans type declaration*/ +enum TDM_TASK_STATUS { + DYN_INIT = 0x0, + DYN_TO_RUN, + DYN_RUN, + DYN_TO_STOP, + DYN_STOP +}; + +enum TDM_MEASURE_STATUS { + MER_NORMAL = 0x0, + MER_ERR +}; + +enum DYN_ERROR_TYPE { + DYN_NORMAL = 0x0, + DYN_NOT_EXIST, + DYN_AUTH_FAIL, + DYN_STATUS_NOT_SUIT, + DYN_BEYOND_MAX, + DYN_DA_PERIOD, + DYN_NULL_POINTER, + DYN_ERR_API, + DYN_EEXIST, + DYN_ERR_MEM, + DYN_ERR_AUTH_LEN, + DYN_ERR_KEY_ID, + DYN_NO_ALLOW_UPDATE, + DYN_ERR_HASH_ALGO, + DYN_ERR_REPORT_TYPE, + DYN_ERR_SIZE_SMALL, + DYN_ERR_ADDR_MAPPING, + DYN_ERR_PCR_NUM, + DYN_ERR_ORIG_TPM_PCR, + DYN_MAX_ERR_TYPE +}; + +/*Data structure declaration for measurement*/ +struct addr_info { + uint64_t addr_start; + uint64_t length; +} __packed; + +struct addr_range_info { + uint32_t count; + struct addr_info addr[]; +} __packed; + +struct measure_data { + uint32_t hash_algo; + uint8_t expected_measurement[32]; + uint32_t period_ms; + uint32_t pcr; +} __packed; + +struct authcode_2b { + uint16_t len; + uint8_t val[]; +} __packed; + +struct measure_status { + uint8_t status; + uint8_t error; + uint64_t count; +} __packed; + +struct measure_update_data { + uint32_t update_flag; + uint32_t algo; + uint8_t expected_measurement[32]; +} __packed; + +struct da_status { + uint64_t err_time; + uint16_t interval_time; + uint16_t err_cnt; +} __packed; + +struct tdm_version { + uint8_t api_major; + uint8_t api_minor; + uint32_t buildId; + uint32_t task_max; + uint32_t range_max_per_task; +} __packed; + +struct task_selection_2b { + uint16_t len; + uint8_t bitmap[]; +}; + +struct 
data_2b { + uint16_t len; + uint8_t val[]; +}; + +/*Data structure declaration for vpcr*/ +struct pcr_select { + uint16_t hash; + uint32_t pcr; +} __packed; + +union tpmu_ha { + uint8_t sha1[SHA1_DIGEST_SIZE]; + uint8_t sha256[SHA256_DIGEST_SIZE]; + uint8_t sha384[SHA384_DIGEST_SIZE]; + uint8_t sha512[SHA512_DIGEST_SIZE]; + uint8_t sm3_256[SM3_256_DIGEST_SIZE]; +}; + +struct tpm2b_digest { + uint16_t size; + uint8_t buffer[sizeof(union tpmu_ha)]; +} __packed; + +struct tdm_task_data { + uint32_t task_id; + uint8_t hash[32]; +} __packed; + +struct tdm_pcr_value_2b { + uint32_t task_nums; + struct tdm_task_data task_data[]; +} __packed; + +/*Data structure declaration for tdm certificate*/ +struct _tdm_ecc_pubkey { + uint32_t curve_id; + uint8_t pubkey_qx[TDM_PUBKEY_LEN]; + uint8_t pubkey_qy[TDM_PUBKEY_LEN]; + uint16_t user_id_len; + uint8_t user_id[TDM_MAX_USER_ID_LEN]; +} __packed; + +struct _tdm_ecc_signature { + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/* + ************************ Hygon TDM Certificate - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... | + *|04h |7:0 |- |Reserved. Set to zero | + *|06h |7:0 |CHIP_ID_LEN | | + *|08h |319:0 |CHIP_ID |Unique ID of every chip. | + *|30h |31:0 |KEY_USAGE_ID |Usage id of the key. | + *|34h |63:0 |- |Reserved. Set to zero. | + *|3Ch |31:0 |CURVE_ID |ECC curve id | + *|40h |255:0 |Qx |Public key Qx | + *|60h |255:0 |Qy |Public key Qy | + *|80h |7:0 |USER_ID_LEN |GM user id len | + *|82h |1007:0 |USER_ID |GM user id | + *|100h|223:0 |- |Reserved. Set to zero. | + *|11Ch|31:0 |SIG1_KEY_USAGE_ID|Key type for sig1. | + *|120h|255:0 |SIG1_R |Signature R of key1. | + *|140h|255:0 |SIG1_S |Signature S of key1. | + *|160h|223:0 |- |Reserved. Set to zero | + *|17Ch|31:0 |SIG2_KEY_USAGE_ID|Key type for sig2. | + *|180h|255:0 |SIG2_R |Signature R of key2. | + *|1A0h|255:0 |SIG2_S |Signature S of key2. | + ************************************************************************************* + */ +struct tdm_cert { + uint32_t version; + uint8_t reserved_0[2]; + uint16_t chip_id_len; + uint8_t chip_id[TDM_MAX_CHIP_ID_LEN]; + uint32_t key_usage_id; + uint8_t reserved_1[8]; + struct _tdm_ecc_pubkey ecc_pubkey; + uint8_t reserved_2[28]; + uint32_t sig1_key_usage_id; + struct _tdm_ecc_signature ecc_sig1; + uint8_t reserved_3[28]; + uint32_t sig2_key_usage_id; + struct _tdm_ecc_signature ecc_sig2; +} __packed; + +/*Data structure declaration for tdm measurement report*/ +/* + ******************** Hygon TDM Report for Single Task - ECC256*********************** + *|+(00h) |31:0 |TASK_ID |Measured task ID | + *|+(04h) |31:0 |PERIOD_MS |Meaured period time for the related task | + *|+(08h) |63:0 |MEAURED_COUNT |Meaured count for the related task | + *|+(10h) |31:0 |LAST_MEASURE_ELAPSED_MS|Meaured time for last mesurement. | + *|+(14h) |95:0 |- |Reserved. Set to zero | + *|+(20h) |255:0 |MEASURED_HASH |Mesured hash for the related task. | + ************************************************************************************* + */ +struct tdm_detail_task_status { + uint32_t task_id; + uint32_t period_ms; + uint64_t measured_count; + uint32_t last_measure_elapsed_ms; + uint8_t reserved[12]; + uint8_t measured_hash[32]; +} __packed; + +/* + ************************ Hygon TDM Report - ECC256*************************** + *|00h |31:0 |VERSION |Certificate version. 0... 
| + *|04h |31:0 |FW_VERSION |Firmware verfion,BUILD_ID | + *|08h |7:0 |REPORT_TYPE |Summary report:0, Detailed report:1 | + *|09h |39:0 |- |Reserved. Set to zero. | + *|0Eh |15:0 |TASK_NUMS |ALL task numbers. | + *|10h |127:0 |TASK_BITMAP |ALL task bitmap. | + *|20h |127:0 |TASK_ERROR_BITMAP |Bitmap for error tasks | + *|30h |127:0 |TASK_RUNNING_BITMAP|Bitmap for runnint tasks | + *|40h |239:0 |- |Reserved. Set to zero. | + *|5Eh |15:0 |USER_DATA_LEN |User supplied data length. | + *|60h |255:0 |USER_DATA |User supplied data. | + *|80h |255:0 |AGGREGATE_HASH |Aggregate hash for tasks | + ************************************************************************************* + */ +struct tdm_report { + uint32_t version; + uint32_t fw_version; + uint8_t report_type; + uint8_t reserved_0[5]; + uint16_t task_nums; + uint8_t task_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_error_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t task_running_bitmap[TDM_MAX_TASK_BITMAP]; + uint8_t reserved_1[30]; + uint16_t user_supplied_data_len; + uint8_t user_supplied_data[TDM_MAX_NONCE_SIZE]; + uint8_t aggregate_hash[32]; + struct tdm_detail_task_status detailed_task_status[]; +} __packed; + +/* + ************************ Hygon TDM Report Signature - ECC256************************* + *|A0h |223:0 |- |Reserved. Set to zero | + *|BCh |31:0 |SIG_KEY_USAGE_ID |Key type for sig. | + *|C0h |255:0 |SIG_R |Signature R of key. | + *|E0h |255:0 |SIG_S |Signature S of key. | + ************************************************************************************* + */ +struct tdm_report_sig { + uint8_t reserved[28]; + uint32_t sig_key_usage_id; + uint8_t sig_r[TDM_SIG_LEN]; + uint8_t sig_s[TDM_SIG_LEN]; +} __packed; + +/*Data structure declaration for tdm command/response interface*/ +/* + * The following commands use this structure: + * psp_register_measure_exception_handler + * psp_destroy_measure_task + * psp_update_measure_task + * psp_startstop_measure_task + */ +struct tdm_common_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t code_len; + uint8_t code_val[AUTHCODE_MAX]; + uint8_t context_hash[32]; +} __packed; + +/*TASK_CREATE*/ +struct tdm_create_cmd { + uint32_t cmd_type; + uint32_t cmd_ctx_flag; + struct measure_data m_data; + uint16_t authcode_len; + uint8_t context_hash[32]; + struct addr_range_info range_info; +} __packed; + +struct tdm_create_resp { + uint32_t task_id; + uint16_t authcode_len; + uint8_t authcode_val[AUTHCODE_MAX]; +} __packed; + +/*TASK_VERIFY_AUTH*/ +struct tdm_register_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_QUERY*/ +struct tdm_query_cmd { + uint32_t cmd_type; + uint32_t task_id; +} __packed; + +struct tdm_query_resp { + struct measure_status m_status; +} __packed; + +/*TASK_DESTROY*/ +struct tdm_destroy_cmd { + struct tdm_common_cmd cmd; +} __packed; + +/*TASK_UPDATE*/ +struct tdm_update_cmd { + struct tdm_common_cmd cmd; + struct measure_update_data update_data; +} __packed; + +/*TASK_STOP,TASK_START*/ +struct tdm_startstop_cmd { + struct tdm_common_cmd cmd; +} __packed; + +struct tdm_startstop_resp { + struct measure_status m_status; +} __packed; + +/*TDM_VERSION*/ +struct tdm_fw_cmd { + uint32_t cmd_type; +} __packed; + +struct tdm_fw_resp { + struct tdm_version version; +} __packed; + +/*TDM_EXPORT_CERT*/ +struct tdm_export_cert_cmd { + uint32_t cmd_type; + uint32_t key_usage_id; +} __packed; + +struct tdm_export_cert_resp { + struct tdm_cert cert; +} __packed; + +/*TDM_GET_REPORT*/ +struct tdm_get_report_cmd { + uint32_t cmd_type; + uint32_t task_id; + uint16_t 
selection_len; + uint8_t selection_bitmap[TDM_MAX_TASK_BITMAP]; + uint16_t user_data_len; + uint8_t user_data_val[TDM_MAX_NONCE_SIZE]; + uint8_t report_type; + uint32_t key_usage_id; +} __packed; + +/* Resopnse: + * struct tdm_report measure_report; + * struct tdm_report_sig measure_report_sig; + */ + +struct tdm_user_report_cmd { + struct tdm_get_report_cmd report_cmd; + uint32_t needed_length; +} __packed; + +/*TDM_VPCR_AUDIT*/ +struct tdm_get_vpcr_cmd { + uint32_t cmd_type; + struct pcr_select pcr; +} __packed; + +struct tdm_get_vpcr_resp { + uint32_t pcr; + struct tpm2b_digest digest; + struct tdm_pcr_value_2b pcr_values; +} __packed; + +struct tdm_show_device { + struct tdm_version version; +} __packed; + +/*Public api definition for tdm*/ +typedef int (*measure_exception_handler_t)(uint32_t task_id); + +int psp_check_tdm_support(void); +int psp_get_fw_info(struct tdm_version *version); +int psp_create_measure_task(struct addr_range_info *range, struct measure_data *data, + uint32_t flag, struct authcode_2b *code); +int psp_query_measure_status(uint32_t task_id, struct measure_status *status); +int psp_register_measure_exception_handler(uint32_t task_id, struct authcode_2b *code, + measure_exception_handler_t handler); +int psp_destroy_measure_task(uint32_t task_id, struct authcode_2b *code); +int psp_update_measure_task(uint32_t task_id, struct authcode_2b *code, + struct measure_update_data *data); +int psp_startstop_measure_task(uint32_t task_id, struct authcode_2b *code, bool start); +int tdm_export_cert(uint32_t key_usage_id, struct tdm_cert *cert); +int tdm_get_report(uint32_t task_id, struct task_selection_2b *selection, + struct data_2b *user_supplied_data, uint8_t report_type, uint32_t key_usage_id, + uint8_t *report_buffer, uint32_t *length); +int tdm_get_vpcr_audit(struct pcr_select pcr, struct tpm2b_digest *digest, + struct tdm_pcr_value_2b *pcr_values); + +int tdm_dev_init(void); +int tdm_dev_destroy(void); +#endif /* __TDM_DEV_H__*/ diff --git a/drivers/crypto/ccp/tdm-kernel-guard.c b/drivers/crypto/ccp/tdm-kernel-guard.c new file mode 100644 index 0000000000000000000000000000000000000000..c3afe888ea04d77ed2eb53d67364423915e9b300 --- /dev/null +++ b/drivers/crypto/ccp/tdm-kernel-guard.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The Hygon TDM KERNEL GUARD module driver + * + * Copyright (C) 2022 Hygon Info Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "tdm-dev.h" + +#ifdef pr_fmt +#undef pr_fmt +#endif +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +static int eh_obj = -1; +module_param(eh_obj, int, 0644); +MODULE_PARM_DESC(eh_obj, "security enhance object for TDM"); + +/* Objects are protected by TDM now + * SCT: 0 + * IDT: 1 + */ +enum ENHANCE_OBJS { + SCT = 0, + IDT, + MAX_OBJ +}; + +static char *obj_names[MAX_OBJ] = { + "SCT", + "IDT", +}; + +struct tdm_security_enhance { + uint64_t vaddr; + uint32_t size; + struct addr_range_info *mem_range; + struct authcode_2b *authcode; + struct measure_data mdata; + uint32_t context; + uint32_t task_id; + char *obj_name; +} __packed; + +static struct tdm_security_enhance eh_objs[MAX_OBJ]; + +static int tdm_regi_callback_handler(uint32_t task_id) +{ + int i = 0; + int ret = 0; + + for (i = 0; i < MAX_OBJ; i++) { + if (task_id == eh_objs[i].task_id) { + pr_warn("Obj: %s, Task:%d, corruption detected!\n", eh_objs[i].obj_name, + task_id); + pr_warn("Please check if it's intended, or your machine may be on danger!\n"); + break; + } + } + return ret; +} + +static int calc_expected_hash(uint8_t *base_addr, uint32_t size, uint8_t *hash) +{ + int ret = 0; + struct crypto_shash *shash = NULL; + + shash = crypto_alloc_shash("sm3", 0, 0); + if (IS_ERR(shash)) { + ret = PTR_ERR(shash); + return ret; + } + + { + SHASH_DESC_ON_STACK(sdesc, shash); + + sdesc->tfm = shash; + ret = crypto_shash_init(sdesc); + if (ret) { + pr_err("crypto_shash_init failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_update(sdesc, base_addr, size); + if (ret) { + pr_err("crypto_shash_update failed\n"); + ret = -1; + goto out; + } + + ret = crypto_shash_final(sdesc, hash); + if (ret) { + pr_err("crypto_shash_final failed\n"); + ret = -1; + goto out; + } + } + +out: + crypto_free_shash(shash); + return ret; +} + +static int tdm_task_create_and_run(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + data->task_id = psp_create_measure_task(data->mem_range, &data->mdata, data->context, + data->authcode); + if (data->task_id < 0) { + ret = data->task_id < 0; + pr_err("create measurement task failed with 0x%x!\n", data->task_id); + goto end; + } + + ret = psp_register_measure_exception_handler(data->task_id, data->authcode, + tdm_regi_callback_handler); + if (ret < 0) { + pr_err("task_id %d callback function register failed with 0x%x\n", data->task_id, + ret); + goto release_task; + } + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, true); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d start failed with 0x%x\n", data->task_id, ret); + goto release_task; + } + + return ret; + +release_task: + psp_destroy_measure_task(data->task_id, data->authcode); +end: + return ret; +} + +int tdm_service_run(struct tdm_security_enhance *data) +{ + int ret = 0; + struct addr_range_info *addr_range = NULL; + + // Allocate memory for addr_range + addr_range = kzalloc(sizeof(struct addr_range_info) + sizeof(struct addr_info), GFP_KERNEL); + if (!addr_range) { + ret = -DYN_ERR_MEM; + pr_err("addr_range kzalloc memory failed\n"); + goto end; + } + + // Fill in addr_range + addr_range->count = 1; + addr_range->addr[0].addr_start = data->vaddr; + addr_range->addr[0].length = data->size; + data->mem_range = addr_range; + + // Context configuration + data->context |= TASK_CREATE_VADDR; + + // Allocate memory for authcode + data->authcode = kzalloc(sizeof(struct authcode_2b) + AUTHCODE_MAX, 
GFP_KERNEL); + if (!data->authcode) { + ret = -DYN_ERR_MEM; + pr_err("authcode_2b kzalloc memory failed\n"); + goto free_addr_range_info; + } + + data->authcode->len = AUTHCODE_MAX; + + // Measurement data configuration + data->mdata.hash_algo = HASH_ALGO_SM3; + data->mdata.period_ms = 0; + ret = calc_expected_hash((uint8_t *)data->vaddr, data->size, + data->mdata.expected_measurement); + if (ret) { + pr_err("calculate expected hash failed!\n"); + goto free_authcode; + } + + // Create and start tdm task + ret = tdm_task_create_and_run(data); + if (ret) { + pr_err("tdm_task_create_and_run failed!\n"); + goto free_authcode; + } + + return ret; + +free_authcode: + kfree(data->authcode); + data->authcode = NULL; +free_addr_range_info: + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +int tdm_service_exit(struct tdm_security_enhance *data) +{ + int ret = 0; + int task_status = 0; + + task_status = psp_startstop_measure_task(data->task_id, data->authcode, false); + if (task_status < 0) { + ret = task_status; + pr_err("task_id %d stop failed with 0x%x\n", data->task_id, ret); + goto end; + } + + // Waiting for the task to end + msleep(40); + + psp_destroy_measure_task(data->task_id, data->authcode); + + kfree(data->authcode); + data->authcode = NULL; + kfree(data->mem_range); + data->mem_range = NULL; +end: + return ret; +} + +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) +static int p_tmp_kprobe_handler(struct kprobe *p_ri, struct pt_regs *p_regs) +{ + return 0; +} + +unsigned long kprobe_symbol_address_byname(const char *name) +{ + int p_ret; + struct kprobe p_kprobe; + unsigned long addr = 0; + + memset(&p_kprobe, 0, sizeof(p_kprobe)); + + p_kprobe.pre_handler = p_tmp_kprobe_handler; + p_kprobe.symbol_name = name; + + p_ret = register_kprobe(&p_kprobe); + if (p_ret < 0) { + pr_err("register_kprobe error [%d] :(\n", p_ret); + return 0; + } + + addr = (unsigned long)p_kprobe.addr; + unregister_kprobe(&p_kprobe); + + return addr; +} +#endif + +static int __init kernel_security_enhance_init(void) +{ + int i = 0; + int ret = 0; + unsigned long *sct_addr; + struct desc_ptr idtr; +#if !IS_BUILTIN(CONFIG_TDM_KERNEL_GUARD) + unsigned long (*f_kallsyms_lookup_name)(const char *); + + f_kallsyms_lookup_name = (unsigned long (*)(const char *))kprobe_symbol_address_byname( + "kallsyms_lookup_name"); + if (!f_kallsyms_lookup_name) { + ret = -DYN_ERR_API; + pr_err("kprobe_symbol_address_byname failed!"); + goto end; + } + + sct_addr = (unsigned long *)f_kallsyms_lookup_name("sys_call_table"); +#else + + sct_addr = (unsigned long *)kallsyms_lookup_name("sys_call_table"); +#endif + if (!sct_addr) { + ret = -DYN_ERR_API; + pr_err("kallsyms_lookup_name for sys_call_table failed!"); + goto end; + } + + asm("sidt %0":"=m"(idtr)); + + if (!psp_check_tdm_support()) + return 0; + + for (i = 0; i < MAX_OBJ; i++) { + memset(&eh_objs[i], 0, sizeof(eh_objs[i])); + eh_objs[i].context = CONTEXT_CHECK_MODNAME; + eh_objs[i].obj_name = obj_names[i]; + } + + if ((eh_obj == -1) || (eh_obj & (1 << SCT))) { + eh_objs[SCT].vaddr = (uint64_t)sct_addr; + eh_objs[SCT].size = NR_syscalls * sizeof(char *); + } + if ((eh_obj == -1) || (eh_obj & (1 << IDT))) { + eh_objs[IDT].vaddr = idtr.address; + eh_objs[IDT].size = idtr.size; + } + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_run(&eh_objs[i]); + } + + pr_info("Hygon TDM guard load successfully!\n"); + +end: + return ret; +} + +static void __exit kernel_security_enhance_exit(void) +{ + int i = 0; + + if (!psp_check_tdm_support()) + 
return; + + for (i = 0; i < MAX_OBJ; i++) { + if (eh_objs[i].vaddr) + tdm_service_exit(&eh_objs[i]); + } + pr_info("Hygon TDM guard unload successfully!\n"); +} + +MODULE_AUTHOR("niuyongwen@hygon.cn"); +MODULE_LICENSE("GPL"); +MODULE_VERSION("0.1"); +MODULE_DESCRIPTION("Kernel security enhancement module by TDM"); + +/* + * kernel_security_enhance_init must be done after ccp module init. + * That's why we use a device_initcall_sync which is + * called after all the device_initcall(includes ccp) but before the + * late_initcall(includes ima). + */ +device_initcall_sync(kernel_security_enhance_init); +module_exit(kernel_security_enhance_exit); diff --git a/drivers/crypto/intel/qat/Kconfig b/drivers/crypto/intel/qat/Kconfig index 1220cc86f9100af58dca9d639080c3de4b9f84e8..02fb8abe4e6ed3eae100f9be715ea276452a0653 100644 --- a/drivers/crypto/intel/qat/Kconfig +++ b/drivers/crypto/intel/qat/Kconfig @@ -59,6 +59,17 @@ config CRYPTO_DEV_QAT_4XXX To compile this as a module, choose M here: the module will be called qat_4xxx. +config CRYPTO_DEV_QAT_420XX + tristate "Support for Intel(R) QAT_420XX" + depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) + select CRYPTO_DEV_QAT + help + Support for Intel(R) QuickAssist Technology QAT_420xx + for accelerating crypto and compression workloads. + + To compile this as a module, choose M here: the module + will be called qat_420xx. + config CRYPTO_DEV_QAT_DH895xCCVF tristate "Support for Intel(R) DH895xCC Virtual Function" depends on PCI && (!CPU_BIG_ENDIAN || COMPILE_TEST) @@ -95,3 +106,17 @@ config CRYPTO_DEV_QAT_C62XVF To compile this as a module, choose M here: the module will be called qat_c62xvf. + +config CRYPTO_DEV_QAT_ERROR_INJECTION + bool "Support for Intel(R) QAT Devices Heartbeat Error Injection" + depends on CRYPTO_DEV_QAT + depends on DEBUG_FS + help + Enables a mechanism that allows to inject a heartbeat error on + Intel(R) QuickAssist devices for testing purposes. + + This is intended for developer use only. + If unsure, say N. 
+ + This functionality is available via debugfs entry of the Intel(R) + QuickAssist device diff --git a/drivers/crypto/intel/qat/Makefile b/drivers/crypto/intel/qat/Makefile index 258c8a626ce04989925e5be69fe1a7c5c2327577..235b69f4f3f72a29b7e3a6b914c08cdcac4d4dd2 100644 --- a/drivers/crypto/intel/qat/Makefile +++ b/drivers/crypto/intel/qat/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCC) += qat_dh895xcc/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXX) += qat_c3xxx/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62X) += qat_c62x/ obj-$(CONFIG_CRYPTO_DEV_QAT_4XXX) += qat_4xxx/ +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx/ obj-$(CONFIG_CRYPTO_DEV_QAT_DH895xCCVF) += qat_dh895xccvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C3XXXVF) += qat_c3xxxvf/ obj-$(CONFIG_CRYPTO_DEV_QAT_C62XVF) += qat_c62xvf/ diff --git a/drivers/crypto/intel/qat/qat_420xx/Makefile b/drivers/crypto/intel/qat/qat_420xx/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a90fbe00b3c88fd47548f0a0a9a57e95bd79a09e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/Makefile @@ -0,0 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0-only +ccflags-y := -I $(srctree)/$(src)/../qat_common +obj-$(CONFIG_CRYPTO_DEV_QAT_420XX) += qat_420xx.o +qat_420xx-objs := adf_drv.o adf_420xx_hw_data.o diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c new file mode 100644 index 0000000000000000000000000000000000000000..1102c47f8293d572bf0a65836e72665b818e86e2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.c @@ -0,0 +1,496 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "adf_420xx_hw_data.h" +#include "icp_qat_hw.h" + +#define ADF_AE_GROUP_0 GENMASK(3, 0) +#define ADF_AE_GROUP_1 GENMASK(7, 4) +#define ADF_AE_GROUP_2 GENMASK(11, 8) +#define ADF_AE_GROUP_3 GENMASK(15, 12) +#define ADF_AE_GROUP_4 BIT(16) + +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_SYM GENMASK(3, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) + +static const char * const adf_420xx_fw_objs[] = { + [ADF_FW_SYM_OBJ] = ADF_420XX_SYM_OBJ, + [ADF_FW_ASYM_OBJ] = ADF_420XX_ASYM_OBJ, + [ADF_FW_DC_OBJ] = ADF_420XX_DC_OBJ, + [ADF_FW_ADMIN_OBJ] = ADF_420XX_ADMIN_OBJ, +}; + +static const struct adf_fw_config adf_fw_cy_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_asym_dc_config[] = { + {ADF_AE_GROUP_3, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_2, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_ASYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + 
{ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_sym_dc_config[] = { + {ADF_AE_GROUP_2, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + +static const struct adf_fw_config adf_fw_dcc_config[] = { + {ADF_AE_GROUP_1, ADF_FW_DC_OBJ}, + {ADF_AE_GROUP_0, ADF_FW_SYM_OBJ}, + {ADF_AE_GROUP_4, ADF_FW_ADMIN_OBJ}, +}; + + +static struct adf_hw_device_class adf_420xx_class = { + .name = ADF_420XX_DEVICE_NAME, + .type = DEV_420XX, + .instances = 0, +}; + +static u32 get_ae_mask(struct adf_hw_device_data *self) +{ + u32 me_disable = self->fuses; + + return ~me_disable & ADF_420XX_ACCELENGINES_MASK; +} + +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return ARRAY_SIZE(adf_fw_cy_config); + case SVC_DC: + return ARRAY_SIZE(adf_fw_dc_config); + case SVC_DCC: + return ARRAY_SIZE(adf_fw_dcc_config); + case SVC_SYM: + return ARRAY_SIZE(adf_fw_sym_config); + case SVC_ASYM: + return ARRAY_SIZE(adf_fw_asym_config); + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return ARRAY_SIZE(adf_fw_asym_dc_config); + case SVC_SYM_DC: + case SVC_DC_SYM: + return ARRAY_SIZE(adf_fw_sym_dc_config); + default: + return 0; + } +} + +static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) +{ + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return adf_fw_cy_config; + case SVC_DC: + return adf_fw_dc_config; + case SVC_DCC: + return adf_fw_dcc_config; + case SVC_SYM: + return adf_fw_sym_config; + case SVC_ASYM: + return adf_fw_asym_config; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return adf_fw_asym_dc_config; + case SVC_SYM_DC: + case SVC_DC_SYM: + return adf_fw_sym_dc_config; + default: + return NULL; + } +} + +static void update_ae_mask(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + const struct adf_fw_config *fw_config; + u32 config_ae_mask = 0; + u32 ae_mask, num_objs; + int i; + + ae_mask = get_ae_mask(hw_data); + + /* Modify the AE mask based on the firmware configuration loaded */ + fw_config = get_fw_config(accel_dev); + num_objs = uof_get_num_objs(accel_dev); + + config_ae_mask |= ADF_420XX_ADMIN_AE_MASK; + for (i = 0; i < num_objs; i++) + config_ae_mask |= fw_config[i].ae_mask; + + hw_data->ae_mask = ae_mask & config_ae_mask; +} + +static u32 get_accel_cap(struct adf_accel_dev *accel_dev) +{ + u32 capabilities_sym, capabilities_asym, capabilities_dc; + struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; + u32 capabilities_dcc; + u32 fusectl1; + + /* As a side effect, update ae_mask based on configuration */ + update_ae_mask(accel_dev); + + /* Read accelerator capabilities mask */ + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); + + capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | + ICP_ACCEL_CAPABILITIES_CIPHER | + ICP_ACCEL_CAPABILITIES_AUTHENTICATION | + ICP_ACCEL_CAPABILITIES_SHA3 | + ICP_ACCEL_CAPABILITIES_SHA3_EXT | + ICP_ACCEL_CAPABILITIES_HKDF | + ICP_ACCEL_CAPABILITIES_CHACHA_POLY | + ICP_ACCEL_CAPABILITIES_AESGCM_SPC | + ICP_ACCEL_CAPABILITIES_SM3 | + ICP_ACCEL_CAPABILITIES_SM4 | + ICP_ACCEL_CAPABILITIES_AES_V2 | + ICP_ACCEL_CAPABILITIES_ZUC | + ICP_ACCEL_CAPABILITIES_ZUC_256 | + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT | + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN; + + /* A set bit in fusectl1 means the feature is OFF in this SKU */ + if (fusectl1 & 
ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_EIA3_SLICE) { + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC; + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + } + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE) + capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_ZUC_256; + + capabilities_asym = ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC | + ICP_ACCEL_CAPABILITIES_SM2 | + ICP_ACCEL_CAPABILITIES_ECEDMONT; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_PKE_SLICE) { + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; + capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; + } + + capabilities_dc = ICP_ACCEL_CAPABILITIES_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION | + ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | + ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; + capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; + } + + switch (adf_get_service_enabled(accel_dev)) { + case SVC_CY: + case SVC_CY2: + return capabilities_sym | capabilities_asym; + case SVC_DC: + return capabilities_dc; + case SVC_DCC: + /* + * Sym capabilities are available for chaining operations, + * but sym crypto instances cannot be supported + */ + capabilities_dcc = capabilities_dc | capabilities_sym; + capabilities_dcc &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; + return capabilities_dcc; + case SVC_SYM: + return capabilities_sym; + case SVC_ASYM: + return capabilities_asym; + case SVC_ASYM_DC: + case SVC_DC_ASYM: + return capabilities_asym | capabilities_dc; + case SVC_SYM_DC: + case SVC_DC_SYM: + return capabilities_sym | capabilities_dc; + default: + return 0; + } +} + +static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) +{ + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); + + return GET_HW_DATA(accel_dev)->thd_to_arb_map; +} + +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) +{ + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + 
rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_420XX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_420XX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_420XX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_420XX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_420XX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_420XX_RL_SLICE_REF; +} + +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + case ADF_AE_GROUP_3: + return RP_GROUP_1; + case ADF_AE_GROUP_2: + if (get_fw_config(accel_dev) == adf_fw_cy_config) + return RP_GROUP_0; + else + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} + +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } +} + +static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, + const char * const fw_objs[], int num_objs) +{ + const struct adf_fw_config *fw_config; + int id; + + fw_config = get_fw_config(accel_dev); + if (fw_config) + id = fw_config[obj_num].obj; + else + id = -EINVAL; + + if (id < 0 || id > num_objs) + return NULL; + + return fw_objs[id]; +} + +static const char *uof_get_name_420xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + int num_fw_objs = ARRAY_SIZE(adf_420xx_fw_objs); + + return uof_get_name(accel_dev, obj_num, adf_420xx_fw_objs, num_fw_objs); +} + +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + +static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return 0; + + return fw_config[obj_num].ae_mask; +} + +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_420XX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_420XX_SSMFEATREN_MASK; +} + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id) +{ + hw_data->dev_class = &adf_420xx_class; + hw_data->instance_id = adf_420xx_class.instances++; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = 
ADF_GEN4_MAX_ACCELERATORS; + hw_data->num_engines = ADF_420XX_MAX_ACCELENGINES; + hw_data->num_logical_accel = 1; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; + hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; + hw_data->alloc_irq = adf_isr_resource_alloc; + hw_data->free_irq = adf_isr_resource_free; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; + hw_data->get_ae_mask = get_ae_mask; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; + hw_data->get_accel_cap = get_accel_cap; + hw_data->get_sku = adf_gen4_get_sku; + hw_data->init_admin_comms = adf_init_admin_comms; + hw_data->exit_admin_comms = adf_exit_admin_comms; + hw_data->send_admin_init = adf_send_admin_init; + hw_data->init_arb = adf_init_arb; + hw_data->exit_arb = adf_exit_arb; + hw_data->get_arb_mapping = adf_get_arbiter_mapping; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; + hw_data->reset_device = adf_reset_flr; + hw_data->admin_ae_mask = ADF_420XX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; + hw_data->fw_name = ADF_420XX_FW; + hw_data->fw_mmp_name = ADF_420XX_MMP; + hw_data->uof_get_name = uof_get_name_420xx; + hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; + hw_data->uof_get_ae_mask = uof_get_ae_mask; + hw_data->get_rp_group = get_rp_group; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; + hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; + hw_data->disable_iov = adf_disable_sriov; + hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; + hw_data->enable_pm = adf_gen4_enable_pm; + hw_data->handle_pm_interrupt = adf_gen4_handle_pm_interrupt; + hw_data->dev_config = adf_gen4_dev_config; + hw_data->start_timer = adf_gen4_timer_start; + hw_data->stop_timer = adf_gen4_timer_stop; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; + hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_420XX_AE_FREQ; + + adf_gen4_set_err_mask(&hw_data->dev_err_mask); + adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); + adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); + adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); + adf_init_rl_data(&hw_data->rl_data); +} + +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data) +{ + hw_data->dev_class->instances--; +} diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h new file mode 100644 index 0000000000000000000000000000000000000000..99abbfc1482063c4f3aacc942e8605510538160d --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_420xx_hw_data.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_420XX_HW_DATA_H_ +#define ADF_420XX_HW_DATA_H_ + +#include + +#define ADF_420XX_MAX_ACCELENGINES 17 + +#define ADF_420XX_ACCELENGINES_MASK 0x1FFFF +#define ADF_420XX_ADMIN_AE_MASK 0x10000 
+ +#define ADF_420XX_HICPPAGENTCMDPARERRLOG_MASK (0xFF) +#define ADF_420XX_PARITYERRORMASK_ATH_CPH_MASK (0xFF00FF) +#define ADF_420XX_PARITYERRORMASK_CPR_XLT_MASK (0x10001) +#define ADF_420XX_PARITYERRORMASK_DCPR_UCS_MASK (0xF0007) +#define ADF_420XX_PARITYERRORMASK_PKE_MASK (0xFFF) +#define ADF_420XX_PARITYERRORMASK_WAT_WCP_MASK (0x3FF03FF) + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(27) - enable parity detection on SPPs + */ +#define ADF_420XX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26) | BIT(27)) + +/* Firmware Binaries */ +#define ADF_420XX_FW "qat_420xx.bin" +#define ADF_420XX_MMP "qat_420xx_mmp.bin" +#define ADF_420XX_SYM_OBJ "qat_420xx_sym.bin" +#define ADF_420XX_DC_OBJ "qat_420xx_dc.bin" +#define ADF_420XX_ASYM_OBJ "qat_420xx_asym.bin" +#define ADF_420XX_ADMIN_OBJ "qat_420xx_admin.bin" + +/* RL constants */ +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_420XX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_420XX_RL_DCPR_CORRECTION 1 +#define ADF_420XX_RL_SCANS_PER_SEC 954 +#define ADF_420XX_RL_MAX_TP_ASYM 173750UL +#define ADF_420XX_RL_MAX_TP_SYM 95000UL +#define ADF_420XX_RL_MAX_TP_DC 40000UL +#define ADF_420XX_RL_SLICE_REF 1000UL + +/* Clocks frequency */ +#define ADF_420XX_AE_FREQ (1000 * HZ_PER_MHZ) + +void adf_init_hw_data_420xx(struct adf_hw_device_data *hw_data, u32 dev_id); +void adf_clean_hw_data_420xx(struct adf_hw_device_data *hw_data); + +#endif diff --git a/drivers/crypto/intel/qat/qat_420xx/adf_drv.c b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..2a3598409eeb5132056a535f0863b627224cc2c4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_420xx/adf_drv.c @@ -0,0 +1,202 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "adf_420xx_hw_data.h" + +static const struct pci_device_id adf_pci_tbl[] = { + { PCI_VDEVICE(INTEL, ADF_420XX_PCI_DEVICE_ID), }, + { } +}; +MODULE_DEVICE_TABLE(pci, adf_pci_tbl); + +static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->hw_device) { + adf_clean_hw_data_420xx(accel_dev->hw_device); + accel_dev->hw_device = NULL; + } + adf_dbgfs_exit(accel_dev); + adf_cfg_dev_remove(accel_dev); + adf_devmgr_rm_dev(accel_dev, NULL); +} + +static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct adf_accel_dev *accel_dev; + struct adf_accel_pci *accel_pci_dev; + struct adf_hw_device_data *hw_data; + unsigned int i, bar_nr; + unsigned long bar_mask; + struct adf_bar *bar; + int ret; + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + /* + * If the accelerator is connected to a node with no memory + * there is no point in using the accelerator since the remote + * memory transaction will be very slow. 
+ */ + dev_err(&pdev->dev, "Invalid NUMA configuration.\n"); + return -EINVAL; + } + + accel_dev = devm_kzalloc(&pdev->dev, sizeof(*accel_dev), GFP_KERNEL); + if (!accel_dev) + return -ENOMEM; + + INIT_LIST_HEAD(&accel_dev->crypto_list); + accel_pci_dev = &accel_dev->accel_pci_dev; + accel_pci_dev->pci_dev = pdev; + + /* + * Add accel device to accel table + * This should be called before adf_cleanup_accel is called + */ + if (adf_devmgr_add_dev(accel_dev, NULL)) { + dev_err(&pdev->dev, "Failed to add new accelerator device.\n"); + return -EFAULT; + } + + accel_dev->owner = THIS_MODULE; + /* Allocate and initialise device hardware meta-data structure */ + hw_data = devm_kzalloc(&pdev->dev, sizeof(*hw_data), GFP_KERNEL); + if (!hw_data) { + ret = -ENOMEM; + goto out_err; + } + + accel_dev->hw_device = hw_data; + adf_init_hw_data_420xx(accel_dev->hw_device, ent->device); + + pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); + + /* Get Accelerators and Accelerators Engines masks */ + hw_data->accel_mask = hw_data->get_accel_mask(hw_data); + hw_data->ae_mask = hw_data->get_ae_mask(hw_data); + accel_pci_dev->sku = hw_data->get_sku(hw_data); + /* If the device has no acceleration engines then ignore it */ + if (!hw_data->accel_mask || !hw_data->ae_mask || + (~hw_data->ae_mask & 0x01)) { + dev_err(&pdev->dev, "No acceleration units found.\n"); + ret = -EFAULT; + goto out_err; + } + + /* Create device configuration table */ + ret = adf_cfg_dev_add(accel_dev); + if (ret) + goto out_err; + + /* Enable PCI device */ + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "Can't enable PCI device.\n"); + goto out_err; + } + + /* Set DMA identifier */ + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "No usable DMA configuration.\n"); + goto out_err; + } + + ret = adf_gen4_cfg_dev_init(accel_dev); + if (ret) { + dev_err(&pdev->dev, "Failed to initialize configuration.\n"); + goto out_err; + } + + /* Get accelerator capabilities mask */ + hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); + if (!hw_data->accel_capabilities_mask) { + dev_err(&pdev->dev, "Failed to get capabilities mask.\n"); + ret = -EINVAL; + goto out_err; + } + + /* Find and map all the device's BARS */ + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; + + ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "Failed to map pci regions.\n"); + goto out_err; + } + + i = 0; + for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { + bar = &accel_pci_dev->pci_bars[i++]; + bar->virt_addr = pcim_iomap_table(pdev)[bar_nr]; + } + + pci_set_master(pdev); + + if (pci_save_state(pdev)) { + dev_err(&pdev->dev, "Failed to save pci state.\n"); + ret = -ENOMEM; + goto out_err; + } + + accel_dev->ras_errors.enabled = true; + adf_dbgfs_init(accel_dev); + + ret = adf_dev_up(accel_dev, true); + if (ret) + goto out_err_dev_stop; + + ret = adf_sysfs_init(accel_dev); + if (ret) + goto out_err_dev_stop; + + return ret; + +out_err_dev_stop: + adf_dev_down(accel_dev, false); +out_err: + adf_cleanup_accel(accel_dev); + return ret; +} + +static void adf_remove(struct pci_dev *pdev) +{ + struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + + if (!accel_dev) { + pr_err("QAT: Driver removal failed\n"); + return; + } + adf_dev_down(accel_dev, false); + adf_cleanup_accel(accel_dev); +} + +static struct 
pci_driver adf_driver = { + .id_table = adf_pci_tbl, + .name = ADF_420XX_DEVICE_NAME, + .probe = adf_probe, + .remove = adf_remove, + .sriov_configure = adf_sriov_configure, + .err_handler = &adf_err_handler, +}; + +module_pci_driver(adf_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Intel"); +MODULE_FIRMWARE(ADF_420XX_FW); +MODULE_FIRMWARE(ADF_420XX_MMP); +MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); +MODULE_VERSION(ADF_DRV_VERSION); +MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c index a5691ba0b7244bc4dc20c00bb8048e521913d787..927506cf271d00d37b619d6594e8d6f5aeef5689 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.c @@ -2,28 +2,31 @@ /* Copyright(c) 2020 - 2021 Intel Corporation */ #include #include +#include #include +#include #include #include +#include +#include #include #include #include #include +#include "adf_gen4_ras.h" #include +#include #include "adf_4xxx_hw_data.h" -#include "adf_cfg_services.h" #include "icp_qat_hw.h" #define ADF_AE_GROUP_0 GENMASK(3, 0) #define ADF_AE_GROUP_1 GENMASK(7, 4) #define ADF_AE_GROUP_2 BIT(8) -enum adf_fw_objs { - ADF_FW_SYM_OBJ, - ADF_FW_ASYM_OBJ, - ADF_FW_DC_OBJ, - ADF_FW_ADMIN_OBJ, -}; +#define ENA_THD_MASK_ASYM GENMASK(1, 0) +#define ENA_THD_MASK_ASYM_401XX GENMASK(5, 0) +#define ENA_THD_MASK_SYM GENMASK(6, 0) +#define ENA_THD_MASK_DC GENMASK(1, 0) static const char * const adf_4xxx_fw_objs[] = { [ADF_FW_SYM_OBJ] = ADF_4XXX_SYM_OBJ, @@ -39,11 +42,6 @@ static const char * const adf_402xx_fw_objs[] = { [ADF_FW_ADMIN_OBJ] = ADF_402XX_ADMIN_OBJ, }; -struct adf_fw_config { - u32 ae_mask; - enum adf_fw_objs obj; -}; - static const struct adf_fw_config adf_fw_cy_config[] = { {ADF_AE_GROUP_1, ADF_FW_SYM_OBJ}, {ADF_AE_GROUP_0, ADF_FW_ASYM_OBJ}, @@ -93,59 +91,12 @@ static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_asym_dc_config)) static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_sym_dc_config)); static_assert(ARRAY_SIZE(adf_fw_cy_config) == ARRAY_SIZE(adf_fw_dcc_config)); -/* Worker thread to service arbiter mappings */ -static const u32 default_thrd_to_arb_map[ADF_4XXX_MAX_ACCELENGINES] = { - 0x5555555, 0x5555555, 0x5555555, 0x5555555, - 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, 0xAAAAAAA, - 0x0 -}; - -static const u32 thrd_to_arb_map_dc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF, - 0x0 -}; - -static const u32 thrd_to_arb_map_dcc[ADF_4XXX_MAX_ACCELENGINES] = { - 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, - 0x0 -}; - static struct adf_hw_device_class adf_4xxx_class = { .name = ADF_4XXX_DEVICE_NAME, .type = DEV_4XXX, .instances = 0, }; -static int get_service_enabled(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) { - dev_err(&GET_DEV(accel_dev), - ADF_SERVICES_ENABLED " param not found\n"); - return ret; - } - - ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services), - services); - if (ret < 0) - dev_err(&GET_DEV(accel_dev), - "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n", - services); - - return ret; -} - -static u32 get_accel_mask(struct adf_hw_device_data *self) -{ - return 
ADF_4XXX_ACCELERATORS_MASK; -} - static u32 get_ae_mask(struct adf_hw_device_data *self) { u32 me_disable = self->fuses; @@ -153,55 +104,6 @@ static u32 get_ae_mask(struct adf_hw_device_data *self) return ~me_disable & ADF_4XXX_ACCELENGINES_MASK; } -static u32 get_num_accels(struct adf_hw_device_data *self) -{ - return ADF_4XXX_MAX_ACCELERATORS; -} - -static u32 get_num_aes(struct adf_hw_device_data *self) -{ - if (!self || !self->ae_mask) - return 0; - - return hweight32(self->ae_mask); -} - -static u32 get_misc_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_PMISC_BAR; -} - -static u32 get_etr_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_ETR_BAR; -} - -static u32 get_sram_bar_id(struct adf_hw_device_data *self) -{ - return ADF_4XXX_SRAM_BAR; -} - -/* - * The vector routing table is used to select the MSI-X entry to use for each - * interrupt source. - * The first ADF_4XXX_ETR_MAX_BANKS entries correspond to ring interrupts. - * The final entry corresponds to VF2PF or error interrupts. - * This vector table could be used to configure one MSI-X entry to be shared - * between multiple interrupt sources. - * - * The default routing is set to have a one to one correspondence between the - * interrupt source and the MSI-X entry used. - */ -static void set_msix_default_rttable(struct adf_accel_dev *accel_dev) -{ - void __iomem *csr; - int i; - - csr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - for (i = 0; i <= ADF_4XXX_ETR_MAX_BANKS; i++) - ADF_CSR_WR(csr, ADF_4XXX_MSIX_RTTABLE_OFFSET(i), i); -} - static u32 get_accel_cap(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev; @@ -210,7 +112,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) u32 fusectl1; /* Read accelerator capabilities mask */ - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL1_OFFSET, &fusectl1); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL1_OFFSET, &fusectl1); capabilities_sym = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC | ICP_ACCEL_CAPABILITIES_CIPHER | @@ -225,27 +127,27 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_AES_V2; /* A set bit in fusectl1 means the feature is OFF in this SKU */ - if (fusectl1 & ICP_ACCEL_4XXX_MASK_CIPHER_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_CIPHER_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_HKDF; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_UCS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_UCS_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CHACHA_POLY; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AESGCM_SPC; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AES_V2; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_AUTH_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_AUTH_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SHA3_EXT; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_CIPHER; } - if (fusectl1 & ICP_ACCEL_4XXX_MASK_SMX_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_SMX_SLICE) { capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM3; capabilities_sym &= ~ICP_ACCEL_CAPABILITIES_SM4; } @@ -255,7 +157,7 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_SM2 | ICP_ACCEL_CAPABILITIES_ECEDMONT; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_PKE_SLICE) { + if (fusectl1 & 
ICP_ACCEL_GEN4_MASK_PKE_SLICE) { capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_SM2; capabilities_asym &= ~ICP_ACCEL_CAPABILITIES_ECEDMONT; @@ -266,14 +168,14 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION | ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; - if (fusectl1 & ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE) { + if (fusectl1 & ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE) { capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION; capabilities_dc &= ~ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64; } - switch (get_service_enabled(accel_dev)) { + switch (adf_get_service_enabled(accel_dev)) { case SVC_CY: case SVC_CY2: return capabilities_sym | capabilities_asym; @@ -302,105 +204,41 @@ static u32 get_accel_cap(struct adf_accel_dev *accel_dev) } } -static enum dev_sku_info get_sku(struct adf_hw_device_data *self) -{ - return DEV_SKU_1; -} - static const u32 *adf_get_arbiter_mapping(struct adf_accel_dev *accel_dev) { - switch (get_service_enabled(accel_dev)) { - case SVC_DC: - return thrd_to_arb_map_dc; - case SVC_DCC: - return thrd_to_arb_map_dcc; - default: - return default_thrd_to_arb_map; - } -} + if (adf_gen4_init_thd2arb_map(accel_dev)) + dev_warn(&GET_DEV(accel_dev), + "Generate of the thread to arbiter map failed"); -static void get_arb_info(struct arb_info *arb_info) -{ - arb_info->arb_cfg = ADF_4XXX_ARB_CONFIG; - arb_info->arb_offset = ADF_4XXX_ARB_OFFSET; - arb_info->wt2sam_offset = ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET; + return GET_HW_DATA(accel_dev)->thd_to_arb_map; } -static void get_admin_info(struct admin_info *admin_csrs_info) +static void adf_init_rl_data(struct adf_rl_hw_data *rl_data) { - admin_csrs_info->mailbox_offset = ADF_4XXX_MAILBOX_BASE_OFFSET; - admin_csrs_info->admin_msg_ur = ADF_4XXX_ADMINMSGUR_OFFSET; - admin_csrs_info->admin_msg_lr = ADF_4XXX_ADMINMSGLR_OFFSET; + rl_data->pciout_tb_offset = ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET; + rl_data->pciin_tb_offset = ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET; + rl_data->r2l_offset = ADF_GEN4_RL_R2L_OFFSET; + rl_data->l2c_offset = ADF_GEN4_RL_L2C_OFFSET; + rl_data->c2s_offset = ADF_GEN4_RL_C2S_OFFSET; + + rl_data->pcie_scale_div = ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV; + rl_data->pcie_scale_mul = ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL; + rl_data->dcpr_correction = ADF_4XXX_RL_DCPR_CORRECTION; + rl_data->max_tp[ADF_SVC_ASYM] = ADF_4XXX_RL_MAX_TP_ASYM; + rl_data->max_tp[ADF_SVC_SYM] = ADF_4XXX_RL_MAX_TP_SYM; + rl_data->max_tp[ADF_SVC_DC] = ADF_4XXX_RL_MAX_TP_DC; + rl_data->scan_interval = ADF_4XXX_RL_SCANS_PER_SEC; + rl_data->scale_ref = ADF_4XXX_RL_SLICE_REF; } -static u32 get_heartbeat_clock(struct adf_hw_device_data *self) -{ - /* - * 4XXX uses KPT counter for HB - */ - return ADF_4XXX_KPT_COUNTER_FREQ; -} - -static void adf_enable_error_correction(struct adf_accel_dev *accel_dev) -{ - struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR]; - void __iomem *csr = misc_bar->virt_addr; - - /* Enable all in errsou3 except VFLR notification on host */ - ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); -} - -static void adf_enable_ints(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Enable bundle interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET, 0); - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET, 0); - - /* Enable 
misc interrupts */ - ADF_CSR_WR(addr, ADF_4XXX_SMIAPF_MASK_OFFSET, 0); -} - -static int adf_init_device(struct adf_accel_dev *accel_dev) -{ - void __iomem *addr; - u32 status; - u32 csr; - int ret; - - addr = (&GET_BARS(accel_dev)[ADF_4XXX_PMISC_BAR])->virt_addr; - - /* Temporarily mask PM interrupt */ - csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); - csr |= ADF_GEN4_PM_SOU; - ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); - - /* Set DRV_ACTIVE bit to power up the device */ - ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); - - /* Poll status register to make sure the device is powered up */ - ret = read_poll_timeout(ADF_CSR_RD, status, - status & ADF_GEN4_PM_INIT_STATE, - ADF_GEN4_PM_POLL_DELAY_US, - ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, - ADF_GEN4_PM_STATUS); - if (ret) - dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); - - return ret; -} - -static u32 uof_get_num_objs(void) +static u32 uof_get_num_objs(struct adf_accel_dev *accel_dev) { return ARRAY_SIZE(adf_fw_cy_config); } static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev) { - switch (get_service_enabled(accel_dev)) { + switch (adf_get_service_enabled(accel_dev)) { case SVC_CY: case SVC_CY2: return adf_fw_cy_config; @@ -423,57 +261,63 @@ static const struct adf_fw_config *get_fw_config(struct adf_accel_dev *accel_dev } } -enum adf_rp_groups { - RP_GROUP_0 = 0, - RP_GROUP_1, - RP_GROUP_COUNT -}; +static int get_rp_group(struct adf_accel_dev *accel_dev, u32 ae_mask) +{ + switch (ae_mask) { + case ADF_AE_GROUP_0: + return RP_GROUP_0; + case ADF_AE_GROUP_1: + return RP_GROUP_1; + default: + dev_dbg(&GET_DEV(accel_dev), "ae_mask not recognized"); + return -EINVAL; + } +} -static u16 get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +static u32 get_ena_thd_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { - enum adf_cfg_service_type rps[RP_GROUP_COUNT]; const struct adf_fw_config *fw_config; - u16 ring_to_svc_map; - int i, j; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; fw_config = get_fw_config(accel_dev); if (!fw_config) - return 0; - - for (i = 0; i < RP_GROUP_COUNT; i++) { - switch (fw_config[i].ae_mask) { - case ADF_AE_GROUP_0: - j = RP_GROUP_0; - break; - case ADF_AE_GROUP_1: - j = RP_GROUP_1; - break; - default: - return 0; - } - - switch (fw_config[i].obj) { - case ADF_FW_SYM_OBJ: - rps[j] = SYM; - break; - case ADF_FW_ASYM_OBJ: - rps[j] = ASYM; - break; - case ADF_FW_DC_OBJ: - rps[j] = COMP; - break; - default: - rps[j] = 0; - break; - } + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; } +} - ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | - rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | - rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; +static u32 get_ena_thd_mask_401xx(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return ADF_GEN4_ENA_THD_MASK_ERROR; - return ring_to_svc_map; + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return ADF_GEN4_ENA_THD_MASK_ERROR; + + switch (fw_config[obj_num].obj) { + case ADF_FW_ASYM_OBJ: + return ENA_THD_MASK_ASYM_401XX; + case ADF_FW_SYM_OBJ: + return ENA_THD_MASK_SYM; + case 
ADF_FW_DC_OBJ: + return ENA_THD_MASK_DC; + default: + return ADF_GEN4_ENA_THD_MASK_ERROR; + } } static const char *uof_get_name(struct adf_accel_dev *accel_dev, u32 obj_num, @@ -508,6 +352,20 @@ static const char *uof_get_name_402xx(struct adf_accel_dev *accel_dev, u32 obj_n return uof_get_name(accel_dev, obj_num, adf_402xx_fw_objs, num_fw_objs); } +static int uof_get_obj_type(struct adf_accel_dev *accel_dev, u32 obj_num) +{ + const struct adf_fw_config *fw_config; + + if (obj_num >= uof_get_num_objs(accel_dev)) + return -EINVAL; + + fw_config = get_fw_config(accel_dev); + if (!fw_config) + return -EINVAL; + + return fw_config[obj_num].obj; +} + static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) { const struct adf_fw_config *fw_config; @@ -519,60 +377,81 @@ static u32 uof_get_ae_mask(struct adf_accel_dev *accel_dev, u32 obj_num) return fw_config[obj_num].ae_mask; } +static void adf_gen4_set_err_mask(struct adf_dev_err_mask *dev_err_mask) +{ + dev_err_mask->cppagentcmdpar_mask = ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK; + dev_err_mask->parerr_ath_cph_mask = ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK; + dev_err_mask->parerr_cpr_xlt_mask = ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK; + dev_err_mask->parerr_dcpr_ucs_mask = ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK; + dev_err_mask->parerr_pke_mask = ADF_4XXX_PARITYERRORMASK_PKE_MASK; + dev_err_mask->ssmfeatren_mask = ADF_4XXX_SSMFEATREN_MASK; +} + void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) { hw_data->dev_class = &adf_4xxx_class; hw_data->instance_id = adf_4xxx_class.instances++; - hw_data->num_banks = ADF_4XXX_ETR_MAX_BANKS; - hw_data->num_banks_per_vf = ADF_4XXX_NUM_BANKS_PER_VF; - hw_data->num_rings_per_bank = ADF_4XXX_NUM_RINGS_PER_BANK; - hw_data->num_accel = ADF_4XXX_MAX_ACCELERATORS; + hw_data->num_banks = ADF_GEN4_ETR_MAX_BANKS; + hw_data->num_banks_per_vf = ADF_GEN4_NUM_BANKS_PER_VF; + hw_data->num_rings_per_bank = ADF_GEN4_NUM_RINGS_PER_BANK; + hw_data->num_accel = ADF_GEN4_MAX_ACCELERATORS; hw_data->num_engines = ADF_4XXX_MAX_ACCELENGINES; hw_data->num_logical_accel = 1; - hw_data->tx_rx_gap = ADF_4XXX_RX_RINGS_OFFSET; - hw_data->tx_rings_mask = ADF_4XXX_TX_RINGS_MASK; + hw_data->tx_rx_gap = ADF_GEN4_RX_RINGS_OFFSET; + hw_data->tx_rings_mask = ADF_GEN4_TX_RINGS_MASK; hw_data->ring_to_svc_map = ADF_GEN4_DEFAULT_RING_TO_SRV_MAP; hw_data->alloc_irq = adf_isr_resource_alloc; hw_data->free_irq = adf_isr_resource_free; - hw_data->enable_error_correction = adf_enable_error_correction; - hw_data->get_accel_mask = get_accel_mask; + hw_data->enable_error_correction = adf_gen4_enable_error_correction; + hw_data->get_accel_mask = adf_gen4_get_accel_mask; hw_data->get_ae_mask = get_ae_mask; - hw_data->get_num_accels = get_num_accels; - hw_data->get_num_aes = get_num_aes; - hw_data->get_sram_bar_id = get_sram_bar_id; - hw_data->get_etr_bar_id = get_etr_bar_id; - hw_data->get_misc_bar_id = get_misc_bar_id; - hw_data->get_arb_info = get_arb_info; - hw_data->get_admin_info = get_admin_info; + hw_data->get_num_accels = adf_gen4_get_num_accels; + hw_data->get_num_aes = adf_gen4_get_num_aes; + hw_data->get_sram_bar_id = adf_gen4_get_sram_bar_id; + hw_data->get_etr_bar_id = adf_gen4_get_etr_bar_id; + hw_data->get_misc_bar_id = adf_gen4_get_misc_bar_id; + hw_data->get_arb_info = adf_gen4_get_arb_info; + hw_data->get_admin_info = adf_gen4_get_admin_info; hw_data->get_accel_cap = get_accel_cap; - hw_data->get_sku = get_sku; + hw_data->get_sku = adf_gen4_get_sku; hw_data->init_admin_comms = adf_init_admin_comms; 
hw_data->exit_admin_comms = adf_exit_admin_comms; hw_data->send_admin_init = adf_send_admin_init; hw_data->init_arb = adf_init_arb; hw_data->exit_arb = adf_exit_arb; hw_data->get_arb_mapping = adf_get_arbiter_mapping; - hw_data->enable_ints = adf_enable_ints; - hw_data->init_device = adf_init_device; + hw_data->enable_ints = adf_gen4_enable_ints; + hw_data->init_device = adf_gen4_init_device; hw_data->reset_device = adf_reset_flr; hw_data->admin_ae_mask = ADF_4XXX_ADMIN_AE_MASK; + hw_data->num_rps = ADF_GEN4_MAX_RPS; switch (dev_id) { case ADF_402XX_PCI_DEVICE_ID: hw_data->fw_name = ADF_402XX_FW; hw_data->fw_mmp_name = ADF_402XX_MMP; hw_data->uof_get_name = uof_get_name_402xx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; + case ADF_401XX_PCI_DEVICE_ID: + hw_data->fw_name = ADF_4XXX_FW; + hw_data->fw_mmp_name = ADF_4XXX_MMP; + hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask_401xx; break; - default: hw_data->fw_name = ADF_4XXX_FW; hw_data->fw_mmp_name = ADF_4XXX_MMP; hw_data->uof_get_name = uof_get_name_4xxx; + hw_data->get_ena_thd_mask = get_ena_thd_mask; + break; } hw_data->uof_get_num_objs = uof_get_num_objs; + hw_data->uof_get_obj_type = uof_get_obj_type; hw_data->uof_get_ae_mask = uof_get_ae_mask; - hw_data->set_msix_rttable = set_msix_default_rttable; + hw_data->get_rp_group = get_rp_group; + hw_data->set_msix_rttable = adf_gen4_set_msix_default_rttable; hw_data->set_ssm_wdtimer = adf_gen4_set_ssm_wdtimer; - hw_data->get_ring_to_svc_map = get_ring_to_svc_map; + hw_data->get_ring_to_svc_map = adf_gen4_get_ring_to_svc_map; hw_data->disable_iov = adf_disable_sriov; hw_data->ring_pair_reset = adf_gen4_ring_pair_reset; hw_data->enable_pm = adf_gen4_enable_pm; @@ -580,12 +459,17 @@ void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id) hw_data->dev_config = adf_gen4_dev_config; hw_data->start_timer = adf_gen4_timer_start; hw_data->stop_timer = adf_gen4_timer_stop; - hw_data->get_hb_clock = get_heartbeat_clock; + hw_data->get_hb_clock = adf_gen4_get_heartbeat_clock; hw_data->num_hb_ctrs = ADF_NUM_HB_CNT_PER_AE; + hw_data->clock_frequency = ADF_4XXX_AE_FREQ; + adf_gen4_set_err_mask(&hw_data->dev_err_mask); adf_gen4_init_hw_csr_ops(&hw_data->csr_ops); adf_gen4_init_pf_pfvf_ops(&hw_data->pfvf_ops); adf_gen4_init_dc_ops(&hw_data->dc_ops); + adf_gen4_init_ras_ops(&hw_data->ras_ops); + adf_gen4_init_tl_data(&hw_data->tl_data); + adf_init_rl_data(&hw_data->rl_data); } void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data) diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h index bb3d95a8fb2129db35b07964825f90a562c6e0d1..76388363ea8776e4646772f6f9569509fcfac311 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_4xxx_hw_data.h @@ -6,49 +6,27 @@ #include #include -/* PCIe configuration space */ -#define ADF_4XXX_SRAM_BAR 0 -#define ADF_4XXX_PMISC_BAR 1 -#define ADF_4XXX_ETR_BAR 2 -#define ADF_4XXX_RX_RINGS_OFFSET 1 -#define ADF_4XXX_TX_RINGS_MASK 0x1 -#define ADF_4XXX_MAX_ACCELERATORS 1 #define ADF_4XXX_MAX_ACCELENGINES 9 -#define ADF_4XXX_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) -/* Physical function fuses */ -#define ADF_4XXX_FUSECTL0_OFFSET (0x2C8) -#define ADF_4XXX_FUSECTL1_OFFSET (0x2CC) -#define ADF_4XXX_FUSECTL2_OFFSET (0x2D0) -#define ADF_4XXX_FUSECTL3_OFFSET (0x2D4) -#define ADF_4XXX_FUSECTL4_OFFSET (0x2D8) -#define ADF_4XXX_FUSECTL5_OFFSET (0x2DC) - -#define ADF_4XXX_ACCELERATORS_MASK (0x1) 
#define ADF_4XXX_ACCELENGINES_MASK (0x1FF) #define ADF_4XXX_ADMIN_AE_MASK (0x100) -#define ADF_4XXX_ETR_MAX_BANKS 64 - -/* MSIX interrupt */ -#define ADF_4XXX_SMIAPF_RP_X0_MASK_OFFSET (0x41A040) -#define ADF_4XXX_SMIAPF_RP_X1_MASK_OFFSET (0x41A044) -#define ADF_4XXX_SMIAPF_MASK_OFFSET (0x41A084) -#define ADF_4XXX_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) - -/* Bank and ring configuration */ -#define ADF_4XXX_NUM_RINGS_PER_BANK 2 -#define ADF_4XXX_NUM_BANKS_PER_VF 4 - -/* Arbiter configuration */ -#define ADF_4XXX_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) -#define ADF_4XXX_ARB_OFFSET (0x0) -#define ADF_4XXX_ARB_WRK_2_SER_MAP_OFFSET (0x400) - -/* Admin Interface Reg Offset */ -#define ADF_4XXX_ADMINMSGUR_OFFSET (0x500574) -#define ADF_4XXX_ADMINMSGLR_OFFSET (0x500578) -#define ADF_4XXX_MAILBOX_BASE_OFFSET (0x600970) +#define ADF_4XXX_HICPPAGENTCMDPARERRLOG_MASK 0x1F +#define ADF_4XXX_PARITYERRORMASK_ATH_CPH_MASK 0xF000F +#define ADF_4XXX_PARITYERRORMASK_CPR_XLT_MASK 0x10001 +#define ADF_4XXX_PARITYERRORMASK_DCPR_UCS_MASK 0x30007 +#define ADF_4XXX_PARITYERRORMASK_PKE_MASK 0x3F + +/* + * SSMFEATREN bit mask + * BIT(4) - enables parity detection on CPP + * BIT(12) - enables the logging of push/pull data errors + * in pperr register + * BIT(16) - BIT(23) - enable parity detection on SPPs + */ +#define ADF_4XXX_SSMFEATREN_MASK \ + (BIT(4) | BIT(12) | BIT(16) | BIT(17) | BIT(18) | \ + BIT(19) | BIT(20) | BIT(21) | BIT(22) | BIT(23)) /* Firmware Binaries */ #define ADF_4XXX_FW "qat_4xxx.bin" @@ -65,22 +43,20 @@ #define ADF_402XX_ASYM_OBJ "qat_402xx_asym.bin" #define ADF_402XX_ADMIN_OBJ "qat_402xx_admin.bin" -/* Clocks frequency */ -#define ADF_4XXX_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) +/* RL constants */ +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_DIV 100 +#define ADF_4XXX_RL_PCIE_SCALE_FACTOR_MUL 102 +#define ADF_4XXX_RL_DCPR_CORRECTION 1 +#define ADF_4XXX_RL_SCANS_PER_SEC 954 +#define ADF_4XXX_RL_MAX_TP_ASYM 173750UL +#define ADF_4XXX_RL_MAX_TP_SYM 95000UL +#define ADF_4XXX_RL_MAX_TP_DC 45000UL +#define ADF_4XXX_RL_SLICE_REF 1000UL -/* qat_4xxx fuse bits are different from old GENs, redefine them */ -enum icp_qat_4xxx_slice_mask { - ICP_ACCEL_4XXX_MASK_CIPHER_SLICE = BIT(0), - ICP_ACCEL_4XXX_MASK_AUTH_SLICE = BIT(1), - ICP_ACCEL_4XXX_MASK_PKE_SLICE = BIT(2), - ICP_ACCEL_4XXX_MASK_COMPRESS_SLICE = BIT(3), - ICP_ACCEL_4XXX_MASK_UCS_SLICE = BIT(4), - ICP_ACCEL_4XXX_MASK_EIA3_SLICE = BIT(5), - ICP_ACCEL_4XXX_MASK_SMX_SLICE = BIT(7), -}; +/* Clocks frequency */ +#define ADF_4XXX_AE_FREQ (1000 * HZ_PER_MHZ) void adf_init_hw_data_4xxx(struct adf_hw_device_data *hw_data, u32 dev_id); void adf_clean_hw_data_4xxx(struct adf_hw_device_data *hw_data); -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c index 90f5c1ca7b8d855ac8af38acea702da87b1e0be8..9762f2bf7727f1ce5288b00bd8bac502d09935cf 100644 --- a/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_4xxx/adf_drv.c @@ -8,13 +8,10 @@ #include #include #include -#include +#include +#include #include "adf_4xxx_hw_data.h" -#include "adf_cfg_services.h" -#include "qat_compression.h" -#include "qat_crypto.h" -#include "adf_transport_access_macros.h" static const struct pci_device_id adf_pci_tbl[] = { { PCI_VDEVICE(INTEL, ADF_4XXX_PCI_DEVICE_ID), }, @@ -35,270 +32,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) adf_devmgr_rm_dev(accel_dev, NULL); } -static int adf_cfg_dev_init(struct adf_accel_dev 
*accel_dev) -{ - const char *config; - int ret; - - config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY; - - ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); - if (ret) - return ret; - - /* Default configuration is crypto only for even devices - * and compression for odd devices - */ - ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, config, - ADF_STR); - if (ret) - return ret; - - adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); - - return 0; -} - -static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long bank, val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_crypto(accel_dev)) - instances = min(cpus, banks / 2); - else - instances = 0; - - for (i = 0; i < instances; i++) { - val = i; - bank = i * 2; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - bank += 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &bank, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, - i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); - val = 128; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); - return ret; -} - -static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) -{ - char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; - int banks = GET_MAX_BANKS(accel_dev); - int cpus = num_online_cpus(); - unsigned long val; - int instances; - int ret; - int i; - - if (adf_hw_dev_has_compression(accel_dev)) - instances = min(cpus, banks); - else - instances = 0; - - for (i = 0; i 
< instances; i++) { - val = i; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 512; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = 1; - snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, - key, &val, ADF_DEC); - if (ret) - goto err; - - val = ADF_COALESCING_DEF_TIME; - snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); - ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", - key, &val, ADF_DEC); - if (ret) - goto err; - } - - val = i; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - goto err; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); - if (ret) - goto err; - - return 0; -err: - dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); - return ret; -} - -static int adf_no_dev_config(struct adf_accel_dev *accel_dev) -{ - unsigned long val; - int ret; - - val = 0; - ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, - &val, ADF_DEC); - if (ret) - return ret; - - return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, - &val, ADF_DEC); -} - -int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) -{ - char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; - int ret; - - ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); - if (ret) - goto err; - - ret = adf_cfg_section_add(accel_dev, "Accelerator0"); - if (ret) - goto err; - - ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, - ADF_SERVICES_ENABLED, services); - if (ret) - goto err; - - ret = sysfs_match_string(adf_cfg_services, services); - if (ret < 0) - goto err; - - switch (ret) { - case SVC_CY: - case SVC_CY2: - ret = adf_crypto_dev_config(accel_dev); - break; - case SVC_DC: - case SVC_DCC: - ret = adf_comp_dev_config(accel_dev); - break; - default: - ret = adf_no_dev_config(accel_dev); - break; - } - - if (ret) - goto err; - - set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); - - return ret; - -err: - dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); - return ret; -} - static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct adf_accel_dev *accel_dev; @@ -348,7 +81,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adf_init_hw_data_4xxx(accel_dev->hw_device, ent->device); pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid); - pci_read_config_dword(pdev, ADF_4XXX_FUSECTL4_OFFSET, &hw_data->fuses); + pci_read_config_dword(pdev, ADF_GEN4_FUSECTL4_OFFSET, &hw_data->fuses); /* Get Accelerators and Accelerators Engines masks */ hw_data->accel_mask = hw_data->get_accel_mask(hw_data); @@ -381,7 +114,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } - ret = adf_cfg_dev_init(accel_dev); + ret = adf_gen4_cfg_dev_init(accel_dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize configuration.\n"); goto out_err; @@ -396,7 +129,7 @@ static int adf_probe(struct pci_dev *pdev, const struct 
pci_device_id *ent) } /* Find and map all the device's BARS */ - bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_4XXX_BAR_MASK; + bar_mask = pci_select_bars(pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; ret = pcim_iomap_regions_request_all(pdev, bar_mask, pci_name(pdev)); if (ret) { @@ -418,6 +151,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_err; } + accel_dev->ras_errors.enabled = true; adf_dbgfs_init(accel_dev); ret = adf_dev_up(accel_dev, true); @@ -467,3 +201,4 @@ MODULE_FIRMWARE(ADF_4XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); MODULE_SOFTDEP("pre: crypto-intel_qat"); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c index 9c00c441b602d2d2a5b22e73be20dac1eca552fc..a882e0ea2279629dc19d55d444340202afd3aa17 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_c3xxx_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c index 468c9102093fce93303fe2cfeb3ed27df10a8598..956a4c85609a9504b8e73f23eed3c3f7add81b7a 100644 --- a/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxx/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C3XXX_FW); MODULE_FIRMWARE(ADF_C3XXX_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c index d5a0ecca9d0bba4929863448d479d52258d00a8d..a8de9cd09c05a2608ce2c1d918ba3e9edbbcfb12 100644 --- a/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c3xxxvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c index 355a781693eb3fbd7af8ef01ae432168a0459d64..48cf3eb7c73499f01dd56de7192b9586a199d67b 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_c62x_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c index 0186921be93689d041b0e0b7a88ef4437ae49907..ad0ca4384998524db6a4b1a89f3a3c94fb8b522b 100644 --- a/drivers/crypto/intel/qat/qat_c62x/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62x/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_C62X_FW); MODULE_FIRMWARE(ADF_C62X_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c index c9ae6c0d0dca2ec39b872de9d0e8e443c8605b3a..53b8ddb63364197278c945e257b473a81ff4913f 100644 --- a/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_c62xvf/adf_drv.c @@ 
-226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_common/Makefile b/drivers/crypto/intel/qat/qat_common/Makefile index 43622c7fca712c0266a845b75a1d1e9471b02d30..5915cde8a7aa4d72f45e5b233dd65dc588a912ac 100644 --- a/drivers/crypto/intel/qat/qat_common/Makefile +++ b/drivers/crypto/intel/qat/qat_common/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_CRYPTO_DEV_QAT) += intel_qat.o +ccflags-y += -DDEFAULT_SYMBOL_NAMESPACE=CRYPTO_QAT intel_qat-objs := adf_cfg.o \ adf_isr.o \ adf_ctl_drv.o \ + adf_cfg_services.o \ adf_dev_mgr.o \ adf_init.o \ adf_accel_engine.o \ @@ -11,12 +13,15 @@ intel_qat-objs := adf_cfg.o \ adf_admin.o \ adf_hw_arbiter.o \ adf_sysfs.o \ + adf_sysfs_ras_counters.o \ adf_gen2_hw_data.o \ adf_gen2_config.o \ + adf_gen4_config.o \ adf_gen4_hw_data.o \ adf_gen4_pm.o \ adf_gen2_dc.o \ adf_gen4_dc.o \ + adf_gen4_ras.o \ adf_gen4_timer.o \ adf_clock.o \ qat_crypto.o \ @@ -25,17 +30,28 @@ intel_qat-objs := adf_cfg.o \ qat_algs.o \ qat_asym_algs.o \ qat_algs_send.o \ + adf_rl.o \ + adf_rl_admin.o \ + adf_sysfs_rl.o \ qat_uclo.o \ qat_hal.o \ qat_bl.o intel_qat-$(CONFIG_DEBUG_FS) += adf_transport_debug.o \ adf_fw_counters.o \ + adf_cnv_dbgfs.o \ + adf_gen4_pm_debugfs.o \ + adf_gen4_tl.o \ adf_heartbeat.o \ adf_heartbeat_dbgfs.o \ + adf_pm_dbgfs.o \ + adf_telemetry.o \ + adf_tl_debugfs.o \ adf_dbgfs.o intel_qat-$(CONFIG_PCI_IOV) += adf_sriov.o adf_vf_isr.o adf_pfvf_utils.o \ adf_pfvf_pf_msg.o adf_pfvf_pf_proto.o \ adf_pfvf_vf_msg.o adf_pfvf_vf_proto.o \ adf_gen2_pfvf.o adf_gen4_pfvf.o + +intel_qat-$(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION) += adf_heartbeat_inject.o diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h index 79d5a1535eda34df0d5be747ff5eef8244e030d7..08658c3a01e9bcde41ecef655fbdab0e8291ac99 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_devices.h @@ -6,9 +6,14 @@ #include #include #include +#include #include +#include #include "adf_cfg_common.h" +#include "adf_rl.h" +#include "adf_telemetry.h" #include "adf_pfvf_msg.h" +#include "icp_qat_hw.h" #define ADF_DH895XCC_DEVICE_NAME "dh895xcc" #define ADF_DH895XCCVF_DEVICE_NAME "dh895xccvf" @@ -17,12 +22,15 @@ #define ADF_C3XXX_DEVICE_NAME "c3xxx" #define ADF_C3XXXVF_DEVICE_NAME "c3xxxvf" #define ADF_4XXX_DEVICE_NAME "4xxx" +#define ADF_420XX_DEVICE_NAME "420xx" #define ADF_4XXX_PCI_DEVICE_ID 0x4940 #define ADF_4XXXIOV_PCI_DEVICE_ID 0x4941 #define ADF_401XX_PCI_DEVICE_ID 0x4942 #define ADF_401XXIOV_PCI_DEVICE_ID 0x4943 #define ADF_402XX_PCI_DEVICE_ID 0x4944 #define ADF_402XXIOV_PCI_DEVICE_ID 0x4945 +#define ADF_420XX_PCI_DEVICE_ID 0x4946 +#define ADF_420XXIOV_PCI_DEVICE_ID 0x4947 #define ADF_DEVICE_FUSECTL_OFFSET 0x40 #define ADF_DEVICE_LEGFUSE_OFFSET 0x4C #define ADF_DEVICE_FUSECTL_MASK 0x80000000 @@ -81,6 +89,19 @@ enum dev_sku_info { DEV_SKU_UNKNOWN, }; +enum ras_errors { + ADF_RAS_CORR, + ADF_RAS_UNCORR, + ADF_RAS_FATAL, + ADF_RAS_ERRORS, +}; + +struct adf_error_counters { + atomic_t counter[ADF_RAS_ERRORS]; + bool sysfs_added; + bool enabled; +}; + static inline const char *get_sku_info(enum dev_sku_info info) { switch (info) { @@ -152,6 +173,13 @@ struct adf_accel_dev; struct adf_etr_data; struct adf_etr_ring_data; +struct adf_ras_ops { + void (*enable_ras_errors)(struct 
adf_accel_dev *accel_dev); + void (*disable_ras_errors)(struct adf_accel_dev *accel_dev); + bool (*handle_interrupt)(struct adf_accel_dev *accel_dev, + bool *reset_required); +}; + struct adf_pfvf_ops { int (*enable_comms)(struct adf_accel_dev *accel_dev); u32 (*get_pf2vf_offset)(u32 i); @@ -169,6 +197,16 @@ struct adf_dc_ops { void (*build_deflate_ctx)(void *ctx); }; +struct adf_dev_err_mask { + u32 cppagentcmdpar_mask; + u32 parerr_ath_cph_mask; + u32 parerr_cpr_xlt_mask; + u32 parerr_dcpr_ucs_mask; + u32 parerr_pke_mask; + u32 parerr_wat_wcp_mask; + u32 ssmfeatren_mask; +}; + struct adf_hw_device_data { struct adf_hw_device_class *dev_class; u32 (*get_accel_mask)(struct adf_hw_device_data *self); @@ -209,18 +247,26 @@ struct adf_hw_device_data { void (*reset_device)(struct adf_accel_dev *accel_dev); void (*set_msix_rttable)(struct adf_accel_dev *accel_dev); const char *(*uof_get_name)(struct adf_accel_dev *accel_dev, u32 obj_num); - u32 (*uof_get_num_objs)(void); + u32 (*uof_get_num_objs)(struct adf_accel_dev *accel_dev); + int (*uof_get_obj_type)(struct adf_accel_dev *accel_dev, u32 obj_num); u32 (*uof_get_ae_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); + int (*get_rp_group)(struct adf_accel_dev *accel_dev, u32 ae_mask); + u32 (*get_ena_thd_mask)(struct adf_accel_dev *accel_dev, u32 obj_num); int (*dev_config)(struct adf_accel_dev *accel_dev); struct adf_pfvf_ops pfvf_ops; struct adf_hw_csr_ops csr_ops; struct adf_dc_ops dc_ops; + struct adf_ras_ops ras_ops; + struct adf_dev_err_mask dev_err_mask; + struct adf_rl_hw_data rl_data; + struct adf_tl_hw_data tl_data; const char *fw_name; const char *fw_mmp_name; u32 fuses; u32 straps; u32 accel_capabilities_mask; u32 extended_dc_capabilities; + u16 fw_capabilities; u32 clock_frequency; u32 instance_id; u16 accel_mask; @@ -228,6 +274,7 @@ struct adf_hw_device_data { u32 admin_ae_mask; u16 tx_rings_mask; u16 ring_to_svc_map; + u32 thd_to_arb_map[ICP_QAT_HW_AE_DELIMITER]; u8 tx_rx_gap; u8 num_banks; u16 num_banks_per_vf; @@ -236,6 +283,7 @@ struct adf_hw_device_data { u8 num_logical_accel; u8 num_engines; u32 num_hb_ctrs; + u8 num_rps; }; /* CSR write macro */ @@ -263,10 +311,12 @@ struct adf_hw_device_data { #define GET_SRV_TYPE(accel_dev, idx) \ (((GET_HW_DATA(accel_dev)->ring_to_svc_map) >> (ADF_SRV_TYPE_BIT_LEN * (idx))) \ & ADF_SRV_TYPE_MASK) +#define GET_ERR_MASK(accel_dev) (&GET_HW_DATA(accel_dev)->dev_err_mask) #define GET_MAX_ACCELENGINES(accel_dev) (GET_HW_DATA(accel_dev)->num_engines) #define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops) #define GET_PFVF_OPS(accel_dev) (&(accel_dev)->hw_device->pfvf_ops) #define GET_DC_OPS(accel_dev) (&(accel_dev)->hw_device->dc_ops) +#define GET_TL_DATA(accel_dev) GET_HW_DATA(accel_dev)->tl_data #define accel_to_pci_dev(accel_ptr) accel_ptr->accel_pci_dev.pci_dev struct adf_admin_comms; @@ -283,6 +333,7 @@ struct adf_accel_vf_info { struct ratelimit_state vf2pf_ratelimit; u32 vf_nr; bool init; + bool restarting; u8 vf_compat_ver; }; @@ -292,24 +343,46 @@ struct adf_dc_data { dma_addr_t ovf_buff_p; }; +struct adf_pm { + struct dentry *debugfs_pm_status; + bool present; + int idle_irq_counters; + int throttle_irq_counters; + int fw_irq_counters; + int host_ack_counter; + int host_nack_counter; + ssize_t (*print_pm_status)(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, loff_t *pos); +}; + +struct adf_sysfs { + int ring_num; + struct rw_semaphore lock; /* protects access to the fields in this struct */ +}; + struct adf_accel_dev { struct adf_etr_data *transport; 
struct adf_hw_device_data *hw_device; struct adf_cfg_device_data *cfg; struct adf_fw_loader_data *fw_loader; struct adf_admin_comms *admin; + struct adf_telemetry *telemetry; struct adf_dc_data *dc_data; + struct adf_pm power_management; struct list_head crypto_list; struct list_head compression_list; unsigned long status; atomic_t ref_count; struct dentry *debugfs_dir; struct dentry *fw_cntr_dbgfile; + struct dentry *cnv_dbgfile; struct list_head list; struct module *owner; struct adf_accel_pci accel_pci_dev; struct adf_timer *timer; struct adf_heartbeat *heartbeat; + struct adf_rl *rate_limiting; + struct adf_sysfs sysfs; union { struct { /* protects VF2PF interrupts access */ @@ -327,8 +400,10 @@ struct adf_accel_dev { u8 pf_compat_ver; } vf; }; + struct adf_error_counters ras_errors; struct mutex state_lock; /* protect state of the device */ bool is_vf; + bool autoreset_on_error; u32 accel_id; }; #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c index 6be064dc64c8e8daa769baf9c28d0d453eb4d749..4b5d0350fc2ef1bd92d307f0b43a98ae2a45a8ce 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c +++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c @@ -19,7 +19,7 @@ static int adf_ae_fw_load_images(struct adf_accel_dev *accel_dev, void *fw_addr, int i; loader = loader_data->fw_loader; - num_objs = hw_device->uof_get_num_objs(); + num_objs = hw_device->uof_get_num_objs(accel_dev); for (i = 0; i < num_objs; i++) { obj_name = hw_device->uof_get_name(accel_dev, i); diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.c b/drivers/crypto/intel/qat/qat_common/adf_admin.c index 194d64d4b99a1b4ac350df8fee3c19064e835cee..acad526eb741683b350665333d3a047f5d252613 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_admin.c +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.c @@ -7,6 +7,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_cfg.h" #include "adf_heartbeat.h" @@ -309,6 +310,73 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) return !strcmp(services, "dcc"); } +static int adf_get_fw_capabilities(struct adf_accel_dev *accel_dev, u16 *caps) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + if (!ae_mask) + return 0; + + req.cmd_id = ICP_QAT_FW_CAPABILITIES_GET; + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + *caps = resp.fw_capabilities; + + return 0; +} + +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_RL_INIT; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slices, &resp.slices, sizeof(*slices)); + + return 0; +} + +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + + /* + * req struct filled in rl implementation. 
Used commands + * ICP_QAT_FW_RL_ADD for a new SLA + * ICP_QAT_FW_RL_UPDATE for update SLA + */ + return adf_send_admin(accel_dev, req, &resp, ae_mask); +} + +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + u32 ae_mask = accel_dev->hw_device->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + + req.cmd_id = ICP_QAT_FW_RL_REMOVE; + req.node_id = node_id; + req.node_type = node_type; + + return adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + /** * adf_send_admin_init() - Function sends init message to FW * @accel_dev: Pointer to acceleration device. @@ -319,6 +387,7 @@ static bool is_dcc_enabled(struct adf_accel_dev *accel_dev) */ int adf_send_admin_init(struct adf_accel_dev *accel_dev) { + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); u32 dc_capabilities = 0; int ret; @@ -339,6 +408,8 @@ int adf_send_admin_init(struct adf_accel_dev *accel_dev) } accel_dev->hw_device->extended_dc_capabilities = dc_capabilities; + adf_get_fw_capabilities(accel_dev, &hw_data->fw_capabilities); + return adf_init_ae(accel_dev); } EXPORT_SYMBOL_GPL(adf_send_admin_init); @@ -379,6 +450,91 @@ int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay) return adf_send_admin(accel_dev, &req, &resp, ae_mask); } +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, + size_t buff_size) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + u32 ae_mask = hw_data->admin_ae_mask; + int ret; + + /* Query pm info via init/admin cmd */ + if (!accel_dev->admin) { + dev_err(&GET_DEV(accel_dev), "adf_admin is not available\n"); + return -EFAULT; + } + + req.cmd_id = ICP_QAT_FW_PM_INFO; + req.init_cfg_sz = buff_size; + req.init_cfg_ptr = p_state_addr; + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to query power-management info\n"); + + return ret; +} + +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, + u16 *latest_err) +{ + struct icp_qat_fw_init_admin_req req = { }; + struct icp_qat_fw_init_admin_resp resp; + int ret; + + req.cmd_id = ICP_QAT_FW_CNV_STATS_GET; + + ret = adf_put_admin_msg_sync(accel_dev, ae, &req, &resp); + if (ret) + return ret; + if (resp.status) + return -EPROTONOSUPPORT; + + *err_cnt = resp.error_count; + *latest_err = resp.latest_error; + + return ret; +} + +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->admin_ae_mask; + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + int ret; + + req.cmd_id = ICP_QAT_FW_TL_START; + req.init_cfg_ptr = tl_dma_addr; + req.init_cfg_sz = layout_sz; + + if (rp_indexes) + memcpy(&req.rp_indexes, rp_indexes, sizeof(req.rp_indexes)); + + ret = adf_send_admin(accel_dev, &req, &resp, ae_mask); + if (ret) + return ret; + + memcpy(slice_count, &resp.slices, sizeof(*slice_count)); + + return 0; +} + +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct icp_qat_fw_init_admin_resp resp = { }; + struct icp_qat_fw_init_admin_req req = { }; + u32 ae_mask = hw_data->admin_ae_mask; + + req.cmd_id = ICP_QAT_FW_TL_STOP; + + return 
adf_send_admin(accel_dev, &req, &resp, ae_mask); +} + int adf_init_admin_comms(struct adf_accel_dev *accel_dev) { struct adf_admin_comms *admin; diff --git a/drivers/crypto/intel/qat/qat_common/adf_admin.h b/drivers/crypto/intel/qat/qat_common/adf_admin.h new file mode 100644 index 0000000000000000000000000000000000000000..647c8e196752104f009bceeda30e5ae71b79b96e --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_admin.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_ADMIN +#define ADF_ADMIN + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; + +int adf_init_admin_comms(struct adf_accel_dev *accel_dev); +void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); +int adf_send_admin_init(struct adf_accel_dev *accel_dev); +int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); +int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); +int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); +int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); +int adf_send_admin_rl_init(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_slice_cnt *slices); +int adf_send_admin_rl_add_update(struct adf_accel_dev *accel_dev, + struct icp_qat_fw_init_admin_req *req); +int adf_send_admin_rl_delete(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); +int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); +int adf_get_pm_info(struct adf_accel_dev *accel_dev, dma_addr_t p_state_addr, size_t buff_size); +int adf_get_cnv_stats(struct adf_accel_dev *accel_dev, u16 ae, u16 *err_cnt, u16 *latest_err); +int adf_send_admin_tl_start(struct adf_accel_dev *accel_dev, + dma_addr_t tl_dma_addr, size_t layout_sz, u8 *rp_indexes, + struct icp_qat_fw_init_admin_slice_cnt *slice_count); +int adf_send_admin_tl_stop(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_aer.c b/drivers/crypto/intel/qat/qat_common/adf_aer.c index 04af32a2811c8f77b438a375fac184f63de48ed1..9da2278bd5b7dc594076478abf5387ed7e7ddbe0 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_aer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_aer.c @@ -7,8 +7,15 @@ #include #include "adf_accel_devices.h" #include "adf_common_drv.h" +#include "adf_pfvf_pf_msg.h" + +struct adf_fatal_error_data { + struct adf_accel_dev *accel_dev; + struct work_struct work; +}; static struct workqueue_struct *device_reset_wq; +static struct workqueue_struct *device_sriov_wq; static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, pci_channel_state_t state) @@ -26,6 +33,19 @@ static pci_ers_result_t adf_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_DISCONNECT; } + set_bit(ADF_STATUS_RESTARTING, &accel_dev->status); + if (accel_dev->hw_device->exit_arb) { + dev_dbg(&pdev->dev, "Disabling arbitration\n"); + accel_dev->hw_device->exit_arb(accel_dev); + } + adf_error_notifier(accel_dev); + adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_restarting_notify(accel_dev); + adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); + pci_clear_master(pdev); + adf_dev_down(accel_dev, false); + return PCI_ERS_RESULT_NEED_RESET; } @@ -37,6 +57,13 @@ struct adf_reset_dev_data { struct work_struct reset_work; }; +/* sriov dev data */ +struct adf_sriov_dev_data { + struct adf_accel_dev *accel_dev; + struct completion compl; + struct work_struct sriov_work; +}; + void 
adf_reset_sbr(struct adf_accel_dev *accel_dev) { struct pci_dev *pdev = accel_to_pci_dev(accel_dev); @@ -82,28 +109,57 @@ void adf_dev_restore(struct adf_accel_dev *accel_dev) } } +static void adf_device_sriov_worker(struct work_struct *work) +{ + struct adf_sriov_dev_data *sriov_data = + container_of(work, struct adf_sriov_dev_data, sriov_work); + + adf_reenable_sriov(sriov_data->accel_dev); + complete(&sriov_data->compl); +} + static void adf_device_reset_worker(struct work_struct *work) { struct adf_reset_dev_data *reset_data = container_of(work, struct adf_reset_dev_data, reset_work); struct adf_accel_dev *accel_dev = reset_data->accel_dev; + unsigned long wait_jiffies = msecs_to_jiffies(10000); + struct adf_sriov_dev_data sriov_data; adf_dev_restarting_notify(accel_dev); if (adf_dev_restart(accel_dev)) { /* The device hanged and we can't restart it so stop here */ dev_err(&GET_DEV(accel_dev), "Restart device failed\n"); - kfree(reset_data); + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) + kfree(reset_data); WARN(1, "QAT: device restart failed. Device is unusable\n"); return; } + + sriov_data.accel_dev = accel_dev; + init_completion(&sriov_data.compl); + INIT_WORK(&sriov_data.sriov_work, adf_device_sriov_worker); + queue_work(device_sriov_wq, &sriov_data.sriov_work); + if (wait_for_completion_timeout(&sriov_data.compl, wait_jiffies)) + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); - /* The dev is back alive. Notify the caller if in sync mode */ - if (reset_data->mode == ADF_DEV_RESET_SYNC) - complete(&reset_data->compl); - else + /* + * The dev is back alive. Notify the caller if in sync mode + * + * If the device restart takes more time than expected, + * the schedule_reset() function can time out and exit. This can be + * detected by calling the completion_done() function. In this case + * the reset_data structure needs to be freed here.
+ */ + if (reset_data->mode == ADF_DEV_RESET_ASYNC || + completion_done(&reset_data->compl)) kfree(reset_data); + else + complete(&reset_data->compl); } static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, @@ -136,8 +192,9 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, dev_err(&GET_DEV(accel_dev), "Reset device timeout expired\n"); ret = -EFAULT; + } else { + kfree(reset_data); } - kfree(reset_data); return ret; } return 0; @@ -146,14 +203,25 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev, static pci_ers_result_t adf_slot_reset(struct pci_dev *pdev) { struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev); + int res = 0; if (!accel_dev) { pr_err("QAT: Can't find acceleration device\n"); return PCI_ERS_RESULT_DISCONNECT; } - if (adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_SYNC)) + + if (!pdev->is_busmaster) + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + res = adf_dev_up(accel_dev, false); + if (res && res != -EALREADY) return PCI_ERS_RESULT_DISCONNECT; + adf_reenable_sriov(accel_dev); + adf_pf2vf_notify_restarted(accel_dev); + adf_dev_restarted_notify(accel_dev); + clear_bit(ADF_STATUS_RESTARTING, &accel_dev->status); return PCI_ERS_RESULT_RECOVERED; } @@ -170,11 +238,62 @@ const struct pci_error_handlers adf_err_handler = { }; EXPORT_SYMBOL_GPL(adf_err_handler); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev) +{ + if (accel_dev->autoreset_on_error) + return adf_dev_aer_schedule_reset(accel_dev, ADF_DEV_RESET_ASYNC); + + return 0; +} + +static void adf_notify_fatal_error_worker(struct work_struct *work) +{ + struct adf_fatal_error_data *wq_data = + container_of(work, struct adf_fatal_error_data, work); + struct adf_accel_dev *accel_dev = wq_data->accel_dev; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + + adf_error_notifier(accel_dev); + + if (!accel_dev->is_vf) { + /* Disable arbitration to stop processing of new requests */ + if (accel_dev->autoreset_on_error && hw_device->exit_arb) + hw_device->exit_arb(accel_dev); + if (accel_dev->pf.vf_info) + adf_pf2vf_notify_fatal_error(accel_dev); + adf_dev_autoreset(accel_dev); + } + + kfree(wq_data); +} + +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct adf_fatal_error_data *wq_data; + + wq_data = kzalloc(sizeof(*wq_data), GFP_ATOMIC); + if (!wq_data) + return -ENOMEM; + + wq_data->accel_dev = accel_dev; + INIT_WORK(&wq_data->work, adf_notify_fatal_error_worker); + adf_misc_wq_queue_work(&wq_data->work); + + return 0; +} + int adf_init_aer(void) { device_reset_wq = alloc_workqueue("qat_device_reset_wq", WQ_MEM_RECLAIM, 0); - return !device_reset_wq ? 
-EFAULT : 0; + if (!device_reset_wq) + return -EFAULT; + + device_sriov_wq = alloc_workqueue("qat_device_sriov_wq", 0, 0); + if (!device_sriov_wq) + return -EFAULT; + + return 0; } void adf_exit_aer(void) @@ -182,4 +301,8 @@ void adf_exit_aer(void) if (device_reset_wq) destroy_workqueue(device_reset_wq); device_reset_wq = NULL; + + if (device_sriov_wq) + destroy_workqueue(device_sriov_wq); + device_sriov_wq = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h index 6e5de1dab97b4f3402fee40d62dbfc1ba9f1443f..89df3888d7eac7791c6b2ce47f9e1c8387e370cb 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_common.h @@ -47,6 +47,7 @@ enum adf_device_type { DEV_C3XXX, DEV_C3XXXVF, DEV_4XXX, + DEV_420XX, }; struct adf_dev_status_info { diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c new file mode 100644 index 0000000000000000000000000000000000000000..268052294468437ff0ce8c2775f86926b1d92262 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.c @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include +#include "adf_cfg.h" +#include "adf_cfg_services.h" +#include "adf_cfg_strings.h" + +const char *const adf_cfg_services[] = { + [SVC_CY] = ADF_CFG_CY, + [SVC_CY2] = ADF_CFG_ASYM_SYM, + [SVC_DC] = ADF_CFG_DC, + [SVC_DCC] = ADF_CFG_DCC, + [SVC_SYM] = ADF_CFG_SYM, + [SVC_ASYM] = ADF_CFG_ASYM, + [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, + [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, + [SVC_DC_SYM] = ADF_CFG_DC_SYM, + [SVC_SYM_DC] = ADF_CFG_SYM_DC, +}; +EXPORT_SYMBOL_GPL(adf_cfg_services); + +int adf_get_service_enabled(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) { + dev_err(&GET_DEV(accel_dev), + ADF_SERVICES_ENABLED " param not found\n"); + return ret; + } + + ret = match_string(adf_cfg_services, ARRAY_SIZE(adf_cfg_services), + services); + if (ret < 0) + dev_err(&GET_DEV(accel_dev), + "Invalid value of " ADF_SERVICES_ENABLED " param: %s\n", + services); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_get_service_enabled); diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h index b353d40c5c6d0ac60ab5a647d0759fbb85512296..c6b0328b0f5b0375dceb8f3383d7502c42991465 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_services.h @@ -5,6 +5,8 @@ #include "adf_cfg_strings.h" +struct adf_accel_dev; + enum adf_services { SVC_CY = 0, SVC_CY2, @@ -16,19 +18,11 @@ enum adf_services { SVC_ASYM_DC, SVC_DC_SYM, SVC_SYM_DC, + SVC_COUNT }; -static const char *const adf_cfg_services[] = { - [SVC_CY] = ADF_CFG_CY, - [SVC_CY2] = ADF_CFG_ASYM_SYM, - [SVC_DC] = ADF_CFG_DC, - [SVC_DCC] = ADF_CFG_DCC, - [SVC_SYM] = ADF_CFG_SYM, - [SVC_ASYM] = ADF_CFG_ASYM, - [SVC_DC_ASYM] = ADF_CFG_DC_ASYM, - [SVC_ASYM_DC] = ADF_CFG_ASYM_DC, - [SVC_DC_SYM] = ADF_CFG_DC_SYM, - [SVC_SYM_DC] = ADF_CFG_SYM_DC, -}; +extern const char *const adf_cfg_services[SVC_COUNT]; + +int adf_get_service_enabled(struct adf_accel_dev *accel_dev); #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h index 
322b76903a737d4e0fce0371a355f6a1f17fb0b0..e015ad6cace2b22afae87fdeda773fa260dcd6ba 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h +++ b/drivers/crypto/intel/qat/qat_common/adf_cfg_strings.h @@ -49,5 +49,6 @@ ADF_ETRMGR_BANK "%d" ADF_ETRMGR_CORE_AFFINITY #define ADF_ACCEL_STR "Accelerator%d" #define ADF_HEARTBEAT_TIMER "HeartbeatTimer" +#define ADF_SRIOV_ENABLED "SriovEnabled" #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_clock.c b/drivers/crypto/intel/qat/qat_common/adf_clock.c index dc0778691eb0ba9b7e4767c0da18c78ea921217a..cf89f57de2a7021494faefc006ca4c4eda038e2b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_clock.c +++ b/drivers/crypto/intel/qat/qat_common/adf_clock.c @@ -10,6 +10,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_clock.h" #include "adf_common_drv.h" @@ -82,6 +83,9 @@ static int measure_clock(struct adf_accel_dev *accel_dev, u32 *frequency) } delta_us = timespec_to_us(&ts3) - timespec_to_us(&ts1); + if (!delta_us) + return -EINVAL; + temp = (timestamp2 - timestamp1) * ME_CLK_DIVIDER * 10; temp = DIV_ROUND_CLOSEST_ULL(temp, delta_us); /* diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c new file mode 100644 index 0000000000000000000000000000000000000000..627953a72d4784c82b53c9ddc7d8e64002a9f524 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.c @@ -0,0 +1,299 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" +#include "qat_compression.h" + +#define CNV_DEBUGFS_FILENAME "cnv_errors" +#define CNV_MIN_PADDING 16 + +#define CNV_ERR_INFO_MASK GENMASK(11, 0) +#define CNV_ERR_TYPE_MASK GENMASK(15, 12) +#define CNV_SLICE_ERR_SIGN_BIT_INDEX 7 +#define CNV_DELTA_ERR_SIGN_BIT_INDEX 11 + +enum cnv_error_type { + CNV_ERR_TYPE_NONE, + CNV_ERR_TYPE_CHECKSUM, + CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH, + CNV_ERR_TYPE_DECOMPRESSION, + CNV_ERR_TYPE_TRANSLATION, + CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH, + CNV_ERR_TYPE_UNKNOWN, + CNV_ERR_TYPES_COUNT +}; + +#define CNV_ERROR_TYPE_GET(latest_err) \ + min_t(u16, u16_get_bits(latest_err, CNV_ERR_TYPE_MASK), CNV_ERR_TYPE_UNKNOWN) + +#define CNV_GET_DELTA_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_DELTA_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_SLICE_ERR_INFO(latest_error) \ + sign_extend32(latest_error, CNV_SLICE_ERR_SIGN_BIT_INDEX) + +#define CNV_GET_DEFAULT_ERR_INFO(latest_error) \ + u16_get_bits(latest_error, CNV_ERR_INFO_MASK) + +enum cnv_fields { + CNV_ERR_COUNT, + CNV_LATEST_ERR, + CNV_FIELDS_COUNT +}; + +static const char * const cnv_field_names[CNV_FIELDS_COUNT] = { + [CNV_ERR_COUNT] = "Total Errors", + [CNV_LATEST_ERR] = "Last Error", +}; + +static const char * const cnv_error_names[CNV_ERR_TYPES_COUNT] = { + [CNV_ERR_TYPE_NONE] = "No Error", + [CNV_ERR_TYPE_CHECKSUM] = "Checksum Error", + [CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH] = "Length Error-P", + [CNV_ERR_TYPE_DECOMPRESSION] = "Decomp Error", + [CNV_ERR_TYPE_TRANSLATION] = "Xlat Error", + [CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH] = "Length Error-C", + [CNV_ERR_TYPE_UNKNOWN] = "Unknown Error", +}; + +struct ae_cnv_errors { + u16 ae; + u16 err_cnt; + u16 latest_err; + bool is_comp_ae; +}; + +struct cnv_err_stats { + u16 ae_count; + struct ae_cnv_errors ae_cnv_errors[]; +}; + +static s16 get_err_info(u8 error_type, u16 latest) 
+{ + switch (error_type) { + case CNV_ERR_TYPE_DECOMP_PRODUCED_LENGTH: + case CNV_ERR_TYPE_DECOMP_CONSUMED_LENGTH: + return CNV_GET_DELTA_ERR_INFO(latest); + case CNV_ERR_TYPE_DECOMPRESSION: + case CNV_ERR_TYPE_TRANSLATION: + return CNV_GET_SLICE_ERR_INFO(latest); + default: + return CNV_GET_DEFAULT_ERR_INFO(latest); + } +} + +static void *qat_cnv_errors_seq_start(struct seq_file *sfile, loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + if (*pos == 0) + return SEQ_START_TOKEN; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void *qat_cnv_errors_seq_next(struct seq_file *sfile, void *v, + loff_t *pos) +{ + struct cnv_err_stats *err_stats = sfile->private; + + (*pos)++; + + if (*pos > err_stats->ae_count) + return NULL; + + return &err_stats->ae_cnv_errors[*pos - 1]; +} + +static void qat_cnv_errors_seq_stop(struct seq_file *sfile, void *v) +{ +} + +static int qat_cnv_errors_seq_show(struct seq_file *sfile, void *v) +{ + struct ae_cnv_errors *ae_errors; + unsigned int i; + s16 err_info; + u8 err_type; + + if (v == SEQ_START_TOKEN) { + seq_puts(sfile, "AE "); + for (i = 0; i < CNV_FIELDS_COUNT; ++i) + seq_printf(sfile, " %*s", CNV_MIN_PADDING, + cnv_field_names[i]); + } else { + ae_errors = v; + + if (!ae_errors->is_comp_ae) + return 0; + + err_type = CNV_ERROR_TYPE_GET(ae_errors->latest_err); + err_info = get_err_info(err_type, ae_errors->latest_err); + + seq_printf(sfile, "%d:", ae_errors->ae); + seq_printf(sfile, " %*d", CNV_MIN_PADDING, ae_errors->err_cnt); + seq_printf(sfile, "%*s [%d]", CNV_MIN_PADDING, + cnv_error_names[err_type], err_info); + } + seq_putc(sfile, '\n'); + + return 0; +} + +static const struct seq_operations qat_cnv_errors_sops = { + .start = qat_cnv_errors_seq_start, + .next = qat_cnv_errors_seq_next, + .stop = qat_cnv_errors_seq_stop, + .show = qat_cnv_errors_seq_show, +}; + +/** + * cnv_err_stats_alloc() - Get CNV stats for the provided device. + * @accel_dev: Pointer to a QAT acceleration device + * + * Allocates and populates a table of CNV error statistics for each non-admin AE + * available through the supplied acceleration device. The caller becomes the + * owner of this memory and is responsible for freeing it through a call + * to kfree(). + * + * Returns: a pointer to a dynamically allocated struct cnv_err_stats on + * success, or an ERR_PTR() encoding a negative error code on failure.
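+ *
+ * A minimal usage sketch (editor's illustration, not part of the patch;
+ * it only dereferences fields defined in struct cnv_err_stats above):
+ *
+ *	struct cnv_err_stats *stats = cnv_err_stats_alloc(accel_dev);
+ *	unsigned int i;
+ *
+ *	if (IS_ERR(stats))
+ *		return PTR_ERR(stats);
+ *	for (i = 0; i < stats->ae_count; i++)
+ *		if (stats->ae_cnv_errors[i].is_comp_ae)
+ *			pr_info("AE %u: %u CnV errors\n",
+ *				stats->ae_cnv_errors[i].ae,
+ *				stats->ae_cnv_errors[i].err_cnt);
+ *	kfree(stats);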
+ */ +static struct cnv_err_stats *cnv_err_stats_alloc(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct cnv_err_stats *err_stats; + unsigned long ae_count; + unsigned long ae_mask; + size_t err_stats_size; + unsigned long ae; + unsigned int i; + u16 latest_err; + u16 err_cnt; + int ret; + + if (!adf_dev_started(accel_dev)) { + dev_err(&GET_DEV(accel_dev), "QAT Device not started\n"); + return ERR_PTR(-EBUSY); + } + + /* Ignore the admin AEs */ + ae_mask = hw_data->ae_mask & ~hw_data->admin_ae_mask; + ae_count = hweight_long(ae_mask); + if (unlikely(!ae_count)) + return ERR_PTR(-EINVAL); + + err_stats_size = struct_size(err_stats, ae_cnv_errors, ae_count); + err_stats = kmalloc(err_stats_size, GFP_KERNEL); + if (!err_stats) + return ERR_PTR(-ENOMEM); + + err_stats->ae_count = ae_count; + + i = 0; + for_each_set_bit(ae, &ae_mask, GET_MAX_ACCELENGINES(accel_dev)) { + ret = adf_get_cnv_stats(accel_dev, ae, &err_cnt, &latest_err); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "Failed to get CNV stats for ae %ld, [%d].\n", + ae, ret); + err_stats->ae_cnv_errors[i++].is_comp_ae = false; + continue; + } + err_stats->ae_cnv_errors[i].is_comp_ae = true; + err_stats->ae_cnv_errors[i].latest_err = latest_err; + err_stats->ae_cnv_errors[i].err_cnt = err_cnt; + err_stats->ae_cnv_errors[i].ae = ae; + i++; + } + + return err_stats; +} + +static int qat_cnv_errors_file_open(struct inode *inode, struct file *file) +{ + struct adf_accel_dev *accel_dev = inode->i_private; + struct seq_file *cnv_errors_seq_file; + struct cnv_err_stats *cnv_err_stats; + int ret; + + cnv_err_stats = cnv_err_stats_alloc(accel_dev); + if (IS_ERR(cnv_err_stats)) + return PTR_ERR(cnv_err_stats); + + ret = seq_open(file, &qat_cnv_errors_sops); + if (unlikely(ret)) { + kfree(cnv_err_stats); + return ret; + } + + cnv_errors_seq_file = file->private_data; + cnv_errors_seq_file->private = cnv_err_stats; + return ret; +} + +static int qat_cnv_errors_file_release(struct inode *inode, struct file *file) +{ + struct seq_file *cnv_errors_seq_file = file->private_data; + + kfree(cnv_errors_seq_file->private); + cnv_errors_seq_file->private = NULL; + + return seq_release(inode, file); +} + +static const struct file_operations qat_cnv_fops = { + .owner = THIS_MODULE, + .open = qat_cnv_errors_file_open, + .read = seq_read, + .llseek = seq_lseek, + .release = qat_cnv_errors_file_release, +}; + +static ssize_t no_comp_file_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + char *file_msg = "No engine configured for comp\n"; + + return simple_read_from_buffer(buf, count, pos, file_msg, + strlen(file_msg)); +} + +static const struct file_operations qat_cnv_no_comp_fops = { + .owner = THIS_MODULE, + .read = no_comp_file_read, +}; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + const struct file_operations *fops; + void *data; + + if (adf_hw_dev_has_compression(accel_dev)) { + fops = &qat_cnv_fops; + data = accel_dev; + } else { + fops = &qat_cnv_no_comp_fops; + data = NULL; + } + + accel_dev->cnv_dbgfile = debugfs_create_file(CNV_DEBUGFS_FILENAME, 0400, + accel_dev->debugfs_dir, + data, fops); +} + +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + debugfs_remove(accel_dev->cnv_dbgfile); + accel_dev->cnv_dbgfile = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h new file mode 100644 index 
0000000000000000000000000000000000000000..b02b0961c43308a1e73b4220c0f0bcc7da0dd1ec --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_cnv_dbgfs.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_CNV_DBG_H +#define ADF_CNV_DBG_H + +struct adf_accel_dev; + +void adf_cnv_dbgfs_add(struct adf_accel_dev *accel_dev); +void adf_cnv_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h index 79ff7982378d9fef9d48ca1d37f3d9bc85de7bd7..57328249c89e7a6f4ae7c7754bf2bbb218c6f651 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_common_drv.h +++ b/drivers/crypto/intel/qat/qat_common/adf_common_drv.h @@ -40,6 +40,7 @@ enum adf_event { ADF_EVENT_SHUTDOWN, ADF_EVENT_RESTARTING, ADF_EVENT_RESTARTED, + ADF_EVENT_FATAL_ERROR, }; struct service_hndl { @@ -60,6 +61,8 @@ int adf_dev_restart(struct adf_accel_dev *accel_dev); void adf_devmgr_update_class_index(struct adf_hw_device_data *hw_data); void adf_clean_vf_map(bool); +int adf_notify_fatal_error(struct adf_accel_dev *accel_dev); +void adf_error_notifier(struct adf_accel_dev *accel_dev); int adf_devmgr_add_dev(struct adf_accel_dev *accel_dev, struct adf_accel_dev *pf); void adf_devmgr_rm_dev(struct adf_accel_dev *accel_dev, @@ -84,20 +87,14 @@ int adf_ae_stop(struct adf_accel_dev *accel_dev); extern const struct pci_error_handlers adf_err_handler; void adf_reset_sbr(struct adf_accel_dev *accel_dev); void adf_reset_flr(struct adf_accel_dev *accel_dev); +int adf_dev_autoreset(struct adf_accel_dev *accel_dev); void adf_dev_restore(struct adf_accel_dev *accel_dev); int adf_init_aer(void); void adf_exit_aer(void); -int adf_init_admin_comms(struct adf_accel_dev *accel_dev); -void adf_exit_admin_comms(struct adf_accel_dev *accel_dev); -int adf_send_admin_init(struct adf_accel_dev *accel_dev); -int adf_get_ae_fw_counters(struct adf_accel_dev *accel_dev, u16 ae, u64 *reqs, u64 *resps); -int adf_init_admin_pm(struct adf_accel_dev *accel_dev, u32 idle_delay); -int adf_send_admin_tim_sync(struct adf_accel_dev *accel_dev, u32 cnt); -int adf_send_admin_hb_timer(struct adf_accel_dev *accel_dev, uint32_t ticks); -int adf_get_fw_timestamp(struct adf_accel_dev *accel_dev, u64 *timestamp); int adf_init_arb(struct adf_accel_dev *accel_dev); void adf_exit_arb(struct adf_accel_dev *accel_dev); void adf_update_ring_arb(struct adf_etr_ring_data *ring); +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr); int adf_dev_get(struct adf_accel_dev *accel_dev); void adf_dev_put(struct adf_accel_dev *accel_dev); @@ -196,6 +193,7 @@ bool adf_misc_wq_queue_delayed_work(struct delayed_work *work, #if defined(CONFIG_PCI_IOV) int adf_sriov_configure(struct pci_dev *pdev, int numvfs); void adf_disable_sriov(struct adf_accel_dev *accel_dev); +void adf_reenable_sriov(struct adf_accel_dev *accel_dev); void adf_enable_vf2pf_interrupts(struct adf_accel_dev *accel_dev, u32 vf_mask); void adf_disable_all_vf2pf_interrupts(struct adf_accel_dev *accel_dev); bool adf_recv_and_handle_pf2vf_msg(struct adf_accel_dev *accel_dev); @@ -216,6 +214,10 @@ static inline void adf_disable_sriov(struct adf_accel_dev *accel_dev) { } +static inline void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ +} + static inline int adf_init_pf_wq(void) { return 0; @@ -246,4 +248,14 @@ static inline void __iomem *adf_get_pmisc_base(struct adf_accel_dev *accel_dev) return pmisc->virt_addr; } +static inline 
void __iomem *adf_get_aram_base(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + struct adf_bar *param; + + param = &GET_BARS(accel_dev)[hw_data->get_sram_bar_id(hw_data)]; + + return param->virt_addr; +} + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c index 04845f8d72be6fee817e73232e1e69b73af4525b..c42f5c25aabdfad04e214209268549035e7fe11c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dbgfs.c @@ -5,9 +5,12 @@ #include "adf_accel_devices.h" #include "adf_cfg.h" #include "adf_common_drv.h" +#include "adf_cnv_dbgfs.h" #include "adf_dbgfs.h" #include "adf_fw_counters.h" #include "adf_heartbeat_dbgfs.h" +#include "adf_pm_dbgfs.h" +#include "adf_tl_debugfs.h" /** * adf_dbgfs_init() - add persistent debugfs entries @@ -62,6 +65,9 @@ void adf_dbgfs_add(struct adf_accel_dev *accel_dev) if (!accel_dev->is_vf) { adf_fw_counters_dbgfs_add(accel_dev); adf_heartbeat_dbgfs_add(accel_dev); + adf_pm_dbgfs_add(accel_dev); + adf_cnv_dbgfs_add(accel_dev); + adf_tl_dbgfs_add(accel_dev); } } @@ -75,6 +81,9 @@ void adf_dbgfs_rm(struct adf_accel_dev *accel_dev) return; if (!accel_dev->is_vf) { + adf_tl_dbgfs_rm(accel_dev); + adf_cnv_dbgfs_rm(accel_dev); + adf_pm_dbgfs_rm(accel_dev); adf_heartbeat_dbgfs_rm(accel_dev); adf_fw_counters_dbgfs_rm(accel_dev); } diff --git a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c index 86ee36feefad34692fa5e3d1c3a935c510a6364b..f07b748795f7b79af65f2979eb6bfd2c4c39546b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_dev_mgr.c @@ -60,10 +60,10 @@ static int adf_get_vf_real_id(u32 fake) /** * adf_clean_vf_map() - Cleans VF id mapings - * - * Function cleans internal ids for virtual functions. * @vf: flag indicating whether mappings is cleaned * for vfs only or for vfs and pfs + * + * Function cleans internal ids for virtual functions. 
*/ void adf_clean_vf_map(bool vf) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_config.h b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h new file mode 100644 index 0000000000000000000000000000000000000000..4f86696800c97f55de8579b9c3cabf7a4d943b65 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_config.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_FW_CONFIG_H_ +#define ADF_FW_CONFIG_H_ + +enum adf_fw_objs { + ADF_FW_SYM_OBJ, + ADF_FW_ASYM_OBJ, + ADF_FW_DC_OBJ, + ADF_FW_ADMIN_OBJ, +}; + +struct adf_fw_config { + u32 ae_mask; + enum adf_fw_objs obj; +}; + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c index cb6e09ef5c9ff92241f426586d6e6ea0ac900bd5..98fb7ccfed9fc30ab3dbbef17838eacaaf78cce3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c +++ b/drivers/crypto/intel/qat/qat_common/adf_fw_counters.c @@ -9,6 +9,7 @@ #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_fw_counters.h" @@ -34,7 +35,7 @@ struct adf_ae_counters { struct adf_fw_counters { u16 ae_count; - struct adf_ae_counters ae_counters[]; + struct adf_ae_counters ae_counters[] __counted_by(ae_count); }; static void adf_fw_counters_parse_ae_values(struct adf_ae_counters *ae_counters, u32 ae, diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c new file mode 100644 index 0000000000000000000000000000000000000000..fe1f3d727dc5a54ee7853c18b889eaa91fb88b2c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.c @@ -0,0 +1,287 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_accel_devices.h" +#include "adf_cfg.h" +#include "adf_cfg_services.h" +#include "adf_cfg_strings.h" +#include "adf_common_drv.h" +#include "adf_gen4_config.h" +#include "adf_heartbeat.h" +#include "adf_transport_access_macros.h" +#include "qat_compression.h" +#include "qat_crypto.h" + +static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long bank, val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_crypto(accel_dev)) + instances = min(cpus, banks / 2); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + bank = i * 2; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + bank += 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &bank, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY, + i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i); + val = 128; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" 
ADF_RING_ASYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for crypto\n"); + return ret; +} + +static int adf_comp_dev_config(struct adf_accel_dev *accel_dev) +{ + char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES]; + int banks = GET_MAX_BANKS(accel_dev); + int cpus = num_online_cpus(); + unsigned long val; + int instances; + int ret; + int i; + + if (adf_hw_dev_has_compression(accel_dev)) + instances = min(cpus, banks); + else + instances = 0; + + for (i = 0; i < instances; i++) { + val = i; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_BANK_NUM, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 512; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_SIZE, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_TX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = 1; + snprintf(key, sizeof(key), ADF_DC "%d" ADF_RING_DC_RX, i); + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, + key, &val, ADF_DEC); + if (ret) + goto err; + + val = ADF_COALESCING_DEF_TIME; + snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i); + ret = adf_cfg_add_key_value_param(accel_dev, "Accelerator0", + key, &val, ADF_DEC); + if (ret) + goto err; + } + + val = i; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + goto err; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); + if (ret) + goto err; + + return 0; +err: + dev_err(&GET_DEV(accel_dev), "Failed to add configuration for compression\n"); + return ret; +} + +static int adf_no_dev_config(struct adf_accel_dev *accel_dev) +{ + unsigned long val; + int ret; + + val = 0; + ret = adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC); + if (ret) + return ret; + + return adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC); +} + +/** + * adf_gen4_dev_config() - create dev config required to create instances + * + * @accel_dev: Pointer to acceleration device. 
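+ *
+ * Editor's note (illustrative, not part of the patch): a GEN4 device
+ * driver would typically install this helper as its configuration
+ * callback; the assignment below assumes the dev_config member of
+ * struct adf_hw_device_data:
+ *
+ *	hw_data->dev_config = adf_gen4_dev_config;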
+ * + * Function creates device configuration required to create instances + * + * Return: 0 on success, error code otherwise. + */ +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev) +{ + char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + int ret; + + ret = adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC); + if (ret) + goto err; + + ret = adf_cfg_section_add(accel_dev, "Accelerator0"); + if (ret) + goto err; + + ret = adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, services); + if (ret) + goto err; + + ret = sysfs_match_string(adf_cfg_services, services); + if (ret < 0) + goto err; + + switch (ret) { + case SVC_CY: + case SVC_CY2: + ret = adf_crypto_dev_config(accel_dev); + break; + case SVC_DC: + case SVC_DCC: + ret = adf_comp_dev_config(accel_dev); + break; + default: + ret = adf_no_dev_config(accel_dev); + break; + } + + if (ret) + goto err; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + + return ret; + +err: + dev_err(&GET_DEV(accel_dev), "Failed to configure QAT driver\n"); + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_dev_config); + +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev) +{ + const char *config; + int ret; + + config = accel_dev->accel_id % 2 ? ADF_CFG_DC : ADF_CFG_CY; + + ret = adf_cfg_section_add(accel_dev, ADF_GENERAL_SEC); + if (ret) + return ret; + + /* Default configuration is crypto only for even devices + * and compression for odd devices + */ + ret = adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, + ADF_SERVICES_ENABLED, config, + ADF_STR); + if (ret) + return ret; + + adf_heartbeat_save_cfg_param(accel_dev, ADF_CFG_HB_TIMER_MIN_MS); + + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_cfg_dev_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h new file mode 100644 index 0000000000000000000000000000000000000000..bb87655f69a8396de3a9f2ddf816a53192a5d186 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_config.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_CONFIG_H_ +#define ADF_GEN4_CONFIG_H_ + +#include "adf_accel_devices.h" + +int adf_gen4_dev_config(struct adf_accel_dev *accel_dev); +int adf_gen4_cfg_dev_init(struct adf_accel_dev *accel_dev); + +#endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c index 3148a62938fdd2de9ac30fc08be8147844fa29f4..d28e1921940a799b4bd145d82745ac957564cea3 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.c @@ -2,8 +2,11 @@ /* Copyright(c) 2020 Intel Corporation */ #include #include "adf_accel_devices.h" +#include "adf_cfg_services.h" #include "adf_common_drv.h" +#include "adf_fw_config.h" #include "adf_gen4_hw_data.h" +#include "adf_gen4_pm.h" static u64 build_csr_ring_base_addr(dma_addr_t addr, u32 size) { @@ -102,6 +105,131 @@ void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops) } EXPORT_SYMBOL_GPL(adf_gen4_init_hw_csr_ops); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ACCELERATORS_MASK; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_accel_mask); + +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self) +{ + return ADF_GEN4_MAX_ACCELERATORS; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_accels); + +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self) +{ + if (!self || !self->ae_mask) + return 0; + + return 
hweight32(self->ae_mask); +} +EXPORT_SYMBOL_GPL(adf_gen4_get_num_aes); + +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_PMISC_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_misc_bar_id); + +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_ETR_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_etr_bar_id); + +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self) +{ + return ADF_GEN4_SRAM_BAR; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sram_bar_id); + +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self) +{ + return DEV_SKU_1; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_sku); + +void adf_gen4_get_arb_info(struct arb_info *arb_info) +{ + arb_info->arb_cfg = ADF_GEN4_ARB_CONFIG; + arb_info->arb_offset = ADF_GEN4_ARB_OFFSET; + arb_info->wt2sam_offset = ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_arb_info); + +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info) +{ + admin_csrs_info->mailbox_offset = ADF_GEN4_MAILBOX_BASE_OFFSET; + admin_csrs_info->admin_msg_ur = ADF_GEN4_ADMINMSGUR_OFFSET; + admin_csrs_info->admin_msg_lr = ADF_GEN4_ADMINMSGLR_OFFSET; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_admin_info); + +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self) +{ + /* + * GEN4 uses KPT counter for HB + */ + return ADF_GEN4_KPT_COUNTER_FREQ; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_heartbeat_clock); + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev) +{ + struct adf_bar *misc_bar = &GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR]; + void __iomem *csr = misc_bar->virt_addr; + + /* Enable all in errsou3 except VFLR notification on host */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_VFLNOTIFY); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_error_correction); + +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Enable bundle interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET, 0); + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET, 0); + + /* Enable misc interrupts */ + ADF_CSR_WR(addr, ADF_GEN4_SMIAPF_MASK_OFFSET, 0); +} +EXPORT_SYMBOL_GPL(adf_gen4_enable_ints); + +int adf_gen4_init_device(struct adf_accel_dev *accel_dev) +{ + void __iomem *addr; + u32 status; + u32 csr; + int ret; + + addr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + + /* Temporarily mask PM interrupt */ + csr = ADF_CSR_RD(addr, ADF_GEN4_ERRMSK2); + csr |= ADF_GEN4_PM_SOU; + ADF_CSR_WR(addr, ADF_GEN4_ERRMSK2, csr); + + /* Set DRV_ACTIVE bit to power up the device */ + ADF_CSR_WR(addr, ADF_GEN4_PM_INTERRUPT, ADF_GEN4_PM_DRV_ACTIVE); + + /* Poll status register to make sure the device is powered up */ + ret = read_poll_timeout(ADF_CSR_RD, status, + status & ADF_GEN4_PM_INIT_STATE, + ADF_GEN4_PM_POLL_DELAY_US, + ADF_GEN4_PM_POLL_TIMEOUT_US, true, addr, + ADF_GEN4_PM_STATUS); + if (ret) + dev_err(&GET_DEV(accel_dev), "Failed to power up the device\n"); + + return ret; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_device); + static inline void adf_gen4_unpack_ssm_wdtimer(u64 value, u32 *upper, u32 *lower) { @@ -135,6 +263,28 @@ void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev) } EXPORT_SYMBOL_GPL(adf_gen4_set_ssm_wdtimer); +/* + * The vector routing table is used to select the MSI-X entry to use for each + * interrupt source. + * The first ADF_GEN4_ETR_MAX_BANKS entries correspond to ring interrupts. + * The final entry corresponds to VF2PF or error interrupts. 
+ * This vector table could be used to configure one MSI-X entry to be shared + * between multiple interrupt sources. + * + * The default routing is set to have a one to one correspondence between the + * interrupt source and the MSI-X entry used. + */ +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev) +{ + void __iomem *csr; + int i; + + csr = (&GET_BARS(accel_dev)[ADF_GEN4_PMISC_BAR])->virt_addr; + for (i = 0; i <= ADF_GEN4_ETR_MAX_BANKS; i++) + ADF_CSR_WR(csr, ADF_GEN4_MSIX_RTTABLE_OFFSET(i), i); +} +EXPORT_SYMBOL_GPL(adf_gen4_set_msix_default_rttable); + int adf_pfvf_comms_disabled(struct adf_accel_dev *accel_dev) { return 0; @@ -192,3 +342,150 @@ int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number) return ret; } EXPORT_SYMBOL_GPL(adf_gen4_ring_pair_reset); + +static const u32 thrd_to_arb_map_dcc[] = { + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, 0x0000FFFF, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x0 +}; + +static const u16 rp_group_to_arb_mask[] = { + [RP_GROUP_0] = 0x5, + [RP_GROUP_1] = 0xA, +}; + +static bool is_single_service(int service_id) +{ + switch (service_id) { + case SVC_DC: + case SVC_SYM: + case SVC_ASYM: + return true; + case SVC_CY: + case SVC_CY2: + case SVC_DCC: + case SVC_ASYM_DC: + case SVC_DC_ASYM: + case SVC_SYM_DC: + case SVC_DC_SYM: + default: + return false; + } +} + +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u32 *thd2arb_map = hw_data->thd_to_arb_map; + unsigned int ae_cnt, worker_obj_cnt, i, j; + unsigned long ae_mask, thds_mask; + int srv_id, rp_group; + u32 thd2arb_map_base; + u16 arb_mask; + + if (!hw_data->get_rp_group || !hw_data->get_ena_thd_mask || + !hw_data->get_num_aes || !hw_data->uof_get_num_objs || + !hw_data->uof_get_ae_mask) + return -EFAULT; + + srv_id = adf_get_service_enabled(accel_dev); + if (srv_id < 0) + return srv_id; + + ae_cnt = hw_data->get_num_aes(hw_data); + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + + if (srv_id == SVC_DCC) { + if (ae_cnt > ICP_QAT_HW_AE_DELIMITER) + return -EINVAL; + + memcpy(thd2arb_map, thrd_to_arb_map_dcc, + array_size(sizeof(*thd2arb_map), ae_cnt)); + return 0; + } + + for (i = 0; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + thds_mask = hw_data->get_ena_thd_mask(accel_dev, i); + thd2arb_map_base = 0; + + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return -EINVAL; + + if (thds_mask == ADF_GEN4_ENA_THD_MASK_ERROR) + return -EINVAL; + + if (is_single_service(srv_id)) + arb_mask = rp_group_to_arb_mask[RP_GROUP_0] | + rp_group_to_arb_mask[RP_GROUP_1]; + else + arb_mask = rp_group_to_arb_mask[rp_group]; + + for_each_set_bit(j, &thds_mask, ADF_NUM_THREADS_PER_AE) + thd2arb_map_base |= arb_mask << (j * 4); + + for_each_set_bit(j, &ae_mask, ae_cnt) + thd2arb_map[j] = thd2arb_map_base; + } + return 0; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_thd2arb_map); + +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + enum adf_cfg_service_type rps[RP_GROUP_COUNT] = { }; + unsigned int ae_mask, start_id, worker_obj_cnt, i; + u16 ring_to_svc_map; + int rp_group; + + if (!hw_data->get_rp_group || !hw_data->uof_get_ae_mask || + !hw_data->uof_get_obj_type || 
!hw_data->uof_get_num_objs) + return 0; + + /* If dcc, all rings handle compression requests */ + if (adf_get_service_enabled(accel_dev) == SVC_DCC) { + for (i = 0; i < RP_GROUP_COUNT; i++) + rps[i] = COMP; + goto set_mask; + } + + worker_obj_cnt = hw_data->uof_get_num_objs(accel_dev) - + ADF_GEN4_ADMIN_ACCELENGINES; + start_id = worker_obj_cnt - RP_GROUP_COUNT; + + for (i = start_id; i < worker_obj_cnt; i++) { + ae_mask = hw_data->uof_get_ae_mask(accel_dev, i); + rp_group = hw_data->get_rp_group(accel_dev, ae_mask); + if (rp_group >= RP_GROUP_COUNT || rp_group < RP_GROUP_0) + return 0; + + switch (hw_data->uof_get_obj_type(accel_dev, i)) { + case ADF_FW_SYM_OBJ: + rps[rp_group] = SYM; + break; + case ADF_FW_ASYM_OBJ: + rps[rp_group] = ASYM; + break; + case ADF_FW_DC_OBJ: + rps[rp_group] = COMP; + break; + default: + rps[rp_group] = 0; + break; + } + } + +set_mask: + ring_to_svc_map = rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_0_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_1_SHIFT | + rps[RP_GROUP_0] << ADF_CFG_SERV_RING_PAIR_2_SHIFT | + rps[RP_GROUP_1] << ADF_CFG_SERV_RING_PAIR_3_SHIFT; + + return ring_to_svc_map; +} +EXPORT_SYMBOL_GPL(adf_gen4_get_ring_to_svc_map); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h index 02d7a019ebf8aa1c530708192687fcc73b8c8dc2..c6e80df5a85a337c6fb6a3fa383fec3ac849c748 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_hw_data.h @@ -3,9 +3,57 @@ #ifndef ADF_GEN4_HW_CSR_DATA_H_ #define ADF_GEN4_HW_CSR_DATA_H_ +#include + #include "adf_accel_devices.h" #include "adf_cfg_common.h" +/* PCIe configuration space */ +#define ADF_GEN4_BAR_MASK (BIT(0) | BIT(2) | BIT(4)) +#define ADF_GEN4_SRAM_BAR 0 +#define ADF_GEN4_PMISC_BAR 1 +#define ADF_GEN4_ETR_BAR 2 + +/* Clocks frequency */ +#define ADF_GEN4_KPT_COUNTER_FREQ (100 * HZ_PER_MHZ) + +/* Physical function fuses */ +#define ADF_GEN4_FUSECTL0_OFFSET 0x2C8 +#define ADF_GEN4_FUSECTL1_OFFSET 0x2CC +#define ADF_GEN4_FUSECTL2_OFFSET 0x2D0 +#define ADF_GEN4_FUSECTL3_OFFSET 0x2D4 +#define ADF_GEN4_FUSECTL4_OFFSET 0x2D8 +#define ADF_GEN4_FUSECTL5_OFFSET 0x2DC + +/* Accelerators */ +#define ADF_GEN4_ACCELERATORS_MASK 0x1 +#define ADF_GEN4_MAX_ACCELERATORS 1 +#define ADF_GEN4_ADMIN_ACCELENGINES 1 + +/* MSIX interrupt */ +#define ADF_GEN4_SMIAPF_RP_X0_MASK_OFFSET 0x41A040 +#define ADF_GEN4_SMIAPF_RP_X1_MASK_OFFSET 0x41A044 +#define ADF_GEN4_SMIAPF_MASK_OFFSET 0x41A084 +#define ADF_GEN4_MSIX_RTTABLE_OFFSET(i) (0x409000 + ((i) * 0x04)) + +/* Bank and ring configuration */ +#define ADF_GEN4_MAX_RPS 64 +#define ADF_GEN4_NUM_RINGS_PER_BANK 2 +#define ADF_GEN4_NUM_BANKS_PER_VF 4 +#define ADF_GEN4_ETR_MAX_BANKS 64 +#define ADF_GEN4_RX_RINGS_OFFSET 1 +#define ADF_GEN4_TX_RINGS_MASK 0x1 + +/* Arbiter configuration */ +#define ADF_GEN4_ARB_CONFIG (BIT(31) | BIT(6) | BIT(0)) +#define ADF_GEN4_ARB_OFFSET 0x0 +#define ADF_GEN4_ARB_WRK_2_SER_MAP_OFFSET 0x400 + +/* Admin Interface Reg Offset */ +#define ADF_GEN4_ADMINMSGUR_OFFSET 0x500574 +#define ADF_GEN4_ADMINMSGLR_OFFSET 0x500578 +#define ADF_GEN4_MAILBOX_BASE_OFFSET 0x600970 + /* Transport access */ #define ADF_BANK_INT_SRC_SEL_MASK 0x44UL #define ADF_RING_CSR_RING_CONFIG 0x1000 @@ -139,7 +187,54 @@ do { \ /* Number of heartbeat counter pairs */ #define ADF_NUM_HB_CNT_PER_AE ADF_NUM_THREADS_PER_AE +/* Rate Limiting */ +#define ADF_GEN4_RL_R2L_OFFSET 0x508000 +#define ADF_GEN4_RL_L2C_OFFSET 0x509000 +#define ADF_GEN4_RL_C2S_OFFSET 0x508818 
+#define ADF_GEN4_RL_TOKEN_PCIEIN_BUCKET_OFFSET 0x508800 +#define ADF_GEN4_RL_TOKEN_PCIEOUT_BUCKET_OFFSET 0x508804 + +/* Arbiter threads mask with error value */ +#define ADF_GEN4_ENA_THD_MASK_ERROR GENMASK(ADF_NUM_THREADS_PER_AE, 0) + void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); + +enum icp_qat_gen4_slice_mask { + ICP_ACCEL_GEN4_MASK_CIPHER_SLICE = BIT(0), + ICP_ACCEL_GEN4_MASK_AUTH_SLICE = BIT(1), + ICP_ACCEL_GEN4_MASK_PKE_SLICE = BIT(2), + ICP_ACCEL_GEN4_MASK_COMPRESS_SLICE = BIT(3), + ICP_ACCEL_GEN4_MASK_UCS_SLICE = BIT(4), + ICP_ACCEL_GEN4_MASK_EIA3_SLICE = BIT(5), + ICP_ACCEL_GEN4_MASK_SMX_SLICE = BIT(7), + ICP_ACCEL_GEN4_MASK_WCP_WAT_SLICE = BIT(8), + ICP_ACCEL_GEN4_MASK_ZUC_256_SLICE = BIT(9), +}; + +enum adf_gen4_rp_groups { + RP_GROUP_0, + RP_GROUP_1, + RP_GROUP_COUNT +}; + +void adf_gen4_enable_error_correction(struct adf_accel_dev *accel_dev); +void adf_gen4_enable_ints(struct adf_accel_dev *accel_dev); +u32 adf_gen4_get_accel_mask(struct adf_hw_device_data *self); +void adf_gen4_get_admin_info(struct admin_info *admin_csrs_info); +void adf_gen4_get_arb_info(struct arb_info *arb_info); +u32 adf_gen4_get_etr_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_heartbeat_clock(struct adf_hw_device_data *self); +u32 adf_gen4_get_misc_bar_id(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_accels(struct adf_hw_device_data *self); +u32 adf_gen4_get_num_aes(struct adf_hw_device_data *self); +enum dev_sku_info adf_gen4_get_sku(struct adf_hw_device_data *self); +u32 adf_gen4_get_sram_bar_id(struct adf_hw_device_data *self); +int adf_gen4_init_device(struct adf_accel_dev *accel_dev); void adf_gen4_init_hw_csr_ops(struct adf_hw_csr_ops *csr_ops); int adf_gen4_ring_pair_reset(struct adf_accel_dev *accel_dev, u32 bank_number); +void adf_gen4_set_msix_default_rttable(struct adf_accel_dev *accel_dev); +void adf_gen4_set_ssm_wdtimer(struct adf_accel_dev *accel_dev); +int adf_gen4_init_thd2arb_map(struct adf_accel_dev *accel_dev); +u16 adf_gen4_get_ring_to_svc_map(struct adf_accel_dev *accel_dev); + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c index 34c6cd8e27c0b58d16db092e0a1cbc875c0dd1e6..5dafd9a270dbd87f261a6d6ea228326dfcb6e78b 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.c @@ -2,7 +2,10 @@ /* Copyright(c) 2022 Intel Corporation */ #include #include +#include + #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_common_drv.h" #include "adf_gen4_pm.h" #include "adf_cfg_strings.h" @@ -10,11 +13,6 @@ #include "adf_gen4_hw_data.h" #include "adf_cfg.h" -enum qat_pm_host_msg { - PM_NO_CHANGE = 0, - PM_SET_MIN, -}; - struct adf_gen4_pm_data { struct work_struct pm_irq_work; struct adf_accel_dev *accel_dev; @@ -25,6 +23,7 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) { char pm_idle_support_cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {}; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; bool pm_idle_support; u32 msg; int ret; @@ -39,6 +38,11 @@ static int send_host_msg(struct adf_accel_dev *accel_dev) if (ret) pm_idle_support = true; + if (pm_idle_support) + pm->host_ack_counter++; + else + pm->host_nack_counter++; + /* Send HOST_MSG */ msg = FIELD_PREP(ADF_GEN4_PM_MSG_PAYLOAD_BIT_MASK, pm_idle_support ? 
PM_SET_MIN : PM_NO_CHANGE); @@ -59,17 +63,27 @@ static void pm_bh_handler(struct work_struct *work) container_of(work, struct adf_gen4_pm_data, pm_irq_work); struct adf_accel_dev *accel_dev = pm_data->accel_dev; void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; u32 pm_int_sts = pm_data->pm_int_sts; u32 val; /* PM Idle interrupt */ if (pm_int_sts & ADF_GEN4_PM_IDLE_STS) { + pm->idle_irq_counters++; /* Issue host message to FW */ if (send_host_msg(accel_dev)) dev_warn_ratelimited(&GET_DEV(accel_dev), "Failed to send host msg to FW\n"); } + /* PM throttle interrupt */ + if (pm_int_sts & ADF_GEN4_PM_THR_STS) + pm->throttle_irq_counters++; + + /* PM fw interrupt */ + if (pm_int_sts & ADF_GEN4_PM_FW_INT_STS) + pm->fw_irq_counters++; + /* Clear interrupt status */ ADF_CSR_WR(pmisc, ADF_GEN4_PM_INTERRUPT, pm_int_sts); @@ -129,6 +143,9 @@ int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev) if (ret) return ret; + /* Initialize PM internal data */ + adf_gen4_init_dev_pm_data(accel_dev); + /* Enable default PM interrupts: IDLE, THROTTLE */ val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); val |= ADF_GEN4_PM_INT_EN_DEFAULT; diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h index c2768762cca3b683513936bdd1be1d40586d4bdc..a49352b79a7adff1b14eea0880f463d2010df7ff 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm.h @@ -3,7 +3,14 @@ #ifndef ADF_GEN4_PM_H #define ADF_GEN4_PM_H -#include "adf_accel_devices.h" +#include + +struct adf_accel_dev; + +enum qat_pm_host_msg { + PM_NO_CHANGE = 0, + PM_SET_MIN, +}; /* Power management registers */ #define ADF_GEN4_PM_HOST_MSG (0x50A01C) @@ -39,7 +46,48 @@ #define ADF_GEN4_PM_MAX_IDLE_FILTER (0x7) #define ADF_GEN4_PM_DEFAULT_IDLE_SUPPORT (0x1) +/* PM CSRs fields masks */ +#define ADF_GEN4_PM_DOMAIN_POWER_GATED_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_SSM_PM_ENABLE_MASK GENMASK(15, 0) +#define ADF_GEN4_PM_IDLE_FILTER_MASK GENMASK(5, 3) +#define ADF_GEN4_PM_IDLE_ENABLE_MASK BIT(2) +#define ADF_GEN4_PM_ENABLE_PM_MASK BIT(21) +#define ADF_GEN4_PM_ENABLE_PM_IDLE_MASK BIT(22) +#define ADF_GEN4_PM_ENABLE_DEEP_PM_IDLE_MASK BIT(23) +#define ADF_GEN4_PM_CURRENT_WP_MASK GENMASK(19, 11) +#define ADF_GEN4_PM_CPM_PM_STATE_MASK GENMASK(22, 20) +#define ADF_GEN4_PM_PENDING_WP_MASK GENMASK(31, 23) +#define ADF_GEN4_PM_THR_VALUE_MASK GENMASK(6, 4) +#define ADF_GEN4_PM_MIN_PWR_ACK_MASK BIT(7) +#define ADF_GEN4_PM_MIN_PWR_ACK_PENDING_MASK BIT(17) +#define ADF_GEN4_PM_CPR_ACTIVE_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_CPR_MANAGED_COUNT_MASK BIT(0) +#define ADF_GEN4_PM_XLT_ACTIVE_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_XLT_MANAGED_COUNT_MASK BIT(1) +#define ADF_GEN4_PM_DCPR_ACTIVE_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_DCPR_MANAGED_COUNT_MASK GENMASK(3, 2) +#define ADF_GEN4_PM_PKE_ACTIVE_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_PKE_MANAGED_COUNT_MASK GENMASK(8, 4) +#define ADF_GEN4_PM_WAT_ACTIVE_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WAT_MANAGED_COUNT_MASK GENMASK(13, 9) +#define ADF_GEN4_PM_WCP_ACTIVE_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_WCP_MANAGED_COUNT_MASK GENMASK(18, 14) +#define ADF_GEN4_PM_UCS_ACTIVE_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_UCS_MANAGED_COUNT_MASK GENMASK(20, 19) +#define ADF_GEN4_PM_CPH_ACTIVE_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_CPH_MANAGED_COUNT_MASK GENMASK(24, 21) +#define ADF_GEN4_PM_ATH_ACTIVE_COUNT_MASK GENMASK(28, 25) +#define 
ADF_GEN4_PM_ATH_MANAGED_COUNT_MASK GENMASK(28, 25) + int adf_gen4_enable_pm(struct adf_accel_dev *accel_dev); bool adf_gen4_handle_pm_interrupt(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev); +#else +static inline void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ + #endif diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..ee0b5079de3ec95756c3b23af51ab4a75db62ea7 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_pm_debugfs.c @@ -0,0 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_gen4_pm.h" +#include "icp_qat_fw_init_admin.h" + +/* + * This is needed because a variable is used to index the mask at + * pm_scnprint_table(), making it not compile time constant, so the compile + * asserts from FIELD_GET() or u32_get_bits() won't be fulfilled. + */ +#define field_get(_mask, _reg) (((_reg) & (_mask)) >> (ffs(_mask) - 1)) + +#define PM_INFO_MEMBER_OFF(member) \ + (offsetof(struct icp_qat_fw_init_admin_pm_info, member) / sizeof(u32)) + +#define PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, _mask_) \ +{ \ + .reg_offset = PM_INFO_MEMBER_OFF(_reg_), \ + .key = __stringify(_field_), \ + .field_mask = _mask_, \ +} + +#define PM_INFO_REGSET_ENTRY32(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, GENMASK(31, 0)) + +#define PM_INFO_REGSET_ENTRY(_reg_, _field_) \ + PM_INFO_REGSET_ENTRY_MASK(_reg_, _field_, ADF_GEN4_PM_##_field_##_MASK) + +#define PM_INFO_MAX_KEY_LEN 21 + +struct pm_status_row { + int reg_offset; + u32 field_mask; + const char *key; +}; + +static struct pm_status_row pm_fuse_rows[] = { + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_PM_IDLE), + PM_INFO_REGSET_ENTRY(fusectl0, ENABLE_DEEP_PM_IDLE), +}; + +static struct pm_status_row pm_info_rows[] = { + PM_INFO_REGSET_ENTRY(pm.status, CPM_PM_STATE), + PM_INFO_REGSET_ENTRY(pm.status, PENDING_WP), + PM_INFO_REGSET_ENTRY(pm.status, CURRENT_WP), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_ENABLE), + PM_INFO_REGSET_ENTRY(pm.fw_init, IDLE_FILTER), + PM_INFO_REGSET_ENTRY(pm.main, MIN_PWR_ACK), + PM_INFO_REGSET_ENTRY(pm.thread, MIN_PWR_ACK_PENDING), + PM_INFO_REGSET_ENTRY(pm.main, THR_VALUE), +}; + +static struct pm_status_row pm_ssm_rows[] = { + PM_INFO_REGSET_ENTRY(ssm.pm_enable, SSM_PM_ENABLE), + PM_INFO_REGSET_ENTRY32(ssm.active_constraint, ACTIVE_CONSTRAINT), + PM_INFO_REGSET_ENTRY(ssm.pm_domain_status, DOMAIN_POWER_GATED), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, ATH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPH_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, PKE_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, CPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, DCPR_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, UCS_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, XLT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WAT_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_active_status, WCP_ACTIVE_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, ATH_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPH_MANAGED_COUNT), + 
PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, PKE_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, CPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, DCPR_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, UCS_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, XLT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WAT_MANAGED_COUNT), + PM_INFO_REGSET_ENTRY(ssm.pm_managed_status, WCP_MANAGED_COUNT), +}; + +static struct pm_status_row pm_log_rows[] = { + PM_INFO_REGSET_ENTRY32(event_counters.host_msg, HOST_MSG_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.sys_pm, SYS_PM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.local_ssm, SSM_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.timer, TIMER_EVENT_COUNT), + PM_INFO_REGSET_ENTRY32(event_counters.unknown, UNKNOWN_EVENT_COUNT), +}; + +static struct pm_status_row pm_event_rows[ICP_QAT_NUMBER_OF_PM_EVENTS] = { + PM_INFO_REGSET_ENTRY32(event_log[0], EVENT0), + PM_INFO_REGSET_ENTRY32(event_log[1], EVENT1), + PM_INFO_REGSET_ENTRY32(event_log[2], EVENT2), + PM_INFO_REGSET_ENTRY32(event_log[3], EVENT3), + PM_INFO_REGSET_ENTRY32(event_log[4], EVENT4), + PM_INFO_REGSET_ENTRY32(event_log[5], EVENT5), + PM_INFO_REGSET_ENTRY32(event_log[6], EVENT6), + PM_INFO_REGSET_ENTRY32(event_log[7], EVENT7), +}; + +static struct pm_status_row pm_csrs_rows[] = { + PM_INFO_REGSET_ENTRY32(pm.fw_init, CPM_PM_FW_INIT), + PM_INFO_REGSET_ENTRY32(pm.status, CPM_PM_STATUS), + PM_INFO_REGSET_ENTRY32(pm.main, CPM_PM_MASTER_FW), + PM_INFO_REGSET_ENTRY32(pm.pwrreq, CPM_PM_PWRREQ), +}; + +static int pm_scnprint_table(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, int table_len, + bool lowercase) +{ + char key[PM_INFO_MAX_KEY_LEN]; + int wr = 0; + int i; + + for (i = 0; i < table_len; i++) { + if (lowercase) + string_lower(key, table[i].key); + else + string_upper(key, table[i].key); + + wr += scnprintf(&buff[wr], buff_size - wr, "%s: %#x\n", key, + field_get(table[i].field_mask, + pm_info_regs[table[i].reg_offset])); + } + + return wr; +} + +static int pm_scnprint_table_upper_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, false); +} + +static int pm_scnprint_table_lower_keys(char *buff, struct pm_status_row *table, + u32 *pm_info_regs, size_t buff_size, + int table_len) +{ + return pm_scnprint_table(buff, table, pm_info_regs, buff_size, + table_len, true); +} + +static_assert(sizeof(struct icp_qat_fw_init_admin_pm_info) < PAGE_SIZE); + +static ssize_t adf_gen4_print_pm_status(struct adf_accel_dev *accel_dev, + char __user *buf, size_t count, + loff_t *pos) +{ + void __iomem *pmisc = adf_get_pmisc_base(accel_dev); + struct adf_pm *pm = &accel_dev->power_management; + struct icp_qat_fw_init_admin_pm_info *pm_info; + dma_addr_t p_state_addr; + u32 *pm_info_regs; + char *pm_kv; + int len = 0; + u32 val; + int ret; + + pm_info = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_info) + return -ENOMEM; + + pm_kv = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!pm_kv) { + ret = -ENOMEM; + goto out_free; + } + + p_state_addr = dma_map_single(&GET_DEV(accel_dev), pm_info, PAGE_SIZE, + DMA_FROM_DEVICE); + ret = dma_mapping_error(&GET_DEV(accel_dev), p_state_addr); + if (ret) + goto out_free; + + /* Query PM info from QAT FW */ + ret = adf_get_pm_info(accel_dev, p_state_addr, PAGE_SIZE); + dma_unmap_single(&GET_DEV(accel_dev), p_state_addr, PAGE_SIZE, + 
DMA_FROM_DEVICE); + if (ret) + goto out_free; + + pm_info_regs = (u32 *)pm_info; + + /* Fusectl related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- PM Fuse info ---------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_fuse_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_fuse_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "max_pwrreq: %#x\n", + pm_info->max_pwrreq); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "min_pwrreq: %#x\n", + pm_info->min_pwrreq); + + /* PM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------ PM Info ------------\n"); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "power_level: %s\n", + pm_info->pwr_state == PM_SET_MIN ? "min" : "max"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_info_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_info_rows)); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "pm_mode: STATIC\n"); + + /* SSM related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- SSM_PM Info ----------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_ssm_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_ssm_rows)); + + /* Log related */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "------------- PM Log -------------\n"); + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_log_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_log_rows)); + + len += pm_scnprint_table_lower_keys(&pm_kv[len], pm_event_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_event_rows)); + + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "idle_irq_count: %#x\n", + pm->idle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "fw_irq_count: %#x\n", + pm->fw_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "throttle_irq_count: %#x\n", pm->throttle_irq_counters); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_ack_count: %#x\n", + pm->host_ack_counter); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, "host_nack_count: %#x\n", + pm->host_nack_counter); + + /* CSRs content */ + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "----------- HW PM CSRs -----------\n"); + len += pm_scnprint_table_upper_keys(&pm_kv[len], pm_csrs_rows, + pm_info_regs, PAGE_SIZE - len, + ARRAY_SIZE(pm_csrs_rows)); + + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_HOST_MSG); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_HOST_MSG: %#x\n", val); + val = ADF_CSR_RD(pmisc, ADF_GEN4_PM_INTERRUPT); + len += scnprintf(&pm_kv[len], PAGE_SIZE - len, + "CPM_PM_INTERRUPT: %#x\n", val); + ret = simple_read_from_buffer(buf, count, pos, pm_kv, len); + +out_free: + kfree(pm_info); + kfree(pm_kv); + return ret; +} + +void adf_gen4_init_dev_pm_data(struct adf_accel_dev *accel_dev) +{ + accel_dev->power_management.print_pm_status = adf_gen4_print_pm_status; + accel_dev->power_management.present = true; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c new file mode 100644 index 0000000000000000000000000000000000000000..2dd3772bf58a6ce673587bdae15c0f751e0329d6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.c @@ -0,0 +1,1564 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include "adf_common_drv.h" +#include "adf_gen4_hw_data.h" +#include "adf_gen4_ras.h" +#include "adf_sysfs_ras_counters.h" + +#define BITS_PER_REG(_n_) (sizeof(_n_) * BITS_PER_BYTE) + +static void enable_errsou_reporting(void __iomem *csr) +{ + /* Enable 
correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, 0); + + /* Enable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, 0); + + /* + * Enable uncorrectable error reporting in ERRSOU2 + * but disable PM interrupt and CFC attention interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, + ADF_GEN4_ERRSOU2_PM_INT_BIT | + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK); + + /* + * Enable uncorrectable error reporting in ERRSOU3 + * but disable RLT error interrupt and VFLR notify interrupt by default + */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, + ADF_GEN4_ERRSOU3_RLTERROR_BIT | + ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT); +} + +static void disable_errsou_reporting(void __iomem *csr) +{ + u32 val = 0; + + /* Disable correctable error reporting in ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK0, ADF_GEN4_ERRSOU0_BIT); + + /* Disable uncorrectable error reporting in ERRSOU1 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK1, ADF_GEN4_ERRSOU1_BITMASK); + + /* Disable uncorrectable error reporting in ERRSOU2 */ + val = ADF_CSR_RD(csr, ADF_GEN4_ERRMSK2); + val |= ADF_GEN4_ERRSOU2_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK2, val); + + /* Disable uncorrectable error reporting in ERRSOU3 */ + ADF_CSR_WR(csr, ADF_GEN4_ERRMSK3, ADF_GEN4_ERRSOU3_BITMASK); +} + +static void enable_ae_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 ae_mask = GET_HW_DATA(accel_dev)->ae_mask; + + /* Enable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, ae_mask); + + /* Enable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, ae_mask); +} + +static void disable_ae_error_reporting(void __iomem *csr) +{ + /* Disable Acceleration Engine correctable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOGENABLE_CPP0, 0); + + /* Disable Acceleration Engine uncorrectable error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0, 0); +} + +static void enable_cpp_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, + err_mask->cppagentcmdpar_mask); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK); +} + +static void disable_cpp_error_reporting(void __iomem *csr) +{ + /* Disable HI CPP Agents Command Parity Error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE, 0); + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_CTRL, + ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK); +} + +static void enable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Enable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK); + + /* Enable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, ADF_GEN4_RIMISCSTS_BIT); + + /* Enable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, 0); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, 0); + + /* Enable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, ADF_GEN4_RICPPINTCTL_BITMASK); + + 
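+	/*
+	 * The TI control register below mirrors the RI one above; together
+	 * they cover both sides of the CPP interface.
+	 */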
ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, ADF_GEN4_TICPPINTCTL_BITMASK); + + /* + * Enable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + reg |= ADF_GEN4_TIMISCCTL_BIT; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); +} + +static void disable_ti_ri_error_reporting(void __iomem *csr) +{ + u32 reg; + + /* Disable RI Memory error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RI_MEM_PAR_ERR_EN0, 0); + + /* Disable IOSF Primary Command Parity error Reporting */ + ADF_CSR_WR(csr, ADF_GEN4_RIMISCCTL, 0); + + /* Disable TI Internal Memory Parity Error reporting */ + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_ERR_MASK, + ADF_GEN4_TI_CI_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK, + ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK, + ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_ERR_MASK, + ADF_GEN4_TI_CD_PAR_STS_BITMASK); + ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_ERR_MASK, + ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK); + + /* Disable error handling in RI, TI CPP interface control registers */ + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTCTL, 0); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTCTL, 0); + + /* + * Disable error detection and reporting in TIMISCSTS + * with bits 1, 2 and 30 value preserved + */ + reg = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL); + reg &= ADF_GEN4_TIMSCCTL_RELAY_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, reg); +} + +static void enable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Enable RF parity error in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, 0); +} + +static void disable_rf_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + + /* Disable RF Parity Error reporting in Shared RAM */ + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC, + ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Enable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, 0); + + /* Enable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val |= err_mask->ssmfeatren_mask; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Enable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, + ADF_GEN4_SER_EN_SSMSH_BITMASK); + + /* 
Enable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, 0); + + /* Enable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, 0); + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, 0); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, 0); +} + +static void disable_ssm_error_reporting(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 val = 0; + + /* Disable SSM interrupts */ + ADF_CSR_WR(csr, ADF_GEN4_INTMASKSSM, + ADF_GEN4_INTMASKSSM_BITMASK); + + /* Disable shared memory error detection & correction */ + val = ADF_CSR_RD(csr, ADF_GEN4_SSMFEATREN); + val &= ADF_GEN4_SSMFEATREN_DIS_BITMASK; + ADF_CSR_WR(csr, ADF_GEN4_SSMFEATREN, val); + + /* Disable SER detection in SER_err_ssmsh register */ + ADF_CSR_WR(csr, ADF_GEN4_SER_EN_SSMSH, 0); + + /* Disable SSM soft parity error */ + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SPPPARERRMSK_WAT_WCP, + err_mask->parerr_wat_wcp_mask); + + /* Disable slice hang interrupt reporting */ + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_ATH_CPH, + err_mask->parerr_ath_cph_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_CPR_XLT, + err_mask->parerr_cpr_xlt_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_DCPR_UCS, + err_mask->parerr_dcpr_ucs_mask); + + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_PKE, + err_mask->parerr_pke_mask); + + if (err_mask->parerr_wat_wcp_mask) + ADF_CSR_WR(csr, ADF_GEN4_SHINTMASKSSM_WAT_WCP, + err_mask->parerr_wat_wcp_mask); +} + +static void enable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, + ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, + ADF_GEN4_REG_ARAMCERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, + ADF_GEN4_REG_ARAMUERR_EN_BITMASK); + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, + ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK); +} + +static void disable_aram_error_reporting(void __iomem *csr) +{ + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERRUERR_EN, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, 0); + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, 0); +} + +static void adf_gen4_enable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem *csr = adf_get_pmisc_base(accel_dev); + + enable_errsou_reporting(csr); + enable_ae_error_reporting(accel_dev, csr); + enable_cpp_error_reporting(accel_dev, csr); + enable_ti_ri_error_reporting(csr); + enable_rf_error_reporting(accel_dev, csr); + enable_ssm_error_reporting(accel_dev, csr); + enable_aram_error_reporting(aram_csr); +} + +static void adf_gen4_disable_ras(struct adf_accel_dev *accel_dev) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem 
*csr = adf_get_pmisc_base(accel_dev); + + disable_errsou_reporting(csr); + disable_ae_error_reporting(csr); + disable_cpp_error_reporting(csr); + disable_ti_ri_error_reporting(csr); + disable_rf_error_reporting(accel_dev, csr); + disable_ssm_error_reporting(accel_dev, csr); + disable_aram_error_reporting(aram_csr); +} + +static void adf_gen4_process_errsou0(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 aecorrerr = ADF_CSR_RD(csr, ADF_GEN4_HIAECORERRLOG_CPP0); + + aecorrerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_warn(&GET_DEV(accel_dev), + "Correctable error detected in AE: 0x%x\n", + aecorrerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + /* Clear interrupt from ERRSOU0 */ + ADF_CSR_WR(csr, ADF_GEN4_HIAECORERRLOG_CPP0, aecorrerr); +} + +static bool adf_handle_cpp_aeunc(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aeuncorerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT)) + return false; + + aeuncorerr = ADF_CSR_RD(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0); + aeuncorerr &= GET_HW_DATA(accel_dev)->ae_mask; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error detected in AE: 0x%x\n", + aeuncorerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_HIAEUNCERRLOG_CPP0, aeuncorerr); + + return false; +} + +static bool adf_handle_cppcmdparerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 cmdparerr; + + if (!(errsou & ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT)) + return false; + + cmdparerr = ADF_CSR_RD(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG); + cmdparerr &= err_mask->cppagentcmdpar_mask; + + dev_err(&GET_DEV(accel_dev), + "HI CPP agent command parity error: 0x%x\n", + cmdparerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_HICPPAGENTCMDPARERRLOG, cmdparerr); + + return true; +} + +static bool adf_handle_ri_mem_par_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 rimem_parerr_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT)) + return false; + + rimem_parerr_sts = ADF_CSR_RD(csr, ADF_GEN4_RIMEM_PARERR_STS); + rimem_parerr_sts &= ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK | + ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK; + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity uncorrectable error: 0x%x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (rimem_parerr_sts & ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "RI Memory Parity fatal error: 0x%x\n", + rimem_parerr_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_RIMEM_PARERR_STS, rimem_parerr_sts); + + return reset_required; +} + +static bool adf_handle_ti_ci_par_sts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ti_ci_par_sts; + + if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT)) + return false; + + ti_ci_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CI_PAR_STS); + ti_ci_par_sts &= ADF_GEN4_TI_CI_PAR_STS_BITMASK; + + if (ti_ci_par_sts) { + dev_err(&GET_DEV(accel_dev), + "TI Memory Parity Error: 0x%x\n", ti_ci_par_sts); + ADF_CSR_WR(csr, ADF_GEN4_TI_CI_PAR_STS, ti_ci_par_sts); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + return false; +} + +static bool 
adf_handle_ti_pullfub_par_sts(struct adf_accel_dev *accel_dev,
+					  void __iomem *csr, u32 errsou)
+{
+	u32 ti_pullfub_par_sts;
+
+	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+		return false;
+
+	ti_pullfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS);
+	ti_pullfub_par_sts &= ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK;
+
+	if (ti_pullfub_par_sts) {
+		dev_err(&GET_DEV(accel_dev),
+			"TI Pull Parity Error: 0x%x\n", ti_pullfub_par_sts);
+
+		ADF_CSR_WR(csr, ADF_GEN4_TI_PULL0FUB_PAR_STS,
+			   ti_pullfub_par_sts);
+
+		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+	}
+
+	return false;
+}
+
+static bool adf_handle_ti_pushfub_par_sts(struct adf_accel_dev *accel_dev,
+					  void __iomem *csr, u32 errsou)
+{
+	u32 ti_pushfub_par_sts;
+
+	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+		return false;
+
+	ti_pushfub_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS);
+	ti_pushfub_par_sts &= ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK;
+
+	if (ti_pushfub_par_sts) {
+		dev_err(&GET_DEV(accel_dev),
+			"TI Push Parity Error: 0x%x\n", ti_pushfub_par_sts);
+
+		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+		ADF_CSR_WR(csr, ADF_GEN4_TI_PUSHFUB_PAR_STS,
+			   ti_pushfub_par_sts);
+	}
+
+	return false;
+}
+
+static bool adf_handle_ti_cd_par_sts(struct adf_accel_dev *accel_dev,
+				     void __iomem *csr, u32 errsou)
+{
+	u32 ti_cd_par_sts;
+
+	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+		return false;
+
+	ti_cd_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_CD_PAR_STS);
+	ti_cd_par_sts &= ADF_GEN4_TI_CD_PAR_STS_BITMASK;
+
+	if (ti_cd_par_sts) {
+		dev_err(&GET_DEV(accel_dev),
+			"TI CD Parity Error: 0x%x\n", ti_cd_par_sts);
+
+		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+		ADF_CSR_WR(csr, ADF_GEN4_TI_CD_PAR_STS, ti_cd_par_sts);
+	}
+
+	return false;
+}
+
+static bool adf_handle_ti_trnsb_par_sts(struct adf_accel_dev *accel_dev,
+					void __iomem *csr, u32 errsou)
+{
+	u32 ti_trnsb_par_sts;
+
+	if (!(errsou & ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT))
+		return false;
+
+	ti_trnsb_par_sts = ADF_CSR_RD(csr, ADF_GEN4_TI_TRNSB_PAR_STS);
+	ti_trnsb_par_sts &= ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK;
+
+	if (ti_trnsb_par_sts) {
+		dev_err(&GET_DEV(accel_dev),
+			"TI TRNSB Parity Error: 0x%x\n", ti_trnsb_par_sts);
+
+		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+		ADF_CSR_WR(csr, ADF_GEN4_TI_TRNSB_PAR_STS, ti_trnsb_par_sts);
+	}
+
+	return false;
+}
+
+static bool adf_handle_iosfp_cmd_parerr(struct adf_accel_dev *accel_dev,
+					void __iomem *csr, u32 errsou)
+{
+	u32 rimiscsts;
+
+	if (!(errsou & ADF_GEN4_ERRSOU1_RIMISCSTS_BIT))
+		return false;
+
+	rimiscsts = ADF_CSR_RD(csr, ADF_GEN4_RIMISCSTS);
+	rimiscsts &= ADF_GEN4_RIMISCSTS_BIT;
+
+	dev_err(&GET_DEV(accel_dev),
+		"Command parity error detected on IOSFP: 0x%x\n",
+		rimiscsts);
+
+	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+	ADF_CSR_WR(csr, ADF_GEN4_RIMISCSTS, rimiscsts);
+
+	return true;
+}
+
+static void adf_gen4_process_errsou1(struct adf_accel_dev *accel_dev,
+				     void __iomem *csr, u32 errsou,
+				     bool *reset_required)
+{
+	*reset_required |= adf_handle_cpp_aeunc(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_cppcmdparerr(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ri_mem_par_err(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ti_ci_par_sts(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ti_pullfub_par_sts(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ti_pushfub_par_sts(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ti_cd_par_sts(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_ti_trnsb_par_sts(accel_dev, csr, errsou);
+	*reset_required |= adf_handle_iosfp_cmd_parerr(accel_dev, csr, errsou);
+}
+
+static bool adf_handle_uerrssmsh(struct adf_accel_dev *accel_dev,
+				 void __iomem *csr, u32 iastatssm)
+{
+	u32 reg;
+
+	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT))
+		return false;
+
+	reg = ADF_CSR_RD(csr, ADF_GEN4_UERRSSMSH);
+	reg &= ADF_GEN4_UERRSSMSH_BITMASK;
+
+	dev_err(&GET_DEV(accel_dev),
+		"Uncorrectable error on ssm shared memory: 0x%x\n",
+		reg);
+
+	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+	ADF_CSR_WR(csr, ADF_GEN4_UERRSSMSH, reg);
+
+	return false;
+}
+
+static bool adf_handle_cerrssmsh(struct adf_accel_dev *accel_dev,
+				 void __iomem *csr, u32 iastatssm)
+{
+	u32 reg;
+
+	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT))
+		return false;
+
+	reg = ADF_CSR_RD(csr, ADF_GEN4_CERRSSMSH);
+	reg &= ADF_GEN4_CERRSSMSH_ERROR_BIT;
+
+	dev_warn(&GET_DEV(accel_dev),
+		 "Correctable error on ssm shared memory: 0x%x\n",
+		 reg);
+
+	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR);
+
+	ADF_CSR_WR(csr, ADF_GEN4_CERRSSMSH, reg);
+
+	return false;
+}
+
+static bool adf_handle_pperr_err(struct adf_accel_dev *accel_dev,
+				 void __iomem *csr, u32 iastatssm)
+{
+	u32 reg;
+
+	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_PPERR_BIT))
+		return false;
+
+	reg = ADF_CSR_RD(csr, ADF_GEN4_PPERR);
+	reg &= ADF_GEN4_PPERR_BITMASK;
+
+	dev_err(&GET_DEV(accel_dev),
+		"Uncorrectable error CPP transaction on memory target: 0x%x\n",
+		reg);
+
+	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+
+	ADF_CSR_WR(csr, ADF_GEN4_PPERR, reg);
+
+	return false;
+}
+
+static void adf_poll_slicehang_csr(struct adf_accel_dev *accel_dev,
+				   void __iomem *csr, u32 slice_hang_offset,
+				   char *slice_name)
+{
+	u32 slice_hang_reg = ADF_CSR_RD(csr, slice_hang_offset);
+
+	if (!slice_hang_reg)
+		return;
+
+	dev_err(&GET_DEV(accel_dev),
+		"Slice %s hang error encountered\n", slice_name);
+
+	ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR);
+}
+
+static bool adf_handle_slice_hang_error(struct adf_accel_dev *accel_dev,
+					void __iomem *csr, u32 iastatssm)
+{
+	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+
+	if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT))
+		return false;
+
+	adf_poll_slicehang_csr(accel_dev, csr,
+			       ADF_GEN4_SLICEHANGSTATUS_ATH_CPH, "ath_cph");
+	adf_poll_slicehang_csr(accel_dev, csr,
+			       ADF_GEN4_SLICEHANGSTATUS_CPR_XLT, "cpr_xlt");
+	adf_poll_slicehang_csr(accel_dev, csr,
+			       ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS, "dcpr_ucs");
+	adf_poll_slicehang_csr(accel_dev, csr,
+			       ADF_GEN4_SLICEHANGSTATUS_PKE, "pke");
+
+	if (err_mask->parerr_wat_wcp_mask)
+		adf_poll_slicehang_csr(accel_dev, csr,
+				       ADF_GEN4_SLICEHANGSTATUS_WAT_WCP,
+				       "wat_wcp");
+
+	return false;
+}
+
+static bool adf_handle_spp_pullcmd_err(struct adf_accel_dev *accel_dev,
+				       void __iomem *csr)
+{
+	struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev);
+	bool reset_required = false;
+	u32 reg;
+
+	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH);
+	reg &= err_mask->parerr_ath_cph_mask;
+	if (reg) {
+		dev_err(&GET_DEV(accel_dev),
+			"SPP pull command fatal error ATH_CPH: 0x%x\n", reg);
+
+		ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL);
+
+		ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH, reg);
+
+		reset_required = true;
+	}
+
+	reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT);
+	reg &= err_mask->parerr_cpr_xlt_mask;
+	if (reg) {
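+		/*
+		 * As for ATH_CPH above: log the parity error, account it as
+		 * fatal and request a device reset.
+		 */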
dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pulldata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP pull data err WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP, reg); + } + } + + return false; +} + +static bool adf_handle_spp_pushcmd_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + bool reset_required = false; + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + 
dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS, reg); + + reset_required = true; + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error PKE: 0x%x\n", + reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_PKE, reg); + + reset_required = true; + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push command fatal error WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP, reg); + + reset_required = true; + } + } + + return reset_required; +} + +static bool adf_handle_spp_pushdata_err(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err ATH_CPH: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err CPR_XLT: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err DCPR_UCS: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err PKE: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + dev_err(&GET_DEV(accel_dev), + "SPP push data err WAT_WCP: 0x%x\n", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP, + reg); + } + } + + 
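+	/*
+	 * Push/pull data parity errors are logged as uncorrectable but,
+	 * unlike the command parity cases above, do not require a reset.
+	 */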
return false; +} + +static bool adf_handle_spppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + bool reset_required; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT)) + return false; + + reset_required = adf_handle_spp_pullcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pulldata_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushcmd_err(accel_dev, csr); + reset_required |= adf_handle_spp_pushdata_err(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_ssmcpppar_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg, bits_num = BITS_PER_REG(reg); + bool reset_required = false; + unsigned long errs_bits; + u32 bit_iterator; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMCPPERR); + reg &= ADF_GEN4_SSMCPPERR_FATAL_BITMASK | ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + if (reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SSM CPP parity error: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SSMCPPERR_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + reset_required = true; + } + + if (reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "non-Fatal SSM CPP parity error: 0x%x\n", reg); + errs_bits = reg & ADF_GEN4_SSMCPPERR_UNCERR_BITMASK; + + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + ADF_CSR_WR(csr, ADF_GEN4_SSMCPPERR, reg); + + return reset_required; +} + +static bool adf_handle_rf_parr_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + struct adf_dev_err_mask *err_mask = GET_ERR_MASK(accel_dev); + u32 reg; + + if (!(iastatssm & ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC); + reg &= ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_SRC, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH); + reg &= err_mask->parerr_ath_cph_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT); + reg &= err_mask->parerr_cpr_xlt_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS); + reg &= err_mask->parerr_dcpr_ucs_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS, reg); + } + + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE); + reg &= err_mask->parerr_pke_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_PKE, reg); + } + + if (err_mask->parerr_wat_wcp_mask) { + reg = ADF_CSR_RD(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP); + reg &= err_mask->parerr_wat_wcp_mask; + if (reg) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + ADF_CSR_WR(csr, ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP, + reg); + } + } + + dev_err(&GET_DEV(accel_dev), "Slice ssm soft parity error reported"); + + return false; +} + +static bool 
adf_handle_ser_err_ssmsh(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 iastatssm) +{ + u32 reg, bits_num = BITS_PER_REG(reg); + bool reset_required = false; + unsigned long errs_bits; + u32 bit_iterator; + + if (!(iastatssm & (ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT))) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_SER_ERR_SSMSH); + reg &= ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK | + ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + if (reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Fatal SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + } + + reset_required = true; + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "non-fatal SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + } + + if (reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK) { + dev_warn(&GET_DEV(accel_dev), + "Correctable SER_SSMSH_ERR: 0x%x\n", reg); + + errs_bits = reg & ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK; + for_each_set_bit(bit_iterator, &errs_bits, bits_num) { + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + } + + ADF_CSR_WR(csr, ADF_GEN4_SER_ERR_SSMSH, reg); + + return reset_required; +} + +static bool adf_handle_iaintstatssm(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 iastatssm = ADF_CSR_RD(csr, ADF_GEN4_IAINTSTATSSM); + bool reset_required; + + iastatssm &= ADF_GEN4_IAINTSTATSSM_BITMASK; + if (!iastatssm) + return false; + + reset_required = adf_handle_uerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_cerrssmsh(accel_dev, csr, iastatssm); + reset_required |= adf_handle_pperr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_slice_hang_error(accel_dev, csr, iastatssm); + reset_required |= adf_handle_spppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ssmcpppar_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_rf_parr_err(accel_dev, csr, iastatssm); + reset_required |= adf_handle_ser_err_ssmsh(accel_dev, csr, iastatssm); + + ADF_CSR_WR(csr, ADF_GEN4_IAINTSTATSSM, iastatssm); + + return reset_required; +} + +static bool adf_handle_exprpssmcmpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMCPR); + + reg &= ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK; + if (!reg) + return false; + + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM CMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMCPR, reg); + + return false; +} + +static bool adf_handle_exprpssmxlt(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT); + + reg &= ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMXLT_CERR_BIT; + if (!reg) + return false; + + if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM XLT: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMXLT_CERR_BIT) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM XLT: 0x%x", reg); + + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMXLT, reg); + + return false; +} + +static bool adf_handle_exprpssmdcpr(struct adf_accel_dev *accel_dev, + void __iomem *csr) +{ + u32 reg; + int i; + + for (i = 0; i < ADF_GEN4_DCPR_SLICES_NUM; i++) { + reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMDCPR(i)); + reg &= ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK | + ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK; + if (!reg) + continue; + + if (reg & ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK) { + dev_err(&GET_DEV(accel_dev), + "Uncorrectable error exception in SSM DCMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK) { + dev_warn(&GET_DEV(accel_dev), + "Correctable error exception in SSM DCMP: 0x%x", reg); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + } + + ADF_CSR_WR(csr, ADF_GEN4_EXPRPSSMDCPR(i), reg); + } + + return false; +} + +static bool adf_handle_ssm(struct adf_accel_dev *accel_dev, void __iomem *csr, + u32 errsou) +{ + bool reset_required; + + if (!(errsou & ADF_GEN4_ERRSOU2_SSM_ERR_BIT)) + return false; + + reset_required = adf_handle_iaintstatssm(accel_dev, csr); + reset_required |= adf_handle_exprpssmcmpr(accel_dev, csr); + reset_required |= adf_handle_exprpssmxlt(accel_dev, csr); + reset_required |= adf_handle_exprpssmdcpr(accel_dev, csr); + + return reset_required; +} + +static bool adf_handle_cpp_cfc_err(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 reg; + + if (!(errsou & ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT)) + return false; + + reg = ADF_CSR_RD(csr, ADF_GEN4_CPP_CFC_ERR_STATUS); + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: data parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: command parity: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } + + if (reg & ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT) { + dev_err(&GET_DEV(accel_dev), + "CPP_CFC_ERR: multiple errors: 0x%x", reg); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } + + ADF_CSR_WR(csr, ADF_GEN4_CPP_CFC_ERR_STATUS_CLR, + ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK); + + return reset_required; +} + +static void adf_gen4_process_errsou2(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou, + bool *reset_required) +{ + *reset_required |= adf_handle_ssm(accel_dev, csr, errsou); + *reset_required |= adf_handle_cpp_cfc_err(accel_dev, csr, errsou); +} + +static bool adf_handle_timiscsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 timiscsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TIMISCSTS_BIT)) + return false; + + timiscsts = ADF_CSR_RD(csr, ADF_GEN4_TIMISCSTS); + + dev_err(&GET_DEV(accel_dev), + "Fatal error in Transmit Interface: 0x%x\n", timiscsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + return true; +} + +static bool adf_handle_ricppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ricppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK)) + return false; + + ricppintsts = ADF_CSR_RD(csr, ADF_GEN4_RICPPINTSTS); + ricppintsts &= ADF_GEN4_RICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "RI CPP Uncorrectable Error: 0x%x\n", ricppintsts); + + 
ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_RICPPINTSTS, ricppintsts); + + return false; +} + +static bool adf_handle_ticppintsts(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 ticppintsts; + + if (!(errsou & ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK)) + return false; + + ticppintsts = ADF_CSR_RD(csr, ADF_GEN4_TICPPINTSTS); + ticppintsts &= ADF_GEN4_TICPPINTSTS_BITMASK; + + dev_err(&GET_DEV(accel_dev), + "TI CPP Uncorrectable Error: 0x%x\n", ticppintsts); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_TICPPINTSTS, ticppintsts); + + return false; +} + +static bool adf_handle_aramcerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 aram_cerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT)) + return false; + + aram_cerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMCERR); + aram_cerr &= ADF_GEN4_REG_ARAMCERR_BIT; + + dev_warn(&GET_DEV(accel_dev), + "ARAM correctable error : 0x%x\n", aram_cerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_CORR); + + aram_cerr |= ADF_GEN4_REG_ARAMCERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMCERR, aram_cerr); + + return false; +} + +static bool adf_handle_aramuerr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 aramuerr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + aramuerr = ADF_CSR_RD(csr, ADF_GEN4_REG_ARAMUERR); + aramuerr &= ADF_GEN4_REG_ARAMUERR_ERROR_BIT | + ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT; + + if (!aramuerr) + return false; + + if (aramuerr & ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "ARAM multiple uncorrectable errors: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "ARAM uncorrectable error: 0x%x\n", aramuerr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + aramuerr |= ADF_GEN4_REG_ARAMUERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_ARAMUERR, aramuerr); + + return reset_required; +} + +static bool adf_handle_reg_cppmemtgterr(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + bool reset_required = false; + u32 cppmemtgterr; + + if (!(errsou & ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT)) + return false; + + cppmemtgterr = ADF_CSR_RD(csr, ADF_GEN4_REG_CPPMEMTGTERR); + cppmemtgterr &= ADF_GEN4_REG_CPPMEMTGTERR_BITMASK | + ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT; + if (!cppmemtgterr) + return false; + + if (cppmemtgterr & ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT) { + dev_err(&GET_DEV(accel_dev), + "Misc memory target multiple uncorrectable errors: 0x%x\n", + cppmemtgterr); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_FATAL); + + reset_required = true; + } else { + dev_err(&GET_DEV(accel_dev), + "Misc memory target uncorrectable error: 0x%x\n", cppmemtgterr); + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + } + + cppmemtgterr |= ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK; + + ADF_CSR_WR(csr, ADF_GEN4_REG_CPPMEMTGTERR, cppmemtgterr); + + return reset_required; +} + +static bool adf_handle_atufaultstatus(struct adf_accel_dev *accel_dev, + void __iomem *csr, u32 errsou) +{ + u32 i; + u32 max_rp_num = GET_HW_DATA(accel_dev)->num_banks; + + if (!(errsou & ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT)) + return false; + + for (i = 0; i < max_rp_num; i++) { + u32 atufaultstatus = ADF_CSR_RD(csr, ADF_GEN4_ATUFAULTSTATUS(i)); + + 
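+		/*
+		 * One ATU (address translation unit) fault status CSR is
+		 * polled per ring pair.
+		 */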
atufaultstatus &= ADF_GEN4_ATUFAULTSTATUS_BIT; + + if (atufaultstatus) { + dev_err(&GET_DEV(accel_dev), + "Ring Pair (%u) ATU detected fault: 0x%x\n", i, + atufaultstatus); + + ADF_RAS_ERR_CTR_INC(accel_dev->ras_errors, ADF_RAS_UNCORR); + + ADF_CSR_WR(csr, ADF_GEN4_ATUFAULTSTATUS(i), atufaultstatus); + } + } + + return false; +} + +static void adf_gen4_process_errsou3(struct adf_accel_dev *accel_dev, + void __iomem *csr, void __iomem *aram_csr, + u32 errsou, bool *reset_required) +{ + *reset_required |= adf_handle_timiscsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ricppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_ticppintsts(accel_dev, csr, errsou); + *reset_required |= adf_handle_aramcerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_aramuerr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_reg_cppmemtgterr(accel_dev, aram_csr, errsou); + *reset_required |= adf_handle_atufaultstatus(accel_dev, csr, errsou); +} + +static bool adf_gen4_handle_interrupt(struct adf_accel_dev *accel_dev, + bool *reset_required) +{ + void __iomem *aram_csr = adf_get_aram_base(accel_dev); + void __iomem *csr = adf_get_pmisc_base(accel_dev); + u32 errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU0); + bool handled = false; + + *reset_required = false; + + if (errsou & ADF_GEN4_ERRSOU0_BIT) { + adf_gen4_process_errsou0(accel_dev, csr); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU1); + if (errsou & ADF_GEN4_ERRSOU1_BITMASK) { + adf_gen4_process_errsou1(accel_dev, csr, errsou, reset_required); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU2); + if (errsou & ADF_GEN4_ERRSOU2_BITMASK) { + adf_gen4_process_errsou2(accel_dev, csr, errsou, reset_required); + handled = true; + } + + errsou = ADF_CSR_RD(csr, ADF_GEN4_ERRSOU3); + if (errsou & ADF_GEN4_ERRSOU3_BITMASK) { + adf_gen4_process_errsou3(accel_dev, csr, aram_csr, errsou, reset_required); + handled = true; + } + + return handled; +} + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops) +{ + ras_ops->enable_ras_errors = adf_gen4_enable_ras; + ras_ops->disable_ras_errors = adf_gen4_disable_ras; + ras_ops->handle_interrupt = adf_gen4_handle_interrupt; +} +EXPORT_SYMBOL_GPL(adf_gen4_init_ras_ops); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h new file mode 100644 index 0000000000000000000000000000000000000000..53352083cd12acfeec2ba9e415789f5ffef6972f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_ras.h @@ -0,0 +1,825 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_GEN4_RAS_H_ +#define ADF_GEN4_RAS_H_ + +#include + +struct adf_ras_ops; + +/* ERRSOU0 Correctable error mask*/ +#define ADF_GEN4_ERRSOU0_BIT BIT(0) + +/* HI AE Correctable error log */ +#define ADF_GEN4_HIAECORERRLOG_CPP0 0x41A308 + +/* HI AE Correctable error log enable */ +#define ADF_GEN4_HIAECORERRLOGENABLE_CPP0 0x41A318 +#define ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT BIT(0) +#define ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT BIT(1) +#define ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT BIT(2) +#define ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT BIT(3) +#define ADF_GEN4_ERRSOU1_RIMISCSTS_BIT BIT(4) + +#define ADF_GEN4_ERRSOU1_BITMASK ( \ + (ADF_GEN4_ERRSOU1_HIAEUNCERRLOG_CPP0_BIT) | \ + (ADF_GEN4_ERRSOU1_HICPPAGENTCMDPARERRLOG_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_TIMEM_PARERR_STS_BIT) | \ + (ADF_GEN4_ERRSOU1_RIMISCSTS_BIT)) + +/* HI AE 
Uncorrectable error log */
+#define ADF_GEN4_HIAEUNCERRLOG_CPP0			0x41A300
+
+/* HI AE Uncorrectable error log enable */
+#define ADF_GEN4_HIAEUNCERRLOGENABLE_CPP0		0x41A320
+
+/* HI CPP Agent Command parity error log */
+#define ADF_GEN4_HICPPAGENTCMDPARERRLOG			0x41A310
+
+/* HI CPP Agent Command parity error logging enable */
+#define ADF_GEN4_HICPPAGENTCMDPARERRLOGENABLE		0x41A314
+
+/* RI Memory parity error status register */
+#define ADF_GEN4_RIMEM_PARERR_STS			0x41B128
+
+/* RI Memory parity error reporting enable */
+#define ADF_GEN4_RI_MEM_PAR_ERR_EN0			0x41B12C
+
+/*
+ * RI Memory parity error mask
+ * BIT(0) - BIT(3) - ri_iosf_pdata_rxq[0:3] parity error
+ * BIT(4) - ri_tlq_phdr parity error
+ * BIT(5) - ri_tlq_pdata parity error
+ * BIT(6) - ri_tlq_nphdr parity error
+ * BIT(7) - ri_tlq_npdata parity error
+ * BIT(8) - BIT(9) - ri_tlq_cplhdr[0:1] parity error
+ * BIT(10) - BIT(17) - ri_tlq_cpldata[0:7] parity error
+ * BIT(18) - set this bit to 1 to enable logging status to ri_mem_par_err_sts0
+ * BIT(19) - ri_cds_cmd_fifo parity error
+ * BIT(20) - ri_obc_ricpl_fifo parity error
+ * BIT(21) - ri_obc_tiricpl_fifo parity error
+ * BIT(22) - ri_obc_cppcpl_fifo parity error
+ * BIT(23) - ri_obc_pendcpl_fifo parity error
+ * BIT(24) - ri_cpp_cmd_fifo parity error
+ * BIT(25) - ri_cds_ticmd_fifo parity error
+ * BIT(26) - riti_cmd_fifo parity error
+ * BIT(27) - ri_int_msixtbl parity error
+ * BIT(28) - ri_int_imstbl parity error
+ * BIT(30) - ri_kpt_fuses parity error
+ */
+#define ADF_GEN4_RIMEM_PARERR_STS_UNCERR_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) |	\
+	 BIT(7) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+	 BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | \
+	 BIT(20) | BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | \
+	 BIT(26) | BIT(27) | BIT(28) | BIT(30))
+
+#define ADF_GEN4_RIMEM_PARERR_STS_FATAL_BITMASK	\
+	(BIT(4) | BIT(6) | BIT(8) | BIT(9))
+
+/* TI CI parity status */
+#define ADF_GEN4_TI_CI_PAR_STS				0x50060C
+
+/* TI CI parity reporting mask */
+#define ADF_GEN4_TI_CI_PAR_ERR_MASK			0x500608
+
+/*
+ * TI CI parity status mask
+ * BIT(0) - CdCmdQ_sts parity error status
+ * BIT(1) - CdDataQ_sts parity error status
+ * BIT(3) - CPP_SkidQ_sts parity error status
+ * BIT(7) - CPP_SkidQ_sc_sts parity error status
+ */
+#define ADF_GEN4_TI_CI_PAR_STS_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(3) | BIT(7))
+
+/* TI PULLFUB parity status */
+#define ADF_GEN4_TI_PULL0FUB_PAR_STS			0x500618
+
+/* TI PULLFUB parity error reporting mask */
+#define ADF_GEN4_TI_PULL0FUB_PAR_ERR_MASK		0x500614
+
+/*
+ * TI PULLFUB parity status mask
+ * BIT(0) - TrnPullReqQ_sts parity status
+ * BIT(1) - TrnSharedDataQ_sts parity status
+ * BIT(2) - TrnPullReqDataQ_sts parity status
+ * BIT(4) - CPP_CiPullReqQ_sts parity status
+ * BIT(5) - CPP_TrnPullReqQ_sts parity status
+ * BIT(6) - CPP_PullidQ_sts parity status
+ * BIT(7) - CPP_WaitDataQ_sts parity status
+ * BIT(8) - CPP_CdDataQ_sts parity status
+ * BIT(9) - CPP_TrnDataQP0_sts parity status
+ * BIT(10) - BIT(11) - CPP_TrnDataQRF[00:01]_sts parity status
+ * BIT(12) - CPP_TrnDataQP1_sts parity status
+ * BIT(13) - BIT(14) - CPP_TrnDataQRF[10:11]_sts parity status
+ */
+#define ADF_GEN4_TI_PULL0FUB_PAR_STS_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | BIT(6) | BIT(7) | \
+	 BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | BIT(14))
+
+/* TI PUSHFUB parity status */
+#define ADF_GEN4_TI_PUSHFUB_PAR_STS			0x500630
+
+/* TI PUSHFUB parity error reporting mask */
+#define ADF_GEN4_TI_PUSHFUB_PAR_ERR_MASK		0x50062C
+
+/*
+ * TI PUSHFUB parity status mask
+ * BIT(0) - SbPushReqQ_sts parity status
+ * BIT(1) - BIT(2) - SbPushDataQ[0:1]_sts parity status
+ * BIT(4) - CPP_CdPushReqQ_sts parity status
+ * BIT(5) - BIT(6) - CPP_CdPushDataQ[0:1]_sts parity status
+ * BIT(7) - CPP_SbPushReqQ_sts parity status
+ * BIT(8) - CPP_SbPushDataQP_sts parity status
+ * BIT(9) - BIT(10) - CPP_SbPushDataQRF[0:1]_sts parity status
+ */
+#define ADF_GEN4_TI_PUSHFUB_PAR_STS_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(2) | BIT(4) | BIT(5) | \
+	 BIT(6) | BIT(7) | BIT(8) | BIT(9) | BIT(10))
+
+/* TI CD parity status */
+#define ADF_GEN4_TI_CD_PAR_STS				0x50063C
+
+/* TI CD parity error mask */
+#define ADF_GEN4_TI_CD_PAR_ERR_MASK			0x500638
+
+/*
+ * TI CD parity status mask
+ * BIT(0) - BIT(15) - CtxMdRam[0:15]_sts parity status
+ * BIT(16) - Leaf2ClusterRam_sts parity status
+ * BIT(17) - BIT(18) - Ring2LeafRam[0:1]_sts parity status
+ * BIT(19) - VirtualQ_sts parity status
+ * BIT(20) - DtRdQ_sts parity status
+ * BIT(21) - DtWrQ_sts parity status
+ * BIT(22) - RiCmdQ_sts parity status
+ * BIT(23) - BypassQ_sts parity status
+ * BIT(24) - DtRdQ_sc_sts parity status
+ * BIT(25) - DtWrQ_sc_sts parity status
+ */
+#define ADF_GEN4_TI_CD_PAR_STS_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+	 BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | BIT(13) | \
+	 BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \
+	 BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25))
+
+/* TI TRNSB parity status */
+#define ADF_GEN4_TI_TRNSB_PAR_STS			0x500648
+
+/* TI TRNSB Parity error reporting mask */
+#define ADF_GEN4_TI_TRNSB_PAR_ERR_MASK			0x500644
+
+/*
+ * TI TRNSB parity status mask
+ * BIT(0) - TrnPHdrQP_sts parity status
+ * BIT(1) - TrnPHdrQRF_sts parity status
+ * BIT(2) - TrnPDataQP_sts parity status
+ * BIT(3) - BIT(6) - TrnPDataQRF[0:3]_sts parity status
+ * BIT(7) - TrnNpHdrQP_sts parity status
+ * BIT(8) - BIT(9) - TrnNpHdrQRF[0:1]_sts parity status
+ * BIT(10) - TrnCplHdrQ_sts parity status
+ * BIT(11) - TrnPutObsReqQ_sts parity status
+ * BIT(12) - TrnPushReqQ_sts parity status
+ * BIT(13) - SbSplitIdRam_sts parity status
+ * BIT(14) - SbReqCountQ_sts parity status
+ * BIT(15) - SbCplTrkRam_sts parity status
+ * BIT(16) - SbGetObsReqQ_sts parity status
+ * BIT(17) - SbEpochIdQ_sts parity status
+ * BIT(18) - SbAtCplHdrQ_sts parity status
+ * BIT(19) - SbAtCplDataQ_sts parity status
+ * BIT(20) - SbReqCountRam_sts parity status
+ * BIT(21) - SbAtCplHdrQ_sc_sts parity status
+ */
+#define ADF_GEN4_TI_TRNSB_PAR_STS_BITMASK	\
+	(BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6) | \
+	 BIT(7) | BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(12) | \
+	 BIT(13) | BIT(14) | BIT(15) | BIT(16) | BIT(17) | BIT(18) | \
+	 BIT(19) | BIT(20) | BIT(21))
+
+/* Status register to log misc error on RI */
+#define ADF_GEN4_RIMISCSTS				0x41B1B8
+
+/* Status control register to log misc RI error */
+#define ADF_GEN4_RIMISCCTL				0x41B1BC
+
+/*
+ * ERRSOU2 bit mask
+ * BIT(0) - SSM Interrupt Mask
+ * BIT(1) - CFC on CPP.
ORed of CFC Push error and Pull error + * BIT(2) - BIT(4) - CPP attention interrupts, deprecated on gen4 devices + * BIT(18) - PM interrupt + */ +#define ADF_GEN4_ERRSOU2_SSM_ERR_BIT BIT(0) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT BIT(1) +#define ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK \ + (BIT(2) | BIT(3) | BIT(4)) + +#define ADF_GEN4_ERRSOU2_PM_INT_BIT BIT(18) + +#define ADF_GEN4_ERRSOU2_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT) + +#define ADF_GEN4_ERRSOU2_DIS_BITMASK \ + (ADF_GEN4_ERRSOU2_SSM_ERR_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ERR_STATUS_BIT | \ + ADF_GEN4_ERRSOU2_CPP_CFC_ATT_INT_BITMASK) + +#define ADF_GEN4_IAINTSTATSSM 0x28 + +/* IAINTSTATSSM error bit mask definitions */ +#define ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT BIT(0) +#define ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT BIT(1) +#define ADF_GEN4_IAINTSTATSSM_PPERR_BIT BIT(2) +#define ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT BIT(3) +#define ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT BIT(4) +#define ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT BIT(5) +#define ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT BIT(6) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT BIT(7) +#define ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT BIT(8) + +#define ADF_GEN4_IAINTSTATSSM_BITMASK \ + (ADF_GEN4_IAINTSTATSSM_UERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_CERRSSMSH_BIT | \ + ADF_GEN4_IAINTSTATSSM_PPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SLICEHANG_ERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SPPPARERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMCPPERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SSMSOFTERRORPARITY_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_CERR_BIT | \ + ADF_GEN4_IAINTSTATSSM_SER_ERR_SSMSH_UNCERR_BIT) + +#define ADF_GEN4_UERRSSMSH 0x18 + +/* + * UERRSSMSH error bit masks definitions + * + * BIT(0) - Indicates one uncorrectable error + * BIT(15) - Indicates multiple uncorrectable errors + * in device shared memory + */ +#define ADF_GEN4_UERRSSMSH_BITMASK (BIT(0) | BIT(15)) + +#define ADF_GEN4_UERRSSMSHAD 0x1C + +#define ADF_GEN4_CERRSSMSH 0x10 + +/* + * CERRSSMSH error bit + * BIT(0) - Indicates one correctable error + */ +#define ADF_GEN4_CERRSSMSH_ERROR_BIT BIT(0) + +#define ADF_GEN4_CERRSSMSHAD 0x14 + +/* SSM error handling features enable register */ +#define ADF_GEN4_SSMFEATREN 0x198 + +/* + * Disable SSM error detection and reporting features + * enabled by device driver on RAS initialization + * + * following bits should be cleared : + * BIT(4) - Disable parity for CPP parity + * BIT(12) - Disable logging push/pull data error in pperr register. + * BIT(16) - BIT(23) - Disable parity for SPPs + * BIT(24) - BIT(27) - Disable parity for SPPs, if it's supported on the device. 
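+ * The ADF_GEN4_SSMFEATREN_DIS_BITMASK below is ANDed with the current
+ * SSMFEATREN value on disable, clearing the bits listed above while
+ * leaving the remaining feature bits set.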
+ */ +#define ADF_GEN4_SSMFEATREN_DIS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(5) | BIT(6) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(11) | BIT(13) | BIT(14) | BIT(15)) + +#define ADF_GEN4_INTMASKSSM 0x0 + +/* + * Error reporting mask in INTMASKSSM + * BIT(0) - Shared memory uncorrectable interrupt mask + * BIT(1) - Shared memory correctable interrupt mask + * BIT(2) - PPERR interrupt mask + * BIT(3) - CPP parity error Interrupt mask + * BIT(4) - SSM interrupt generated by SER correctable error mask + * BIT(5) - SSM interrupt generated by SER uncorrectable error + * - not stop and scream - mask + */ +#define ADF_GEN4_INTMASKSSM_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5)) + +/* CPP push or pull error */ +#define ADF_GEN4_PPERR 0x8 + +#define ADF_GEN4_PPERR_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_PPERRID 0xC + +/* Slice hang handling related registers */ +#define ADF_GEN4_SLICEHANGSTATUS_ATH_CPH 0x84 +#define ADF_GEN4_SLICEHANGSTATUS_CPR_XLT 0x88 +#define ADF_GEN4_SLICEHANGSTATUS_DCPR_UCS 0x90 +#define ADF_GEN4_SLICEHANGSTATUS_WAT_WCP 0x8C +#define ADF_GEN4_SLICEHANGSTATUS_PKE 0x94 + +#define ADF_GEN4_SHINTMASKSSM_ATH_CPH 0xF0 +#define ADF_GEN4_SHINTMASKSSM_CPR_XLT 0xF4 +#define ADF_GEN4_SHINTMASKSSM_DCPR_UCS 0xFC +#define ADF_GEN4_SHINTMASKSSM_WAT_WCP 0xF8 +#define ADF_GEN4_SHINTMASKSSM_PKE 0x100 + +/* SPP pull cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLCMDPARERR_ATH_CPH 0x1A4 +#define ADF_GEN4_SPPPULLCMDPARERR_CPR_XLT 0x1A8 +#define ADF_GEN4_SPPPULLCMDPARERR_DCPR_UCS 0x1B0 +#define ADF_GEN4_SPPPULLCMDPARERR_PKE 0x1B4 +#define ADF_GEN4_SPPPULLCMDPARERR_WAT_WCP 0x1AC + +/* SPP pull data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPULLDATAPARERR_ATH_CPH 0x1BC +#define ADF_GEN4_SPPPULLDATAPARERR_CPR_XLT 0x1C0 +#define ADF_GEN4_SPPPULLDATAPARERR_DCPR_UCS 0x1C8 +#define ADF_GEN4_SPPPULLDATAPARERR_PKE 0x1CC +#define ADF_GEN4_SPPPULLDATAPARERR_WAT_WCP 0x1C4 + +/* SPP push cmd parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHCMDPARERR_ATH_CPH 0x1D4 +#define ADF_GEN4_SPPPUSHCMDPARERR_CPR_XLT 0x1D8 +#define ADF_GEN4_SPPPUSHCMDPARERR_DCPR_UCS 0x1E0 +#define ADF_GEN4_SPPPUSHCMDPARERR_PKE 0x1E4 +#define ADF_GEN4_SPPPUSHCMDPARERR_WAT_WCP 0x1DC + +/* SPP push data parity err_*slice* CSR */ +#define ADF_GEN4_SPPPUSHDATAPARERR_ATH_CPH 0x1EC +#define ADF_GEN4_SPPPUSHDATAPARERR_CPR_XLT 0x1F0 +#define ADF_GEN4_SPPPUSHDATAPARERR_DCPR_UCS 0x1F8 +#define ADF_GEN4_SPPPUSHDATAPARERR_PKE 0x1FC +#define ADF_GEN4_SPPPUSHDATAPARERR_WAT_WCP 0x1F4 + +/* Accelerator SPP parity error mask registers */ +#define ADF_GEN4_SPPPARERRMSK_ATH_CPH 0x204 +#define ADF_GEN4_SPPPARERRMSK_CPR_XLT 0x208 +#define ADF_GEN4_SPPPARERRMSK_DCPR_UCS 0x210 +#define ADF_GEN4_SPPPARERRMSK_PKE 0x214 +#define ADF_GEN4_SPPPARERRMSK_WAT_WCP 0x20C + +#define ADF_GEN4_SSMCPPERR 0x224 + +/* + * Uncorrectable error mask in SSMCPPERR + * BIT(0) - indicates CPP command parity error + * BIT(1) - indicates CPP Main Push PPID parity error + * BIT(2) - indicates CPP Main ePPID parity error + * BIT(3) - indicates CPP Main push data parity error + * BIT(4) - indicates CPP Main Pull PPID parity error + * BIT(5) - indicates CPP target pull data parity error + */ +#define ADF_GEN4_SSMCPPERR_FATAL_BITMASK \ + (BIT(0) | BIT(1) | BIT(4)) + +#define ADF_GEN4_SSMCPPERR_UNCERR_BITMASK \ + (BIT(2) | BIT(3) | BIT(5)) + +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC 0x9C +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_SRC 0xB8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_ATH_CPH 0xA0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_ATH_CPH 
0xBC + +#define ADF_GEN4_SSMSOFTERRORPARITY_CPR_XLT 0xA4 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_CPR_XLT 0xC0 + +#define ADF_GEN4_SSMSOFTERRORPARITY_DCPR_UCS 0xAC +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_DCPR_UCS 0xC8 + +#define ADF_GEN4_SSMSOFTERRORPARITY_PKE 0xB0 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_PKE 0xCC + +#define ADF_GEN4_SSMSOFTERRORPARITY_WAT_WCP 0xA8 +#define ADF_GEN4_SSMSOFTERRORPARITYMASK_WAT_WCP 0xC4 + +/* RF parity error detected in SharedRAM */ +#define ADF_GEN4_SSMSOFTERRORPARITY_SRC_BIT BIT(0) + +#define ADF_GEN4_SER_ERR_SSMSH 0x44C + +/* + * Fatal error mask in SER_ERR_SSMSH + * BIT(0) - Indicates an uncorrectable error has occurred in the + * accelerator controller command RFs + * BIT(2) - Parity error occurred in the bank SPP fifos + * BIT(3) - Indicates parity error occurred in the following fifos in + * the design + * BIT(4) - Parity error occurred in flops in the design + * BIT(5) - Uncorrectable error has occurred in the + * target push and pull data register flop + * BIT(7) - Indicates parity error occurred in the Resource Manager + * pending lock request fifos + * BIT(8) - Indicates parity error occurred in the Resource Manager + * MECTX command queues logic + * BIT(9) - Indicates parity error occurred in the Resource Manager + * MECTX sigdone fifo flops + * BIT(10) - Indicates an uncorrectable error has occurred in the + * Resource Manager MECTX command RFs + * BIT(14) - Parity error occurred in Buffer Manager sigdone FIFO + */ +#define ADF_GEN4_SER_ERR_SSMSH_FATAL_BITMASK \ + (BIT(0) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(7) | \ + BIT(8) | BIT(9) | BIT(10) | BIT(14)) + +/* + * Uncorrectable error mask in SER_ERR_SSMSH + * BIT(12) - Parity error occurred in Buffer Manager pool 0 + * BIT(13) - Parity error occurred in Buffer Manager pool 1 + */ +#define ADF_GEN4_SER_ERR_SSMSH_UNCERR_BITMASK \ + (BIT(12) | BIT(13)) + +/* + * Correctable error mask in SER_ERR_SSMSH + * BIT(1) - Indicates a correctable error has occurred + * in the slice controller command RFs + * BIT(6) - Indicates a correctable error has occurred in + * the target push and pull data RFs + * BIT(11) - Indicates a correctable error has occurred in + * the Resource Manager MECTX command RFs + */ +#define ADF_GEN4_SER_ERR_SSMSH_CERR_BITMASK \ + (BIT(1) | BIT(6) | BIT(11)) + +/* SSM shared memory SER error reporting mask */ +#define ADF_GEN4_SER_EN_SSMSH 0x450 + +/* + * SSM SER error reporting mask in SER_en_err_ssmsh + * BIT(0) - Enables uncorrectable error detection in: + * 1) slice controller command RFs + * 2) target push/pull data registers + * BIT(1) - Enables correctable error detection in: + * 1) slice controller command RFs + * 2) target push/pull data registers + * BIT(2) - Enables parity error detection in + * 1) bank SPP fifos + * 2) gen4_pull_id_queue + * 3) gen4_push_id_queue + * 4) AE_pull_sigdn_fifo + * 5) DT_push_sigdn_fifo + * 6) slx_push_sigdn_fifo + * 7) secure_push_cmd_fifo + * 8) secure_pull_cmd_fifo + * 9) Head register in FIFO wrapper + * 10) current_cmd in individual push queue + * 11) current_cmd in individual pull queue + * 12) push_command_rxp arbitrated in ssm_push_cmd_queues + * 13) pull_command_rxp arbitrated in ssm_pull_cmd_queues + * BIT(3) - Enables uncorrectable error detection in + * the resource manager mectx cmd RFs.
+ * BIT(4) - Enables correctable error detection in the Resource Manager + * mectx command RFs + * BIT(5) - Enables parity error detection in + * 1) resource manager lock request fifo + * 2) mectx cmdqueues logic + * 3) mectx sigdone fifo + * BIT(6) - Enables parity error detection in Buffer Manager pools + * and sigdone fifo + */ +#define ADF_GEN4_SER_EN_SSMSH_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_CPP_CFC_ERR_STATUS 0x640C04 + +/* + * BIT(1) - Indicates multiple CPP CFC errors + * BIT(7) - Indicates CPP CFC command parity error type + * BIT(8) - Indicates CPP CFC data parity error type + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_MERR_BIT BIT(1) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CMDPAR_BIT BIT(7) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_DATAPAR_BIT BIT(8) + +/* + * BIT(0) - Enables CFC to detect and log push/pull data error + * BIT(1) - Enables CFC to generate interrupt to PCIEP for CPP error + * BIT(4) - When 1, parity detection is disabled + * BIT(5) - When 1, parity detection is disabled on CPP command bus + * BIT(6) - When 1, parity detection is disabled on CPP push/pull bus + * BIT(9) - When 1, RF parity error detection is disabled + */ +#define ADF_GEN4_CPP_CFC_ERR_CTRL_BITMASK (BIT(0) | BIT(1)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL_DIS_BITMASK \ + (BIT(4) | BIT(5) | BIT(6) | BIT(9) | BIT(10)) + +#define ADF_GEN4_CPP_CFC_ERR_CTRL 0x640C00 + +/* + * BIT(0) - Clears bit(0) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when an error is reported on CPP + * BIT(1) - Clears bit(1) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when multiple errors are reported on CPP + * BIT(2) - Clears bit(2) of ADF_GEN4_CPP_CFC_ERR_STATUS + * when attention interrupt is reported + */ +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR_BITMASK (BIT(0) | BIT(1) | BIT(2)) +#define ADF_GEN4_CPP_CFC_ERR_STATUS_CLR 0x640C08 + +#define ADF_GEN4_CPP_CFC_ERR_PPID_LO 0x640C0C +#define ADF_GEN4_CPP_CFC_ERR_PPID_HI 0x640C10 + +/* Exception reporting in QAT SSM CMP */ +#define ADF_GEN4_EXPRPSSMCPR 0x2000 + +/* + * Uncorrectable error mask in EXPRPSSMCPR + * BIT(2) - Hard fatal error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in CPR Hash Table + * BIT(19) - Parity error detected in CPR History Buffer Copy 0 + * BIT(20) - Parity error detected in CPR History Buffer Copy 1 + * BIT(21) - Parity error detected in CPR History Buffer Copy 2 + * BIT(22) - Parity error detected in CPR History Buffer Copy 3 + * BIT(23) - Parity error detected in CPR History Buffer Copy 4 + * BIT(24) - Parity error detected in CPR History Buffer Copy 5 + * BIT(25) - Parity error detected in CPR History Buffer Copy 6 + * BIT(26) - Parity error detected in CPR History Buffer Copy 7 + */ +#define ADF_GEN4_EXPRPSSMCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | \ + BIT(21) | BIT(22) | BIT(23) | BIT(24) | BIT(25) | BIT(26)) + +/* Exception reporting in QAT SSM XLT */ +#define ADF_GEN4_EXPRPSSMXLT 0xA000 + +/* + * Uncorrectable error mask in EXPRPSSMXLT + * BIT(2) - If set, an uncorrectable error event occurred + * BIT(16) - Parity error detected in XLT Push FIFO + * BIT(17) - Parity error detected in XLT Pull FIFO + * BIT(18) - Parity error detected in XLT HCTB0 + * BIT(19) - Parity error detected in XLT HCTB1 + * BIT(20) - Parity error detected in XLT HCTB2 + * BIT(21) - Parity error detected in XLT HCTB3 + * BIT(22) - Parity error detected in XLT CBCL + * BIT(23) - Parity error detected in XLT LITPTR
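+ *
+ * Illustrative status check only (the actual handling lives in the
+ * GEN4 RAS code that consumes this header):
+ *   reg = ADF_CSR_RD(csr, ADF_GEN4_EXPRPSSMXLT);
+ *   if (reg & ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK)
+ *           treat the event as an uncorrectable error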
+ */ +#define ADF_GEN4_EXPRPSSMXLT_UNCERR_BITMASK \ + (BIT(2) | BIT(16) | BIT(17) | BIT(18) | BIT(19) | BIT(20) | BIT(21) | \ + BIT(22) | BIT(23)) + +/* + * Correctable error mask in EXPRPSSMXLT + * BIT(3) - Correctable error event occurred. + */ +#define ADF_GEN4_EXPRPSSMXLT_CERR_BIT BIT(3) + +/* Exception reporting in QAT SSM DCMP */ +#define ADF_GEN4_EXPRPSSMDCPR(_n_) (0x12000 + (_n_) * 0x80) + +/* + * Uncorrectable error mask in EXPRPSSMDCPR + * BIT(2) - Even hard fatal error + * BIT(4) - Odd hard fatal error + * BIT(6) - Decode soft error + * BIT(16) - Parity error detected in CPR Push FIFO + * BIT(17) - Parity error detected in CPR Pull FIFO + * BIT(18) - Parity error detected in the Input Buffer + * BIT(19) - symbuf0parerr + * Parity error detected in symbol buffer 0 + * BIT(20) - symbuf1parerr + * Parity error detected in symbol buffer 1 + */ +#define ADF_GEN4_EXPRPSSMDCPR_UNCERR_BITMASK \ + (BIT(2) | BIT(4) | BIT(6) | BIT(16) | BIT(17) | \ + BIT(18) | BIT(19) | BIT(20)) + +/* + * Correctable error mask in EXPRPSSMDCPR + * BIT(3) - Even ecc correctable error + * BIT(5) - Odd ecc correctable error + */ +#define ADF_GEN4_EXPRPSSMDCPR_CERR_BITMASK (BIT(3) | BIT(5)) + +#define ADF_GEN4_DCPR_SLICES_NUM 3 + +/* + * ERRSOU3 bit masks + * BIT(0) - indicates error Response Order Overflow and/or BME error + * BIT(1) - indicates RI push/pull error + * BIT(2) - indicates TI push/pull error + * BIT(3) - indicates ARAM correctable error + * BIT(4) - indicates ARAM uncorrectable error + * BIT(5) - indicates TI pull parity error + * BIT(6) - indicates RI push parity error + * BIT(7) - indicates VFLR interrupt + * BIT(8) - indicates ring pair interrupts for ATU detected fault + * BIT(9) - indicates error when accessing RLT block + */ +#define ADF_GEN4_ERRSOU3_TIMISCSTS_BIT BIT(0) +#define ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK (BIT(1) | BIT(6)) +#define ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK (BIT(2) | BIT(5)) +#define ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT BIT(3) +#define ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT BIT(4) +#define ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT BIT(7) +#define ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT BIT(8) +#define ADF_GEN4_ERRSOU3_RLTERROR_BIT BIT(9) + +#define ADF_GEN4_ERRSOU3_BITMASK ( \ + (ADF_GEN4_ERRSOU3_TIMISCSTS_BIT) | \ + (ADF_GEN4_ERRSOU3_RICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_TICPPINTSTS_BITMASK) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMCERR_BIT) | \ + (ADF_GEN4_ERRSOU3_REG_ARAMUERR_BIT) | \ + (ADF_GEN4_ERRSOU3_VFLRNOTIFY_BIT) | \ + (ADF_GEN4_ERRSOU3_ATUFAULTSTATUS_BIT) | \ + (ADF_GEN4_ERRSOU3_RLTERROR_BIT)) + +/* TI Misc status register */ +#define ADF_GEN4_TIMISCSTS 0x50054C + +/* TI Misc error reporting mask */ +#define ADF_GEN4_TIMISCCTL 0x500548 + +/* + * TI Misc error reporting control mask + * BIT(0) - Enables error detection and logging in TIMISCSTS register + * BIT(1) - Has effect only when SR-IOV is enabled; this bit is 0 by default + * BIT(2) - Enables the D-F-x counter within the dispatch arbiter + * to start based on the command triggered from + * BIT(30) - Disables VFLR functionality + * Setting this bit reverts to CPM1.x functionality + * The values of bits 1, 2 and 30 should be preserved and are not meant + * to be changed within RAS.
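+ *
+ * i.e. updates should be read-modify-write so that only BIT(0) changes,
+ * e.g. (illustrative sketch):
+ *   val = ADF_CSR_RD(csr, ADF_GEN4_TIMISCCTL);
+ *   val |= ADF_GEN4_TIMISCCTL_BIT;
+ *   ADF_CSR_WR(csr, ADF_GEN4_TIMISCCTL, val);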
+ */ +#define ADF_GEN4_TIMISCCTL_BIT BIT(0) +#define ADF_GEN4_TIMSCCTL_RELAY_BITMASK (BIT(1) | BIT(2) | BIT(30)) + +/* RI CPP interface status register */ +#define ADF_GEN4_RICPPINTSTS 0x41A330 + +/* + * Uncorrectable error mask in RICPPINTSTS register + * BIT(0) - RI asserted the CPP error signal during a push + * BIT(1) - RI detected the CPP error signal asserted during a pull + * BIT(2) - RI detected a push data parity error + * BIT(3) - RI detected a push valid parity error + */ +#define ADF_GEN4_RICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3)) + +/* RI CPP interface status register control */ +#define ADF_GEN4_RICPPINTCTL 0x41A32C + +/* + * Control bit mask for RICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting + * on the RI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting + * on the RI CPP Pull interface + * BIT(2) - value of 1 enables error detection and reporting + * of RI parity errors + * BIT(3) - value of 1 enables parity checking on CPP + * BIT(4) - value of 1 enables the stop feature of the stop and scream mode + * for all RI CPP Command RFs + */ +#define ADF_GEN4_RICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPUSHID 0x41A334 + +/* Pull ID of the command which triggered the transaction error on RI */ +#define ADF_GEN4_RIERRPULLID 0x41A338 + +/* TI CPP interface status register */ +#define ADF_GEN4_TICPPINTSTS 0x50053C + +/* + * Uncorrectable error mask in TICPPINTSTS register + * BIT(0) - value of 1 indicates that the TI asserted + * the CPP error signal during a push + * BIT(1) - value of 1 indicates that the TI detected + * the CPP error signal asserted during a pull + * BIT(2) - value of 1 indicates that the TI detected + * a pull data parity error + */ +#define ADF_GEN4_TICPPINTSTS_BITMASK \ + (BIT(0) | BIT(1) | BIT(2)) + +/* TI CPP interface status register control */ +#define ADF_GEN4_TICPPINTCTL 0x500538 + +/* + * Control bit mask for TICPPINTCTL register + * BIT(0) - value of 1 enables error detection and reporting on + * the TI CPP Push interface + * BIT(1) - value of 1 enables error detection and reporting on + * the TI CPP Pull interface + * BIT(2) - value of 1 enables parity error detection and logging on + * the TI CPP Pull interface + * BIT(3) - value of 1 enables CPP CMD and Pull Data parity checking + * BIT(4) - value of 1 enables TI stop part of stop and scream mode on + * CPP/RF Parity error + */ +#define ADF_GEN4_TICPPINTCTL_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4)) + +/* Push ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPUSHID 0x500540 + +/* Pull ID of the command which triggered the transaction error on TI */ +#define ADF_GEN4_TIERRPULLID 0x500544 + +/* Correctable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERR 0x1700 + +#define ADF_GEN4_REG_ARAMCERR_BIT BIT(0) + +/* + * Correctable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log correctable error + * BIT(26) - enables ARAM agent to generate interrupt for correctable error + */ +#define ADF_GEN4_REG_ARAMCERR_EN_BITMASK (BIT(3) | BIT(26)) + +/* Correctable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMCERRAD 0x1708 + +/* Uncorrectable error in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERR 0x1704 + +/* + * ARAM error bit mask + * BIT(0) - indicates error logged in ARAMCERR or ARAMUERR + *
BIT(18) - indicates uncorrectable multiple errors in ARAM agent + */ +#define ADF_GEN4_REG_ARAMUERR_ERROR_BIT BIT(0) +#define ADF_GEN4_REG_ARAMUERR_MULTI_ERRORS_BIT BIT(18) + +/* + * Uncorrectable error enablement in ARAM bit mask + * BIT(3) - enable ARAM RAM to fix and log uncorrectable error + * BIT(19) - enables ARAM agent to generate interrupt for uncorrectable error + */ +#define ADF_GEN4_REG_ARAMUERR_EN_BITMASK (BIT(3) | BIT(19)) + +/* Uncorrectable error address in ARAM agent register */ +#define ADF_GEN4_REG_ARAMUERRAD 0x170C + +/* Uncorrectable error transaction push/pull ID registers */ +#define ADF_GEN4_REG_ERRPPID_LO 0x1714 +#define ADF_GEN4_REG_ERRPPID_HI 0x1718 + +/* ARAM ECC block error enablement */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN 0x1808 + +/* + * ARAM ECC block error control bit masks + * BIT(0) - enable ARAM CD ECC block error detecting + * BIT(1) - enable ARAM pull request ECC error detecting + * BIT(2) - enable ARAM command dispatch ECC error detecting + * BIT(3) - enable ARAM read datapath push ECC error detecting + * BIT(4) - enable ARAM read datapath pull ECC error detecting + * BIT(5) - enable ARAM RMW ECC error detecting + * BIT(6) - enable ARAM write datapath RMW ECC error detecting + * BIT(7) - enable ARAM write datapath ECC error detecting + */ +#define ADF_GEN4_REG_ARAMCERRUERR_EN_BITMASK \ + (BIT(0) | BIT(1) | BIT(2) | BIT(3) | BIT(4) | \ + BIT(5) | BIT(6) | BIT(7)) + +/* ARAM misc memory target error registers */ +#define ADF_GEN4_REG_CPPMEMTGTERR 0x1710 + +/* + * ARAM misc memory target error bit masks + * BIT(0) - indicates an error in ARAM target memory + * BIT(1) - indicates multiple errors in ARAM target memory + * BIT(4) - indicates pull error in ARAM target memory + * BIT(5) - indicates parity pull error in ARAM target memory + * BIT(6) - indicates push error in ARAM target memory + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_BITMASK \ + (BIT(0) | BIT(4) | BIT(5) | BIT(6)) + +#define ADF_GEN4_REG_CPPMEMTGTERR_MULTI_ERRORS_BIT BIT(1) + +/* + * ARAM misc memory target error enablement mask + * BIT(2) - enables CPP memory to detect and log push/pull data error + * BIT(7) - enables push/pull error to generate interrupts to RI + * BIT(8) - enables ARAM to check parity on pull data and CPP command buses + * BIT(9) - enables ARAM to autopush to AE when push/parity error is detected + * on lookaside DT + */ +#define ADF_GEN4_REG_CPPMEMTGTERR_EN_BITMASK \ + (BIT(2) | BIT(7) | BIT(8) | BIT(9)) + +/* ATU fault status register */ +#define ADF_GEN4_ATUFAULTSTATUS(i) (0x506000 + ((i) * 0x4)) + +#define ADF_GEN4_ATUFAULTSTATUS_BIT BIT(0) + +/* Command Parity error detected on IOSFP Command to QAT */ +#define ADF_GEN4_RIMISCSTS_BIT BIT(0) + +void adf_gen4_init_ras_ops(struct adf_ras_ops *ras_ops); + +#endif /* ADF_GEN4_RAS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c index 646c57922fcda5c1db50d4c0cb416936bd2ec7dd..35ccb91d6ec1b9060d368bc71a93e68bed77217c 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_timer.c @@ -9,6 +9,7 @@ #include #include +#include "adf_admin.h" #include "adf_accel_devices.h" #include "adf_common_drv.h" #include "adf_gen4_timer.h" diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c new file mode 100644 index 0000000000000000000000000000000000000000..7fc7a77f6aed93e3d56efea23d94ae5a3b5a995f --- /dev/null +++
b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. */ +#include +#include + +#include "adf_gen4_tl.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define ADF_GEN4_TL_DEV_REG_OFF(reg) ADF_TL_DEV_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_RP_REG_OFF(reg) ADF_TL_RP_REG_OFF(reg, gen4) + +#define ADF_GEN4_TL_SL_UTIL_COUNTER(_name) \ + ADF_TL_COUNTER("util_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_util, gen4)) + +#define ADF_GEN4_TL_SL_EXEC_COUNTER(_name) \ + ADF_TL_COUNTER("exec_" #_name, \ + ADF_TL_SIMPLE_COUNT, \ + ADF_TL_SLICE_REG_OFF(_name, reg_tm_slice_exec_cnt, gen4)) + +/* Device level counters. */ +static const struct adf_tl_dbg_counter dev_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt)), + /* Max read latency[ns]. */ + ADF_TL_COUNTER(MAX_RD_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_max)), + /* Read latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(RD_LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_rd_cmpl_cnt)), + /* Max get to put latency[ns]. */ + ADF_TL_COUNTER(MAX_LAT_NAME, ADF_TL_COUNTER_NS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_max)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_bw_out)), + /* Page request latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(PAGE_REQ_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_page_req_cnt)), + /* Page translation latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(AT_TRANS_LAT_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_acc), + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_trans_lat_cnt)), + /* Maximum uTLB used. */ + ADF_TL_COUNTER(AT_MAX_UTLB_USED_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_DEV_REG_OFF(reg_tl_at_max_tlb_used)), +}; + +/* Slice utilization counters. */ +static const struct adf_tl_dbg_counter sl_util_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cpr), + /* Translator slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(xlt), + /* Decompression slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(dcpr), + /* PKE utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(pke), + /* Wireless Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wat), + /* Wireless Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(wcp), + /* UCS slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ucs), + /* Cipher slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(cph), + /* Authentication slice utilization. */ + ADF_GEN4_TL_SL_UTIL_COUNTER(ath), +}; + +/* Slice execution counters. */ +static const struct adf_tl_dbg_counter sl_exec_counters[ADF_TL_SL_CNT_COUNT] = { + /* Compression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cpr), + /* Translator slice execution count. 
*/ + ADF_GEN4_TL_SL_EXEC_COUNTER(xlt), + /* Decompression slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(dcpr), + /* PKE execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(pke), + /* Wireless Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wat), + /* Wireless Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(wcp), + /* UCS slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ucs), + /* Cipher slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(cph), + /* Authentication slice execution count. */ + ADF_GEN4_TL_SL_EXEC_COUNTER(ath), +}; + +/* Ring pair counters. */ +static const struct adf_tl_dbg_counter rp_counters[] = { + /* PCIe partial transactions. */ + ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_pci_trans_cnt)), + /* Get to put latency average[ns]. */ + ADF_TL_COUNTER_LATENCY(LAT_ACC_NAME, ADF_TL_COUNTER_NS_AVG, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_gp_lat_acc), + ADF_GEN4_TL_RP_REG_OFF(reg_tl_ae_put_cnt)), + /* PCIe write bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_IN_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_in)), + /* PCIe read bandwidth[Mbps]. */ + ADF_TL_COUNTER(BW_OUT_NAME, ADF_TL_COUNTER_MBPS, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_bw_out)), + /* Message descriptor DevTLB hit rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_hit)), + /* Message descriptor DevTLB miss rate. */ + ADF_TL_COUNTER(AT_GLOB_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_glob_devtlb_miss)), + /* Payload DevTLB hit rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_HIT_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_hit)), + /* Payload DevTLB miss rate. */ + ADF_TL_COUNTER(AT_PAYLD_DTLB_MISS_NAME, ADF_TL_SIMPLE_COUNT, + ADF_GEN4_TL_RP_REG_OFF(reg_tl_at_payld_devtlb_miss)), +}; + +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ + tl_data->layout_sz = ADF_GEN4_TL_LAYOUT_SZ; + tl_data->slice_reg_sz = ADF_GEN4_TL_SLICE_REG_SZ; + tl_data->rp_reg_sz = ADF_GEN4_TL_RP_REG_SZ; + tl_data->num_hbuff = ADF_GEN4_TL_NUM_HIST_BUFFS; + tl_data->max_rp = ADF_GEN4_TL_MAX_RP_NUM; + tl_data->msg_cnt_off = ADF_GEN4_TL_MSG_CNT_OFF; + tl_data->cpp_ns_per_cycle = ADF_GEN4_CPP_NS_PER_CYCLE; + tl_data->bw_units_to_bytes = ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES; + + tl_data->dev_counters = dev_counters; + tl_data->num_dev_counters = ARRAY_SIZE(dev_counters); + tl_data->sl_util_counters = sl_util_counters; + tl_data->sl_exec_counters = sl_exec_counters; + tl_data->rp_counters = rp_counters; + tl_data->num_rp_counters = ARRAY_SIZE(rp_counters); +} +EXPORT_SYMBOL_GPL(adf_gen4_init_tl_data); diff --git a/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h new file mode 100644 index 0000000000000000000000000000000000000000..32df4163beb9f098664b5be8fcd3b0440a5db53c --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_gen4_tl.h @@ -0,0 +1,158 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_GEN4_TL_H +#define ADF_GEN4_TL_H + +#include +#include + +struct adf_tl_hw_data; + +/* Computation constants. */ +#define ADF_GEN4_CPP_NS_PER_CYCLE 2 +#define ADF_GEN4_TL_BW_HW_UNITS_TO_BYTES 64 + +/* Maximum aggregation time. Value in milliseconds. */ +#define ADF_GEN4_TL_MAX_AGGR_TIME_MS 4000 +/* Num of buffers to store historic values. 
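+ * (one history buffer is kept per ADF_TL_DATA_WR_INTERVAL_MS sampling
+ * period within the ADF_GEN4_TL_MAX_AGGR_TIME_MS aggregation window)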
*/ +#define ADF_GEN4_TL_NUM_HIST_BUFFS \ + (ADF_GEN4_TL_MAX_AGGR_TIME_MS / ADF_TL_DATA_WR_INTERVAL_MS) + +/* Max number of HW resources of one type. */ +#define ADF_GEN4_TL_MAX_SLICES_PER_TYPE 24 + +/* Max number of simultaneously monitored ring pairs. */ +#define ADF_GEN4_TL_MAX_RP_NUM 4 + +/** + * struct adf_gen4_tl_slice_data_regs - HW slice data as populated by FW. + * @reg_tm_slice_exec_cnt: Slice execution count. + * @reg_tm_slice_util: Slice utilization. + */ +struct adf_gen4_tl_slice_data_regs { + __u32 reg_tm_slice_exec_cnt; + __u32 reg_tm_slice_util; +}; + +#define ADF_GEN4_TL_SLICE_REG_SZ sizeof(struct adf_gen4_tl_slice_data_regs) + +/** + * struct adf_gen4_tl_device_data_regs - This structure stores device telemetry + * counter values as they are periodically populated by the device. + * @reg_tl_rd_lat_acc: read latency accumulator + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reg_tl_at_page_req_lat_acc: AT/DevTLB page request latency accumulator + * @reg_tl_at_trans_lat_acc: DevTLB transaction latency accumulator + * @reg_tl_re_acc: accumulated ring empty time + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_rd_lat_max: maximum logged read latency + * @reg_tl_rd_cmpl_cnt: read requests completed count + * @reg_tl_gp_lat_max: maximum logged get to put latency + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_page_req_cnt: DevTLB page requests count + * @reg_tl_at_trans_lat_cnt: DevTLB transaction latency samples count + * @reg_tl_at_max_tlb_used: maximum uTLB used + * @reg_tl_re_cnt: ring empty time samples count + * @reserved: reserved + * @ath_slices: array of Authentication slices utilization registers + * @cph_slices: array of Cipher slices utilization registers + * @cpr_slices: array of Compression slices utilization registers + * @xlt_slices: array of Translator slices utilization registers + * @dcpr_slices: array of Decompression slices utilization registers + * @pke_slices: array of PKE slices utilization registers + * @ucs_slices: array of UCS slices utilization registers + * @wat_slices: array of Wireless Authentication slices utilization registers + * @wcp_slices: array of Wireless Cipher slices utilization registers + */ +struct adf_gen4_tl_device_data_regs { + __u64 reg_tl_rd_lat_acc; + __u64 reg_tl_gp_lat_acc; + __u64 reg_tl_at_page_req_lat_acc; + __u64 reg_tl_at_trans_lat_acc; + __u64 reg_tl_re_acc; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_rd_lat_max; + __u32 reg_tl_rd_cmpl_cnt; + __u32 reg_tl_gp_lat_max; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_page_req_cnt; + __u32 reg_tl_at_trans_lat_cnt; + __u32 reg_tl_at_max_tlb_used; + __u32 reg_tl_re_cnt; + __u32 reserved; + struct adf_gen4_tl_slice_data_regs ath_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cph_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs cpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs xlt_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs dcpr_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs pke_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs ucs_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs wat_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; + struct adf_gen4_tl_slice_data_regs
wcp_slices[ADF_GEN4_TL_MAX_SLICES_PER_TYPE]; +}; + +/** + * struct adf_gen4_tl_ring_pair_data_regs - This structure stores Ring Pair + * telemetry counter values as they are periodically populated by the device. + * @reg_tl_gp_lat_acc: get-put latency accumulator + * @reserved: reserved + * @reg_tl_pci_trans_cnt: PCIe partial transactions + * @reg_tl_ae_put_cnt: Accelerator Engine put counts across all rings + * @reg_tl_bw_in: PCIe write bandwidth + * @reg_tl_bw_out: PCIe read bandwidth + * @reg_tl_at_glob_devtlb_hit: Message descriptor DevTLB hit rate + * @reg_tl_at_glob_devtlb_miss: Message descriptor DevTLB miss rate + * @reg_tl_at_payld_devtlb_hit: Payload DevTLB hit rate + * @reg_tl_at_payld_devtlb_miss: Payload DevTLB miss rate + * @reg_tl_re_cnt: ring empty time samples count + * @reserved1: reserved + */ +struct adf_gen4_tl_ring_pair_data_regs { + __u64 reg_tl_gp_lat_acc; + __u64 reserved; + __u32 reg_tl_pci_trans_cnt; + __u32 reg_tl_ae_put_cnt; + __u32 reg_tl_bw_in; + __u32 reg_tl_bw_out; + __u32 reg_tl_at_glob_devtlb_hit; + __u32 reg_tl_at_glob_devtlb_miss; + __u32 reg_tl_at_payld_devtlb_hit; + __u32 reg_tl_at_payld_devtlb_miss; + __u32 reg_tl_re_cnt; + __u32 reserved1; +}; + +#define ADF_GEN4_TL_RP_REG_SZ sizeof(struct adf_gen4_tl_ring_pair_data_regs) + +/** + * struct adf_gen4_tl_layout - This structure represents the entire telemetry + * counter data: Device + 4 Ring Pairs, as periodically populated + * by the device. + * @tl_device_data_regs: structure of device telemetry registers + * @tl_ring_pairs_data_regs: array of ring pairs telemetry registers + * @reg_tl_msg_cnt: telemetry messages counter + * @reserved: reserved + */ +struct adf_gen4_tl_layout { + struct adf_gen4_tl_device_data_regs tl_device_data_regs; + struct adf_gen4_tl_ring_pair_data_regs + tl_ring_pairs_data_regs[ADF_GEN4_TL_MAX_RP_NUM]; + __u32 reg_tl_msg_cnt; + __u32 reserved; +}; + +#define ADF_GEN4_TL_LAYOUT_SZ sizeof(struct adf_gen4_tl_layout) +#define ADF_GEN4_TL_MSG_CNT_OFF offsetof(struct adf_gen4_tl_layout, reg_tl_msg_cnt) + +#ifdef CONFIG_DEBUG_FS +void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data); +#else +static inline void adf_gen4_init_tl_data(struct adf_tl_hw_data *tl_data) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_GEN4_TL_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c index beef9a5f6c75c0868d9b4be0f69c572a199e068a..b19aa1ef8eeed9f55a89426cddebe165c4717cb5 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.c @@ -12,6 +12,7 @@ #include #include #include "adf_accel_devices.h" +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_cfg_strings.h" #include "adf_clock.h" @@ -22,12 +23,6 @@ #define ADF_HB_EMPTY_SIG 0xA5A5A5A5 -/* Heartbeat counter pair */ -struct hb_cnt_pair { - __u16 resp_heartbeat_cnt; - __u16 req_heartbeat_cnt; -}; - static int adf_hb_check_polling_freq(struct adf_accel_dev *accel_dev) { u64 curr_time = adf_clock_get_current_time(); @@ -210,6 +205,19 @@ static int adf_hb_get_status(struct adf_accel_dev *accel_dev) return ret; } +static void adf_heartbeat_reset(struct adf_accel_dev *accel_dev) +{ + u64 curr_time = adf_clock_get_current_time(); + u64 time_since_reset = curr_time - accel_dev->heartbeat->last_hb_reset_time; + + if (time_since_reset < ADF_CFG_HB_RESET_MS) + return; + + accel_dev->heartbeat->last_hb_reset_time = curr_time; + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), "Failed to
notify fatal error\n"); +} + void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status) { @@ -234,6 +242,7 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, "Heartbeat ERROR: QAT is not responding.\n"); *hb_status = HB_DEV_UNRESPONSIVE; hb->hb_failed_counter++; + adf_heartbeat_reset(accel_dev); return; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h index b22e3cb29798ec57200d82ea110a087105c53273..16fdfb48b196acd33f020f9cd580a5d026b5a2f7 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat.h @@ -13,17 +13,26 @@ struct dentry; #define ADF_CFG_HB_TIMER_DEFAULT_MS 500 #define ADF_CFG_HB_COUNT_THRESHOLD 3 +#define ADF_CFG_HB_RESET_MS 5000 + enum adf_device_heartbeat_status { HB_DEV_UNRESPONSIVE = 0, HB_DEV_ALIVE, HB_DEV_UNSUPPORTED, }; +/* Heartbeat counter pair */ +struct hb_cnt_pair { + __u16 resp_heartbeat_cnt; + __u16 req_heartbeat_cnt; +}; + struct adf_heartbeat { unsigned int hb_sent_counter; unsigned int hb_failed_counter; unsigned int hb_timer; u64 last_hb_check_time; + u64 last_hb_reset_time; bool ctrs_cnt_checked; struct hb_dma_addr { dma_addr_t phy_addr; @@ -35,6 +44,9 @@ struct adf_heartbeat { struct dentry *cfg; struct dentry *sent; struct dentry *failed; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + struct dentry *inject_error; +#endif } dbgfs; }; @@ -51,6 +63,15 @@ void adf_heartbeat_status(struct adf_accel_dev *accel_dev, enum adf_device_heartbeat_status *hb_status); void adf_heartbeat_check_ctrs(struct adf_accel_dev *accel_dev); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev); +#else +static inline int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + return -EPERM; +} +#endif + #else static inline int adf_heartbeat_init(struct adf_accel_dev *accel_dev) { diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c index 803cbfd838f0a1333e639d642653b034f81b0034..cccdff24b48d61baf2c70165c0f0d6be254b18ad 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_dbgfs.c @@ -8,6 +8,7 @@ #include #include #include +#include "adf_admin.h" #include "adf_cfg.h" #include "adf_common_drv.h" #include "adf_heartbeat.h" @@ -154,6 +155,44 @@ static const struct file_operations adf_hb_cfg_fops = { .write = adf_hb_cfg_write, }; +static ssize_t adf_hb_error_inject_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct adf_accel_dev *accel_dev = file->private_data; + char buf[3]; + int ret; + + /* last byte left as string termination */ + if (*ppos != 0 || count != 2) + return -EINVAL; + + if (copy_from_user(buf, user_buf, count)) + return -EFAULT; + buf[count] = '\0'; + + if (buf[0] != '1') + return -EINVAL; + + ret = adf_heartbeat_inject_error(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "Heartbeat error injection failed with status %d\n", + ret); + return ret; + } + + dev_info(&GET_DEV(accel_dev), "Heartbeat error injection enabled\n"); + + return count; +} + +static const struct file_operations adf_hb_error_inject_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .write = adf_hb_error_inject_write, +}; + void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) { struct adf_heartbeat *hb = accel_dev->heartbeat; @@ 
-170,6 +209,17 @@ void adf_heartbeat_dbgfs_add(struct adf_accel_dev *accel_dev) &hb->hb_failed_counter, &adf_hb_stats_fops); hb->dbgfs.cfg = debugfs_create_file("config", 0600, hb->dbgfs.base_dir, accel_dev, &adf_hb_cfg_fops); + + if (IS_ENABLED(CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION)) { + struct dentry *inject_error __maybe_unused; + + inject_error = debugfs_create_file("inject_error", 0200, + hb->dbgfs.base_dir, accel_dev, + &adf_hb_error_inject_fops); +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + hb->dbgfs.inject_error = inject_error; +#endif + } } EXPORT_SYMBOL_GPL(adf_heartbeat_dbgfs_add); @@ -188,6 +238,10 @@ void adf_heartbeat_dbgfs_rm(struct adf_accel_dev *accel_dev) hb->dbgfs.failed = NULL; debugfs_remove(hb->dbgfs.cfg); hb->dbgfs.cfg = NULL; +#ifdef CONFIG_CRYPTO_DEV_QAT_ERROR_INJECTION + debugfs_remove(hb->dbgfs.inject_error); + hb->dbgfs.inject_error = NULL; +#endif debugfs_remove(hb->dbgfs.base_dir); hb->dbgfs.base_dir = NULL; } diff --git a/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c new file mode 100644 index 0000000000000000000000000000000000000000..a3b474bdef6c832f8ae10f03e64d7c0f9ffdcd02 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_heartbeat_inject.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include + +#include "adf_admin.h" +#include "adf_common_drv.h" +#include "adf_heartbeat.h" + +#define MAX_HB_TICKS 0xFFFFFFFF + +static int adf_hb_set_timer_to_max(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + + accel_dev->heartbeat->hb_timer = 0; + + if (hw_data->stop_timer) + hw_data->stop_timer(accel_dev); + + return adf_send_admin_hb_timer(accel_dev, MAX_HB_TICKS); +} + +static void adf_set_hb_counters_fail(struct adf_accel_dev *accel_dev, u32 ae, + u32 thr) +{ + struct hb_cnt_pair *stats = accel_dev->heartbeat->dma.virt_addr; + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + size_t thr_id = ae * hb_ctrs + thr; + u16 num_rsp = stats[thr_id].resp_heartbeat_cnt; + + /* + * Inject live.req != live.rsp and live.rsp == last.rsp + * to trigger the heartbeat error detection + */ + stats[thr_id].req_heartbeat_cnt++; + stats += (max_aes * hb_ctrs); + stats[thr_id].resp_heartbeat_cnt = num_rsp; +} + +int adf_heartbeat_inject_error(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_device = accel_dev->hw_device; + const size_t max_aes = hw_device->get_num_aes(hw_device); + const size_t hb_ctrs = hw_device->num_hb_ctrs; + u32 rand, rand_ae, rand_thr; + unsigned long ae_mask; + int ret; + + ae_mask = hw_device->ae_mask; + + do { + /* Ensure we have a valid ae */ + get_random_bytes(&rand, sizeof(rand)); + rand_ae = rand % max_aes; + } while (!test_bit(rand_ae, &ae_mask)); + + get_random_bytes(&rand, sizeof(rand)); + rand_thr = rand % hb_ctrs; + + /* Increase the heartbeat timer to prevent FW updating HB counters */ + ret = adf_hb_set_timer_to_max(accel_dev); + if (ret) + return ret; + + /* Configure worker threads to stop processing any packet */ + ret = adf_disable_arb_thd(accel_dev, rand_ae, rand_thr); + if (ret) + return ret; + + /* Change HB counters memory to simulate a hang */ + adf_set_hb_counters_fail(accel_dev, rand_ae, rand_thr); + + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c 
b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c index da6956699246749a13b5f57079ffb74c44ffae38..65bd26b25abce9c8eccef5f87d27cbaf22614179 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c +++ b/drivers/crypto/intel/qat/qat_common/adf_hw_arbiter.c @@ -103,3 +103,28 @@ void adf_exit_arb(struct adf_accel_dev *accel_dev) csr_ops->write_csr_ring_srv_arb_en(csr, i, 0); } EXPORT_SYMBOL_GPL(adf_exit_arb); + +int adf_disable_arb_thd(struct adf_accel_dev *accel_dev, u32 ae, u32 thr) +{ + void __iomem *csr = accel_dev->transport->banks[0].csr_addr; + struct adf_hw_device_data *hw_data = accel_dev->hw_device; + const u32 *thd_2_arb_cfg; + struct arb_info info; + u32 ae_thr_map; + + if (thr == ADF_AE_STRAND0_THREAD || thr == ADF_AE_STRAND1_THREAD) + thr = ADF_AE_ADMIN_THREAD; + + hw_data->get_arb_info(&info); + thd_2_arb_cfg = hw_data->get_arb_mapping(accel_dev); + if (!thd_2_arb_cfg) + return -EFAULT; + + /* Disable scheduling for this particular AE and thread */ + ae_thr_map = *(thd_2_arb_cfg + ae); + ae_thr_map &= ~(GENMASK(3, 0) << (thr * 4)); + + WRITE_CSR_ARB_WT2SAM(csr, info.arb_offset, info.wt2sam_offset, ae, + ae_thr_map); + return 0; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_init.c b/drivers/crypto/intel/qat/qat_common/adf_init.c index 0f9e2d59ce385730c2e342fbf05a0275177058e1..74f0818c07034873871056269fc0db040cc284be 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_init.c +++ b/drivers/crypto/intel/qat/qat_common/adf_init.c @@ -9,6 +9,9 @@ #include "adf_common_drv.h" #include "adf_dbgfs.h" #include "adf_heartbeat.h" +#include "adf_rl.h" +#include "adf_sysfs_ras_counters.h" +#include "adf_telemetry.h" static LIST_HEAD(service_table); static DEFINE_MUTEX(service_lock); @@ -61,7 +64,6 @@ int adf_service_unregister(struct service_hndl *service) static int adf_dev_init(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; struct adf_hw_device_data *hw_data = accel_dev->hw_device; int ret; @@ -120,6 +122,9 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } set_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status); + if (hw_data->ras_ops.enable_ras_errors) + hw_data->ras_ops.enable_ras_errors(accel_dev); + hw_data->enable_ints(accel_dev); hw_data->enable_error_correction(accel_dev); @@ -134,14 +139,20 @@ static int adf_dev_init(struct adf_accel_dev *accel_dev) } adf_heartbeat_init(accel_dev); + ret = adf_rl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + + ret = adf_tl_init(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; /* * Subservice initialisation is divided into two stages: init and start. * This is to facilitate any ordering dependencies between services * prior to starting any of the accelerators.
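+ * Rate limiting and telemetry follow the same two-stage pattern:
+ * adf_rl_init() and adf_tl_init() above are paired with adf_rl_start()
+ * and adf_tl_start() in adf_dev_start(); both treat -EOPNOTSUPP as
+ * "feature not supported on this device" rather than as a fatal error.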
*/ - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_INIT)) { dev_err(&GET_DEV(accel_dev), "Failed to initialise service %s\n", @@ -168,7 +179,6 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; int ret; set_bit(ADF_STATUS_STARTING, &accel_dev->status); @@ -211,9 +221,15 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) } adf_heartbeat_start(accel_dev); + ret = adf_rl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + ret = adf_tl_start(accel_dev); + if (ret && ret != -EOPNOTSUPP) + return ret; + + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_START)) { dev_err(&GET_DEV(accel_dev), "Failed to start service %s\n", @@ -246,6 +262,7 @@ static int adf_dev_start(struct adf_accel_dev *accel_dev) set_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); adf_dbgfs_add(accel_dev); + adf_sysfs_start_ras(accel_dev); return 0; } @@ -264,7 +281,6 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; bool wait = false; int ret; @@ -272,7 +288,10 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) !test_bit(ADF_STATUS_STARTING, &accel_dev->status)) return; + adf_tl_stop(accel_dev); + adf_rl_stop(accel_dev); adf_dbgfs_rm(accel_dev); + adf_sysfs_stop_ras(accel_dev); clear_bit(ADF_STATUS_STARTING, &accel_dev->status); clear_bit(ADF_STATUS_STARTED, &accel_dev->status); @@ -289,8 +308,7 @@ static void adf_dev_stop(struct adf_accel_dev *accel_dev) qat_comp_algs_unregister(); clear_bit(ADF_STATUS_COMP_ALGS_REGISTERED, &accel_dev->status); - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->start_status)) continue; ret = service->event_hld(accel_dev, ADF_EVENT_STOP); @@ -327,7 +345,6 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) { struct adf_hw_device_data *hw_data = accel_dev->hw_device; struct service_hndl *service; - struct list_head *list_itr; if (!hw_data) { dev_err(&GET_DEV(accel_dev), @@ -349,8 +366,7 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) &accel_dev->status); } - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (!test_bit(accel_dev->accel_id, service->init_status)) continue; if (service->event_hld(accel_dev, ADF_EVENT_SHUTDOWN)) @@ -361,8 +377,15 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) clear_bit(accel_dev->accel_id, service->init_status); } + adf_rl_exit(accel_dev); + + if (hw_data->ras_ops.disable_ras_errors) + hw_data->ras_ops.disable_ras_errors(accel_dev); + adf_heartbeat_shutdown(accel_dev); + adf_tl_shutdown(accel_dev); + hw_data->disable_iov(accel_dev); if (test_bit(ADF_STATUS_IRQ_ALLOCATED, &accel_dev->status)) { @@ -387,10 +410,8 @@ static void adf_dev_shutdown(struct adf_accel_dev *accel_dev) int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) { struct 
service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTING)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -402,10 +423,8 @@ int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev) int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) { struct service_hndl *service; - struct list_head *list_itr; - list_for_each(list_itr, &service_table) { - service = list_entry(list_itr, struct service_hndl, list); + list_for_each_entry(service, &service_table, list) { if (service->event_hld(accel_dev, ADF_EVENT_RESTARTED)) dev_err(&GET_DEV(accel_dev), "Failed to restart service %s.\n", @@ -414,6 +433,18 @@ int adf_dev_restarted_notify(struct adf_accel_dev *accel_dev) return 0; } +void adf_error_notifier(struct adf_accel_dev *accel_dev) +{ + struct service_hndl *service; + + list_for_each_entry(service, &service_table, list) { + if (service->event_hld(accel_dev, ADF_EVENT_FATAL_ERROR)) + dev_err(&GET_DEV(accel_dev), + "Failed to send error event to %s.\n", + service->name); + } +} + static int adf_dev_shutdown_cache_cfg(struct adf_accel_dev *accel_dev) { char services[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; diff --git a/drivers/crypto/intel/qat/qat_common/adf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_isr.c index 2aba194a7c292244b1e34503748851f53c3e16da..cae1aee5479aff04e7c123674b9f5e0cb33f67d9 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_isr.c @@ -132,6 +132,26 @@ static bool adf_handle_pm_int(struct adf_accel_dev *accel_dev) return false; } +static bool adf_handle_ras_int(struct adf_accel_dev *accel_dev) +{ + struct adf_ras_ops *ras_ops = &accel_dev->hw_device->ras_ops; + bool reset_required; + + if (ras_ops->handle_interrupt && + ras_ops->handle_interrupt(accel_dev, &reset_required)) { + if (reset_required) { + dev_err(&GET_DEV(accel_dev), "Fatal error, reset required\n"); + if (adf_notify_fatal_error(accel_dev)) + dev_err(&GET_DEV(accel_dev), + "Failed to notify fatal error\n"); + } + + return true; + } + + return false; +} + static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) { struct adf_accel_dev *accel_dev = dev_ptr; @@ -145,6 +165,9 @@ static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr) if (adf_handle_pm_int(accel_dev)) return IRQ_HANDLED; + if (adf_handle_ras_int(accel_dev)) + return IRQ_HANDLED; + dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n", accel_dev->accel_id); @@ -254,7 +277,7 @@ static int adf_isr_alloc_msix_vectors_data(struct adf_accel_dev *accel_dev) if (!accel_dev->pf.vf_info) msix_num_entries += hw_data->num_banks; - irqs = kzalloc_node(msix_num_entries * sizeof(*irqs), + irqs = kcalloc_node(msix_num_entries, sizeof(*irqs), GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev))); if (!irqs) return -ENOMEM; @@ -357,8 +380,6 @@ EXPORT_SYMBOL_GPL(adf_isr_resource_alloc); /** * adf_init_misc_wq() - Init misc workqueue * - * Function init workqueue 'qat_misc_wq' for general purpose. - * * Return: 0 on success, error code otherwise. 
*/ int __init adf_init_misc_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h index 204a42438992645960e99b234b77983cc3e54864..d1b3ef9cadacc02574ccf9c56515cc1c2cabee36 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_msg.h @@ -99,6 +99,8 @@ enum pf2vf_msgtype { ADF_PF2VF_MSGTYPE_RESTARTING = 0x01, ADF_PF2VF_MSGTYPE_VERSION_RESP = 0x02, ADF_PF2VF_MSGTYPE_BLKMSG_RESP = 0x03, + ADF_PF2VF_MSGTYPE_FATAL_ERROR = 0x04, + ADF_PF2VF_MSGTYPE_RESTARTED = 0x05, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_PF2VF_MSGTYPE_RP_RESET_RESP = 0x10, }; @@ -112,6 +114,7 @@ enum vf2pf_msgtype { ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ = 0x07, ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ = 0x08, ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ = 0x09, + ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE = 0x0a, /* Values from 0x10 are Gen4 specific, message type is only 4 bits in Gen2 devices. */ ADF_VF2PF_MSGTYPE_RP_RESET = 0x10, }; @@ -124,8 +127,10 @@ enum pfvf_compatibility_version { ADF_PFVF_COMPAT_FAST_ACK = 0x03, /* Ring to service mapping support for non-standard mappings */ ADF_PFVF_COMPAT_RING_TO_SVC_MAP = 0x04, + /* Fallback compat */ + ADF_PFVF_COMPAT_FALLBACK = 0x05, /* Reference to the latest version */ - ADF_PFVF_COMPAT_THIS_VERSION = 0x04, + ADF_PFVF_COMPAT_THIS_VERSION = 0x05, }; /* PF->VF Version Response */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c index 14c069f0d71a5b81ce246366f83e492924d663cd..0e31f4b41844e0a8d53de4000c4d574afab989f4 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.c @@ -1,21 +1,83 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2015 - 2021 Intel Corporation */ +#include #include #include "adf_accel_devices.h" #include "adf_pfvf_msg.h" #include "adf_pfvf_pf_msg.h" #include "adf_pfvf_pf_proto.h" +#define ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY 100 +#define ADF_VF_SHUTDOWN_RETRY 100 + void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) { struct adf_accel_vf_info *vf; struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTING }; int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarting\n"); for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { - if (vf->init && adf_send_pf2vf_msg(accel_dev, i, msg)) + vf->restarting = false; + if (!vf->init) + continue; + if (adf_send_pf2vf_msg(accel_dev, i, msg)) dev_err(&GET_DEV(accel_dev), "Failed to send restarting msg to VF%d\n", i); + else if (vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK) + vf->restarting = true; + } +} + +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ + int num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + int i, retries = ADF_VF_SHUTDOWN_RETRY; + struct adf_accel_vf_info *vf; + bool vf_running; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf wait for restarting complete\n"); + do { + vf_running = false; + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) + if (vf->restarting) + vf_running = true; + if (!vf_running) + break; + msleep(ADF_PF_WAIT_RESTARTING_COMPLETE_DELAY); + } while (--retries); + + if (vf_running) + dev_warn(&GET_DEV(accel_dev), "Some VFs are still running\n"); +} + +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message 
msg = { .type = ADF_PF2VF_MSGTYPE_RESTARTED }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify restarted\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send restarted msg to VF%d\n", i); + } +} + +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ + struct pfvf_message msg = { .type = ADF_PF2VF_MSGTYPE_FATAL_ERROR }; + int i, num_vfs = pci_num_vf(accel_to_pci_dev(accel_dev)); + struct adf_accel_vf_info *vf; + + dev_dbg(&GET_DEV(accel_dev), "pf2vf notify fatal error\n"); + for (i = 0, vf = accel_dev->pf.vf_info; i < num_vfs; i++, vf++) { + if (vf->init && vf->vf_compat_ver >= ADF_PFVF_COMPAT_FALLBACK && + adf_send_pf2vf_msg(accel_dev, i, msg)) + dev_err(&GET_DEV(accel_dev), + "Failed to send fatal error msg to VF%d\n", i); } } diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h index e8982d1ac8962b3c4ebf3ab0d34f7d3eb65d8df3..f203d88c919c2f06bbbe1d82cf9f5966f0d5706f 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_msg.h @@ -5,7 +5,28 @@ #include "adf_accel_devices.h" +#if defined(CONFIG_PCI_IOV) void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev); +void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev); +void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev); +#else +static inline void adf_pf2vf_notify_restarting(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_wait_for_restarting_complete(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_restarted(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_pf2vf_notify_fatal_error(struct adf_accel_dev *accel_dev) +{ +} +#endif typedef int (*adf_pf2vf_blkmsg_provider)(struct adf_accel_dev *accel_dev, u8 *buffer, u8 compat); diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c index 388e58bcbcaf2683228ae30e8aef99f35f3a513b..9ab93fbfefde9408826c44d59eae5c01411179f6 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_pf_proto.c @@ -291,6 +291,14 @@ static int adf_handle_vf2pf_msg(struct adf_accel_dev *accel_dev, u8 vf_nr, vf_info->init = false; } break; + case ADF_VF2PF_MSGTYPE_RESTARTING_COMPLETE: + { + dev_dbg(&GET_DEV(accel_dev), + "Restarting Complete received from VF%d\n", vf_nr); + vf_info->restarting = false; + vf_info->init = false; + } + break; case ADF_VF2PF_MSGTYPE_LARGE_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_MEDIUM_BLOCK_REQ: case ADF_VF2PF_MSGTYPE_SMALL_BLOCK_REQ: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c index 1015155b637493fb81c9720b72b1324873020457..dc284a089c88954c100bf1a64348c456d6162353 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c +++ b/drivers/crypto/intel/qat/qat_common/adf_pfvf_vf_proto.c @@ -308,6 +308,12 @@ static bool adf_handle_pf2vf_msg(struct adf_accel_dev *accel_dev, adf_pf2vf_handle_pf_restarting(accel_dev); return false; + case ADF_PF2VF_MSGTYPE_RESTARTED: + 
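/* VF side: these PF notifications are informational, just log them */
+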
dev_dbg(&GET_DEV(accel_dev), "Restarted message received from PF\n"); + return true; + case ADF_PF2VF_MSGTYPE_FATAL_ERROR: + dev_err(&GET_DEV(accel_dev), "Fatal error received from PF\n"); + return true; case ADF_PF2VF_MSGTYPE_VERSION_RESP: case ADF_PF2VF_MSGTYPE_BLKMSG_RESP: case ADF_PF2VF_MSGTYPE_RP_RESET_RESP: diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c new file mode 100644 index 0000000000000000000000000000000000000000..f0a13c19019673b48ef7a1dd6e9d159eb785b933 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ +#include <linux/debugfs.h> +#include <linux/fs.h> +#include <linux/kernel.h> + +#include "adf_accel_devices.h" +#include "adf_pm_dbgfs.h" + +static ssize_t pm_status_read(struct file *f, char __user *buf, size_t count, + loff_t *pos) +{ + struct adf_accel_dev *accel_dev = file_inode(f)->i_private; + struct adf_pm pm = accel_dev->power_management; + + if (pm.print_pm_status) + return pm.print_pm_status(accel_dev, buf, count, pos); + + return count; +} + +static const struct file_operations pm_status_fops = { + .owner = THIS_MODULE, + .read = pm_status_read, +}; + +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present || !pm->print_pm_status) + return; + + pm->debugfs_pm_status = debugfs_create_file("pm_status", 0400, + accel_dev->debugfs_dir, + accel_dev, &pm_status_fops); +} + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_pm *pm = &accel_dev->power_management; + + if (!pm->present) + return; + + debugfs_remove(pm->debugfs_pm_status); + pm->debugfs_pm_status = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h new file mode 100644 index 0000000000000000000000000000000000000000..83632e5aa097c06c1c1c2a11b0b615a878d649ab --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_pm_dbgfs.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_PM_DBGFS_H_ +#define ADF_PM_DBGFS_H_ + +struct adf_accel_dev; + +void adf_pm_dbgfs_rm(struct adf_accel_dev *accel_dev); +void adf_pm_dbgfs_add(struct adf_accel_dev *accel_dev); + +#endif /* ADF_PM_DBGFS_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.c b/drivers/crypto/intel/qat/qat_common/adf_rl.c new file mode 100644 index 0000000000000000000000000000000000000000..d4f2db3c53d8c0c09f636b66281ab4d28f87549f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.c @@ -0,0 +1,1186 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include <asm/errno.h> +#include <asm/div64.h> + +#include <linux/dev_printk.h> +#include <linux/kernel.h> +#include <linux/math64.h> +#include <linux/mutex.h> +#include <linux/slab.h> + +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_rl_admin.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define RL_TOKEN_GRANULARITY_PCIEIN_BUCKET 0U +#define RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET 0U +#define RL_TOKEN_PCIE_SIZE 64 +#define RL_TOKEN_ASYM_SIZE 1024 +#define RL_CSR_SIZE 4U +#define RL_CAPABILITY_MASK GENMASK(6, 4) +#define RL_CAPABILITY_VALUE 0x70 +#define RL_VALIDATE_NON_ZERO(input) ((input) == 0) +#define ROOT_MASK GENMASK(1, 0) +#define CLUSTER_MASK GENMASK(3, 0) +#define LEAF_MASK GENMASK(5, 0) + +static int validate_user_input(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + bool
is_update) +{ + const unsigned long rp_mask = sla_in->rp_mask; + size_t rp_mask_size; + int i, cnt; + + if (sla_in->pir < sla_in->cir) { + dev_notice(&GET_DEV(accel_dev), + "PIR must be >= CIR, setting PIR to CIR\n"); + sla_in->pir = sla_in->cir; + } + + if (!is_update) { + cnt = 0; + rp_mask_size = sizeof(sla_in->rp_mask) * BITS_PER_BYTE; + for_each_set_bit(i, &rp_mask, rp_mask_size) { + if (++cnt > RL_RP_CNT_PER_LEAF_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Too many ring pairs selected for this SLA\n"); + return -EINVAL; + } + } + + if (sla_in->srv >= ADF_SVC_NONE) { + dev_notice(&GET_DEV(accel_dev), + "Wrong service type\n"); + return -EINVAL; + } + + if (sla_in->type > RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), + "Wrong node type\n"); + return -EINVAL; + } + + if (sla_in->parent_id < RL_PARENT_DEFAULT_ID || + sla_in->parent_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), + "Wrong parent ID\n"); + return -EINVAL; + } + } + + return 0; +} + +static int validate_sla_id(struct adf_accel_dev *accel_dev, int sla_id) +{ + struct rl_sla *sla; + + if (sla_id <= RL_SLA_EMPTY_ID || sla_id >= RL_NODES_CNT_MAX) { + dev_notice(&GET_DEV(accel_dev), "Provided ID is out of bounds\n"); + return -EINVAL; + } + + sla = accel_dev->rate_limiting->sla[sla_id]; + + if (!sla) { + dev_notice(&GET_DEV(accel_dev), "SLA with provided ID does not exist\n"); + return -EINVAL; + } + + if (sla->type != RL_LEAF) { + dev_notice(&GET_DEV(accel_dev), "This ID is reserved for internal use\n"); + return -EINVAL; + } + + return 0; +} + +/** + * find_parent() - Find the parent for a new SLA + * @rl_data: pointer to ratelimiting data + * @sla_in: pointer to user input data for a new SLA + * + * Function returns a pointer to the parent SLA. If the parent ID is provided + * as input in the user data, then such ID is validated and the parent SLA + * is returned. + * Otherwise, it returns the default parent SLA (root or cluster) for + * the new object. + * + * Return: + * * Pointer to the parent SLA object + * * NULL - when parent cannot be found + */ +static struct rl_sla *find_parent(struct adf_rl *rl_data, + struct adf_rl_sla_input_data *sla_in) +{ + int input_parent_id = sla_in->parent_id; + struct rl_sla *root = NULL; + struct rl_sla *parent_sla; + int i; + + if (sla_in->type == RL_ROOT) + return NULL; + + if (input_parent_id > RL_PARENT_DEFAULT_ID) { + parent_sla = rl_data->sla[input_parent_id]; + /* + * SLA can be a parent if it has the same service as the child + * and its type is higher in the hierarchy, + * for example the parent type of a LEAF must be a CLUSTER. + */ + if (parent_sla && parent_sla->srv == sla_in->srv && + parent_sla->type == sla_in->type - 1) + return parent_sla; + + return NULL; + } + + /* If input_parent_id is not valid, get root for this service type. */ + for (i = 0; i < RL_ROOT_MAX; i++) { + if (rl_data->root[i] && rl_data->root[i]->srv == sla_in->srv) { + root = rl_data->root[i]; + break; + } + } + + if (!root) + return NULL; + + /* + * If the type of this SLA is cluster, then return the root. + * Otherwise, find the default (i.e. first) cluster for this service. 
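+ * Cluster node IDs for a given service are allocated at a fixed stride + * (see get_next_free_node_id()), so the first cluster whose parent is the + * service's root is that service's default cluster.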
+ */ + if (sla_in->type == RL_CLUSTER) + return root; + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (rl_data->cluster[i] && rl_data->cluster[i]->parent == root) + return rl_data->cluster[i]; + } + + return NULL; +} + +static enum adf_cfg_service_type srv_to_cfg_svc_type(enum adf_base_services rl_srv) +{ + switch (rl_srv) { + case ADF_SVC_ASYM: + return ASYM; + case ADF_SVC_SYM: + return SYM; + case ADF_SVC_DC: + return COMP; + default: + return UNUSED; + } +} + +/** + * get_sla_arr_of_type() - Returns a pointer to SLA type specific array + * @rl_data: pointer to ratelimiting data + * @type: SLA type + * @sla_arr: pointer to variable where requested pointer will be stored + * + * Return: Max number of elements allowed for the returned array + */ +static u32 get_sla_arr_of_type(struct adf_rl *rl_data, enum rl_node_type type, + struct rl_sla ***sla_arr) +{ + switch (type) { + case RL_LEAF: + *sla_arr = rl_data->leaf; + return RL_LEAF_MAX; + case RL_CLUSTER: + *sla_arr = rl_data->cluster; + return RL_CLUSTER_MAX; + case RL_ROOT: + *sla_arr = rl_data->root; + return RL_ROOT_MAX; + default: + *sla_arr = NULL; + return 0; + } +} + +static bool is_service_enabled(struct adf_accel_dev *accel_dev, + enum adf_base_services rl_srv) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(rl_srv); + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u8 rps_per_bundle = hw_data->num_banks_per_vf; + int i; + + for (i = 0; i < rps_per_bundle; i++) { + if (GET_SRV_TYPE(accel_dev, i) == arb_srv) + return true; + } + + return false; +} + +/** + * prepare_rp_ids() - Creates an array of ring pair IDs from bitmask + * @accel_dev: pointer to acceleration device structure + * @sla: SLA object data where result will be written + * @rp_mask: bitmask of ring pair IDs + * + * Function converts the provided bitmask into an array of ring pair IDs. It + * checks that the selected RPs are not already in use, that they are assigned + * to the SLA's service and that the number of provided IDs does not exceed + * the supported maximum. On success, the IDs are written to + * sla->ring_pairs_ids and their count to + * sla->ring_pairs_cnt.
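+ * + * Example (illustrative): rp_mask = 0x5 selects RP0 and RP2; if both are free + * and support the SLA's service, the result is ring_pairs_ids = {0, 2} and + * ring_pairs_cnt = 2.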
+ * + * Return: + * * 0 - ok + * * -EINVAL - ring pairs array cannot be created from provided mask + */ +static int prepare_rp_ids(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + const unsigned long rp_mask) +{ + enum adf_cfg_service_type arb_srv = srv_to_cfg_svc_type(sla->srv); + u16 rps_per_bundle = GET_HW_DATA(accel_dev)->num_banks_per_vf; + bool *rp_in_use = accel_dev->rate_limiting->rp_in_use; + size_t rp_cnt_max = ARRAY_SIZE(sla->ring_pairs_ids); + u16 rp_id_max = GET_HW_DATA(accel_dev)->num_banks; + u16 cnt = 0; + u16 rp_id; + + for_each_set_bit(rp_id, &rp_mask, rp_id_max) { + if (cnt >= rp_cnt_max) { + dev_notice(&GET_DEV(accel_dev), + "Assigned more ring pairs than supported"); + return -EINVAL; + } + + if (rp_in_use[rp_id]) { + dev_notice(&GET_DEV(accel_dev), + "RP %u already assigned to other SLA", rp_id); + return -EINVAL; + } + + if (GET_SRV_TYPE(accel_dev, rp_id % rps_per_bundle) != arb_srv) { + dev_notice(&GET_DEV(accel_dev), + "RP %u does not support SLA service", rp_id); + return -EINVAL; + } + + sla->ring_pairs_ids[cnt++] = rp_id; + } + + sla->ring_pairs_cnt = cnt; + + return 0; +} + +static void mark_rps_usage(struct rl_sla *sla, bool *rp_in_use, bool used) +{ + u16 rp_id; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + rp_id = sla->ring_pairs_ids[i]; + rp_in_use[rp_id] = used; + } +} + +static void assign_rps_to_leaf(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.r2l_offset; + u32 node_id = clear ? 0U : (sla->node_id & LEAF_MASK); + u32 offset; + int i; + + for (i = 0; i < sla->ring_pairs_cnt; i++) { + offset = base_offset + (RL_CSR_SIZE * sla->ring_pairs_ids[i]); + ADF_CSR_WR(pmisc_addr, offset, node_id); + } +} + +static void assign_leaf_to_cluster(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.l2c_offset; + u32 node_id = sla->node_id & LEAF_MASK; + u32 parent_id = clear ? 0U : (sla->parent->node_id & CLUSTER_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_cluster_to_root(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u32 base_offset = hw_data->rl_data.c2s_offset; + u32 node_id = sla->node_id & CLUSTER_MASK; + u32 parent_id = clear ? 
0U : (sla->parent->node_id & ROOT_MASK); + u32 offset; + + offset = base_offset + (RL_CSR_SIZE * node_id); + ADF_CSR_WR(pmisc_addr, offset, parent_id); +} + +static void assign_node_to_parent(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool clear_assignment) +{ + switch (sla->type) { + case RL_LEAF: + assign_rps_to_leaf(accel_dev, sla, clear_assignment); + assign_leaf_to_cluster(accel_dev, sla, clear_assignment); + break; + case RL_CLUSTER: + assign_cluster_to_root(accel_dev, sla, clear_assignment); + break; + default: + break; + } +} + +/** + * can_parent_afford_sla() - Verifies if the parent allows creating an SLA + * @sla_in: pointer to user input data for a new SLA + * @sla_parent: pointer to parent SLA object + * @sla_cir: current child CIR value (only for update) + * @is_update: request is an update + * + * The algorithm verifies that the parent has enough remaining budget to take + * on a child with the provided parameters. In the update case, the child's + * current CIR value is first returned to the parent's budget. + * The PIR value cannot exceed the PIR assigned to the parent. + * + * Return: + * * true - SLA can be created + * * false - SLA cannot be created + */ +static bool can_parent_afford_sla(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla_parent, u32 sla_cir, + bool is_update) +{ + u32 rem_cir = sla_parent->rem_cir; + + if (is_update) + rem_cir += sla_cir; + + if (sla_in->cir > rem_cir || sla_in->pir > sla_parent->pir) + return false; + + return true; +} + +/** + * can_node_afford_update() - Verifies if SLA can be updated with input data + * @sla_in: pointer to user input data for a new SLA + * @sla: pointer to SLA object selected for update + * + * The algorithm verifies that the new CIR value is big enough to satisfy the + * currently assigned child SLAs and that the PIR can be updated. + * + * Return: + * * true - SLA can be updated + * * false - SLA cannot be updated + */ +static bool can_node_afford_update(struct adf_rl_sla_input_data *sla_in, + struct rl_sla *sla) +{ + u32 cir_in_use = sla->cir - sla->rem_cir; + + /* new CIR cannot be smaller than the currently consumed value */ + if (cir_in_use > sla_in->cir) + return false; + + /* PIR of root/cluster cannot be reduced in a node with assigned children */ + if (sla_in->pir < sla->pir && sla->type != RL_LEAF && cir_in_use > 0) + return false; + + return true; +} + +static bool is_enough_budget(struct adf_rl *rl_data, struct rl_sla *sla, + struct adf_rl_sla_input_data *sla_in, + bool is_update) +{ + u32 max_val = rl_data->device_data->scale_ref; + struct rl_sla *parent = sla->parent; + bool ret = true; + + if (sla_in->cir > max_val || sla_in->pir > max_val) + ret = false; + + switch (sla->type) { + case RL_LEAF: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + break; + case RL_CLUSTER: + ret &= can_parent_afford_sla(sla_in, parent, sla->cir, + is_update); + + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + case RL_ROOT: + if (is_update) + ret &= can_node_afford_update(sla_in, sla); + + break; + default: + ret = false; + break; + } + + return ret; +} + +static void update_budget(struct rl_sla *sla, u32 old_cir, bool is_update) +{ + switch (sla->type) { + case RL_LEAF: + if (is_update) + sla->parent->rem_cir += old_cir; + + sla->parent->rem_cir -= sla->cir; + sla->rem_cir = 0; + break; + case RL_CLUSTER: + if (is_update) { + sla->parent->rem_cir += old_cir; + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + } else { + sla->rem_cir = sla->cir; + } + + sla->parent->rem_cir -= sla->cir; + break; + case RL_ROOT:
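+ /* A root has no parent; it only refreshes its own remaining budget. */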
+ if (is_update) + sla->rem_cir = sla->cir - (old_cir - sla->rem_cir); + else + sla->rem_cir = sla->cir; + break; + default: + break; + } +} + +/** + * get_next_free_sla_id() - finds next free ID in the SLA array + * @rl_data: Pointer to ratelimiting data structure + * + * Return: + * * 0 : RL_NODES_CNT_MAX - correct ID + * * -ENOSPC - all SLA slots are in use + */ +static int get_next_free_sla_id(struct adf_rl *rl_data) +{ + int i = 0; + + while (i < RL_NODES_CNT_MAX && rl_data->sla[i++]) + ; + + if (i == RL_NODES_CNT_MAX) + return -ENOSPC; + + return i - 1; +} + +/** + * get_next_free_node_id() - finds next free ID in the array of that node type + * @rl_data: Pointer to ratelimiting data structure + * @sla: Pointer to SLA object for which the ID is searched + * + * Return: + * * 0 : RL_[NODE_TYPE]_MAX - correct ID + * * -ENOSPC - all slots of that type are in use + */ +static int get_next_free_node_id(struct adf_rl *rl_data, struct rl_sla *sla) +{ + struct adf_hw_device_data *hw_device = GET_HW_DATA(rl_data->accel_dev); + int max_id, i, step, rp_per_leaf; + struct rl_sla **sla_list; + + rp_per_leaf = hw_device->num_banks / hw_device->num_banks_per_vf; + + /* + * Static nodes mapping: + * root0 - cluster[0,4,8,12] - leaf[0-15] + * root1 - cluster[1,5,9,13] - leaf[16-31] + * root2 - cluster[2,6,10,14] - leaf[32-47] + */ + switch (sla->type) { + case RL_LEAF: + i = sla->srv * rp_per_leaf; + step = 1; + max_id = i + rp_per_leaf; + sla_list = rl_data->leaf; + break; + case RL_CLUSTER: + i = sla->srv; + step = 4; + max_id = RL_CLUSTER_MAX; + sla_list = rl_data->cluster; + break; + case RL_ROOT: + return sla->srv; + default: + return -EINVAL; + } + + while (i < max_id && sla_list[i]) + i += step; + + if (i >= max_id) + return -ENOSPC; + + return i; +} + +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 avail_slice_cycles, allocated_tokens; + + if (!sla_val) + return 0; + + avail_slice_cycles = hw_data->clock_frequency; + + switch (svc_type) { + case ADF_SVC_ASYM: + avail_slice_cycles *= device_data->slices.pke_cnt; + break; + case ADF_SVC_SYM: + avail_slice_cycles *= device_data->slices.cph_cnt; + break; + case ADF_SVC_DC: + avail_slice_cycles *= device_data->slices.dcpr_cnt; + break; + default: + break; + } + + do_div(avail_slice_cycles, device_data->scan_interval); + allocated_tokens = avail_slice_cycles * sla_val; + do_div(allocated_tokens, device_data->scale_ref); + + return allocated_tokens; +} + +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type) +{ + struct adf_rl_hw_data *device_data = &accel_dev->hw_device->rl_data; + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + u64 allocated_ae_cycles, avail_ae_cycles; + + if (!sla_val) + return 0; + + avail_ae_cycles = hw_data->clock_frequency; + avail_ae_cycles *= hw_data->get_num_aes(hw_data) - 1; + do_div(avail_ae_cycles, device_data->scan_interval); + + sla_val *= device_data->max_tp[svc_type]; + sla_val /= device_data->scale_ref; + + allocated_ae_cycles = (sla_val * avail_ae_cycles); + do_div(allocated_ae_cycles, device_data->max_tp[svc_type]); + + return allocated_ae_cycles; +} + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out) +{ + struct adf_rl_hw_data *device_data = 
&accel_dev->hw_device->rl_data; + u64 sla_to_bytes, allocated_bw, sla_scaled; + + if (!sla_val) + return 0; + + sla_to_bytes = sla_val; + sla_to_bytes *= device_data->max_tp[svc_type]; + do_div(sla_to_bytes, device_data->scale_ref); + + sla_to_bytes *= (svc_type == ADF_SVC_ASYM) ? RL_TOKEN_ASYM_SIZE : + BYTES_PER_MBIT; + if (svc_type == ADF_SVC_DC && is_bw_out) + sla_to_bytes *= device_data->slices.dcpr_cnt - + device_data->dcpr_correction; + + sla_scaled = sla_to_bytes * device_data->pcie_scale_mul; + do_div(sla_scaled, device_data->pcie_scale_div); + allocated_bw = sla_scaled; + do_div(allocated_bw, RL_TOKEN_PCIE_SIZE); + do_div(allocated_bw, device_data->scan_interval); + + return allocated_bw; +} + +/** + * add_new_sla_entry() - creates a new SLA object and fills it with user data + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new SLA + * @sla_out: Pointer to variable that will contain the address of a new + * SLA object if the operation succeeds + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +static int add_new_sla_entry(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, + struct rl_sla **sla_out) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + sla = kzalloc(sizeof(*sla), GFP_KERNEL); + if (!sla) { + ret = -ENOMEM; + goto ret_err; + } + *sla_out = sla; + + if (!is_service_enabled(accel_dev, sla_in->srv)) { + dev_notice(&GET_DEV(accel_dev), + "Provided service is not enabled\n"); + ret = -EINVAL; + goto ret_err; + } + + sla->srv = sla_in->srv; + sla->type = sla_in->type; + ret = get_next_free_node_id(rl_data, sla); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Exceeded number of available nodes for that service\n"); + goto ret_err; + } + sla->node_id = ret; + + ret = get_next_free_sla_id(rl_data); + if (ret < 0) { + dev_notice(&GET_DEV(accel_dev), + "Allocated maximum SLAs number\n"); + goto ret_err; + } + sla->sla_id = ret; + + sla->parent = find_parent(rl_data, sla_in); + if (!sla->parent && sla->type != RL_ROOT) { + if (sla_in->parent_id != RL_PARENT_DEFAULT_ID) + dev_notice(&GET_DEV(accel_dev), + "Provided parent ID does not exist or cannot be parent for this SLA."); + else + dev_notice(&GET_DEV(accel_dev), + "Unable to find parent node for this service. 
Is service enabled?"); + ret = -EINVAL; + goto ret_err; + } + + if (sla->type == RL_LEAF) { + ret = prepare_rp_ids(accel_dev, sla, sla_in->rp_mask); + if (!sla->ring_pairs_cnt || ret) { + dev_notice(&GET_DEV(accel_dev), + "Unable to find ring pairs to assign to the leaf"); + if (!ret) + ret = -EINVAL; + + goto ret_err; + } + } + + return 0; + +ret_err: + kfree(sla); + *sla_out = NULL; + + return ret; +} + +static int initialize_default_nodes(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct adf_rl_hw_data *device_data = rl_data->device_data; + struct adf_rl_sla_input_data sla_in = { }; + int ret = 0; + int i; + + /* Init root for each enabled service */ + sla_in.type = RL_ROOT; + sla_in.parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!is_service_enabled(accel_dev, i)) + continue; + + sla_in.cir = device_data->scale_ref; + sla_in.pir = sla_in.cir; + sla_in.srv = i; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + /* Init default cluster for each root */ + sla_in.type = RL_CLUSTER; + for (i = 0; i < ADF_SVC_NONE; i++) { + if (!rl_data->root[i]) + continue; + + sla_in.cir = rl_data->root[i]->cir; + sla_in.pir = sla_in.cir; + sla_in.srv = rl_data->root[i]->srv; + + ret = adf_rl_add_sla(accel_dev, &sla_in); + if (ret) + return ret; + } + + return 0; +} + +static void clear_sla(struct adf_rl *rl_data, struct rl_sla *sla) +{ + bool *rp_in_use = rl_data->rp_in_use; + struct rl_sla **sla_type_arr = NULL; + int i, sla_id, node_id; + u32 old_cir; + + sla_id = sla->sla_id; + node_id = sla->node_id; + old_cir = sla->cir; + sla->cir = 0; + sla->pir = 0; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + rp_in_use[sla->ring_pairs_ids[i]] = false; + + update_budget(sla, old_cir, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + assign_node_to_parent(rl_data->accel_dev, sla, true); + adf_rl_send_admin_delete_msg(rl_data->accel_dev, node_id, sla->type); + mark_rps_usage(sla, rl_data->rp_in_use, false); + + kfree(sla); + rl_data->sla[sla_id] = NULL; + sla_type_arr[node_id] = NULL; +} + +static void free_all_sla(struct adf_accel_dev *accel_dev) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + int sla_id; + + mutex_lock(&rl_data->rl_lock); + + for (sla_id = 0; sla_id < RL_NODES_CNT_MAX; sla_id++) { + if (!rl_data->sla[sla_id]) + continue; + + kfree(rl_data->sla[sla_id]); + rl_data->sla[sla_id] = NULL; + } + + mutex_unlock(&rl_data->rl_lock); +} + +/** + * add_update_sla() - handles the creation and the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data for a new/updated SLA + * @is_update: flag to indicate if this is an update or an add operation + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - user input data cannot be used to create SLA + * * -ENOSPC - all available SLAs are in use + */ +static int add_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in, bool is_update) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla **sla_type_arr = NULL; + struct rl_sla *sla = NULL; + u32 old_cir = 0; + int ret; + + if (!sla_in) { + dev_warn(&GET_DEV(accel_dev), + "SLA input data pointer is missing\n"); + return -EFAULT; + } + + mutex_lock(&rl_data->rl_lock); + + /* Input validation */ + ret = validate_user_input(accel_dev, sla_in, is_update); + if (ret) + goto ret_err; + + if (is_update) { + ret = validate_sla_id(accel_dev, 
sla_in->sla_id); + if (ret) + goto ret_err; + + sla = rl_data->sla[sla_in->sla_id]; + old_cir = sla->cir; + } else { + ret = add_new_sla_entry(accel_dev, sla_in, &sla); + if (ret) + goto ret_err; + } + + if (!is_enough_budget(rl_data, sla, sla_in, is_update)) { + dev_notice(&GET_DEV(accel_dev), + "Input value exceeds the remaining budget%s\n", + is_update ? " or more budget is already in use" : ""); + ret = -EINVAL; + goto ret_err; + } + sla->cir = sla_in->cir; + sla->pir = sla_in->pir; + + /* Apply SLA */ + assign_node_to_parent(accel_dev, sla, false); + ret = adf_rl_send_admin_add_update_msg(accel_dev, sla, is_update); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "Failed to apply an SLA\n"); + goto ret_err; + } + update_budget(sla, old_cir, is_update); + + if (!is_update) { + mark_rps_usage(sla, rl_data->rp_in_use, true); + get_sla_arr_of_type(rl_data, sla->type, &sla_type_arr); + sla_type_arr[sla->node_id] = sla; + rl_data->sla[sla->sla_id] = sla; + } + + sla_in->sla_id = sla->sla_id; + goto ret_ok; + +ret_err: + if (!is_update) { + sla_in->sla_id = -1; + kfree(sla); + } +ret_ok: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_add_sla() - handles the creation of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to add an SLA + * + * Return: + * * 0 - ok + * * -ENOMEM - memory allocation failed + * * -EINVAL - invalid user input + * * -ENOSPC - all available SLAs are in use + */ +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, false); +} + +/** + * adf_rl_update_sla() - handles the update of an SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user input data required to update an SLA + * + * Return: + * * 0 - ok + * * -EINVAL - user input data cannot be used to update SLA + */ +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + return add_update_sla(accel_dev, sla_in, true); +} + +/** + * adf_rl_get_sla() - returns data for an existing SLA + * @accel_dev: pointer to acceleration device structure + * @sla_in: pointer to user data where SLA info will be stored + * + * The sla_id for which data is requested should be set in the sla_in structure. + * + * Return: + * * 0 - ok + * * -EINVAL - provided sla_id does not exist + */ +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in) +{ + struct rl_sla *sla; + int ret, i; + + ret = validate_sla_id(accel_dev, sla_in->sla_id); + if (ret) + return ret; + + sla = accel_dev->rate_limiting->sla[sla_in->sla_id]; + sla_in->type = sla->type; + sla_in->srv = sla->srv; + sla_in->cir = sla->cir; + sla_in->pir = sla->pir; + sla_in->rp_mask = 0U; + if (sla->parent) + sla_in->parent_id = sla->parent->sla_id; + else + sla_in->parent_id = RL_PARENT_DEFAULT_ID; + + for (i = 0; i < sla->ring_pairs_cnt; i++) + sla_in->rp_mask |= BIT(sla->ring_pairs_ids[i]); + + return 0; +} + +/** + * adf_rl_get_capability_remaining() - returns the remaining SLA value (CIR) for + * the selected service or the provided sla_id + * @accel_dev: pointer to acceleration device structure + * @srv: service ID for which capability is requested + * @sla_id: ID of the cluster or root to which we want to assign a new SLA + * + * Check if the provided SLA id is valid. If it is and the service matches + * the requested service and the type is cluster or root, return the remaining + * capability.
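+ * Note: passing sla_id == RL_SLA_EMPTY_ID (-1), as the sysfs cap_rem + * attribute does, skips the SLA lookup and falls through to the default + * cluster of the requested service.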
+ * If the provided ID does not match the service or type, return the remaining + * capacity of the default cluster for that service. + * + * Return: + * * Positive value - correct remaining value + * * -EINVAL - algorithm cannot find a remaining value for provided data + */ +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla = NULL; + int i; + + if (srv >= ADF_SVC_NONE) + return -EINVAL; + + if (sla_id > RL_SLA_EMPTY_ID && !validate_sla_id(accel_dev, sla_id)) { + sla = rl_data->sla[sla_id]; + + if (sla->srv == srv && sla->type <= RL_CLUSTER) + goto ret_ok; + } + + for (i = 0; i < RL_CLUSTER_MAX; i++) { + if (!rl_data->cluster[i]) + continue; + + if (rl_data->cluster[i]->srv == srv) { + sla = rl_data->cluster[i]; + goto ret_ok; + } + } + + return -EINVAL; +ret_ok: + return sla->rem_cir; +} + +/** + * adf_rl_remove_sla() - removes the SLA with the provided sla_id + * @accel_dev: pointer to acceleration device structure + * @sla_id: ID of the SLA to be removed + * + * Return: + * * 0 - ok + * * -EINVAL - wrong sla_id or the SLA still has assigned children + */ +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + struct rl_sla *sla; + int ret = 0; + + mutex_lock(&rl_data->rl_lock); + ret = validate_sla_id(accel_dev, sla_id); + if (ret) + goto err_ret; + + sla = rl_data->sla[sla_id]; + + if (sla->type < RL_LEAF && sla->rem_cir != sla->cir) { + dev_notice(&GET_DEV(accel_dev), + "To remove a parent SLA, all its children must be removed first"); + ret = -EINVAL; + goto err_ret; + } + + clear_sla(rl_data, sla); + +err_ret: + mutex_unlock(&rl_data->rl_lock); + return ret; +} + +/** + * adf_rl_remove_sla_all() - removes all SLAs from the device + * @accel_dev: pointer to acceleration device structure + * @incl_default: set to true if the default SLAs should also be removed + */ +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default) +{ + struct adf_rl *rl_data = accel_dev->rate_limiting; + int end_type = incl_default ?
RL_ROOT : RL_LEAF; + struct rl_sla **sla_type_arr = NULL; + u32 max_id; + int i, j; + + mutex_lock(&rl_data->rl_lock); + + /* Unregister and remove all SLAs */ + for (j = RL_LEAF; j >= end_type; j--) { + max_id = get_sla_arr_of_type(rl_data, j, &sla_type_arr); + + for (i = 0; i < max_id; i++) { + if (!sla_type_arr[i]) + continue; + + clear_sla(rl_data, sla_type_arr[i]); + } + } + + mutex_unlock(&rl_data->rl_lock); +} + +int adf_rl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_rl_hw_data *rl_hw_data = &hw_data->rl_data; + struct adf_rl *rl; + int ret = 0; + + /* Validate device parameters */ + if (RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_ASYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_SYM]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->max_tp[ADF_SVC_DC]) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scan_interval) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_div) || + RL_VALIDATE_NON_ZERO(rl_hw_data->pcie_scale_mul) || + RL_VALIDATE_NON_ZERO(rl_hw_data->scale_ref)) { + ret = -EOPNOTSUPP; + goto err_ret; + } + + rl = kzalloc(sizeof(*rl), GFP_KERNEL); + if (!rl) { + ret = -ENOMEM; + goto err_ret; + } + + mutex_init(&rl->rl_lock); + rl->device_data = &accel_dev->hw_device->rl_data; + rl->accel_dev = accel_dev; + accel_dev->rate_limiting = rl; + +err_ret: + return ret; +} + +int adf_rl_start(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_hw_data *rl_hw_data = &GET_HW_DATA(accel_dev)->rl_data; + void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev); + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + int ret; + + if (!accel_dev->rate_limiting) { + ret = -EOPNOTSUPP; + goto ret_err; + } + + if ((fw_caps & RL_CAPABILITY_MASK) != RL_CAPABILITY_VALUE) { + dev_info(&GET_DEV(accel_dev), "not supported\n"); + ret = -EOPNOTSUPP; + goto ret_free; + } + + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciin_tb_offset, + RL_TOKEN_GRANULARITY_PCIEIN_BUCKET); + ADF_CSR_WR(pmisc_addr, rl_hw_data->pciout_tb_offset, + RL_TOKEN_GRANULARITY_PCIEOUT_BUCKET); + + ret = adf_rl_send_admin_init_msg(accel_dev, &rl_hw_data->slices); + if (ret) { + dev_err(&GET_DEV(accel_dev), "initialization failed\n"); + goto ret_free; + } + + ret = initialize_default_nodes(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), + "failed to initialize default SLAs\n"); + goto ret_sla_rm; + } + + ret = adf_sysfs_rl_add(accel_dev); + if (ret) { + dev_err(&GET_DEV(accel_dev), "failed to add sysfs interface\n"); + goto ret_sysfs_rm; + } + + return 0; + +ret_sysfs_rm: + adf_sysfs_rl_rm(accel_dev); +ret_sla_rm: + adf_rl_remove_sla_all(accel_dev, true); +ret_free: + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +ret_err: + return ret; +} + +void adf_rl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + adf_sysfs_rl_rm(accel_dev); + free_all_sla(accel_dev); +} + +void adf_rl_exit(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->rate_limiting) + return; + + kfree(accel_dev->rate_limiting); + accel_dev->rate_limiting = NULL; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl.h b/drivers/crypto/intel/qat/qat_common/adf_rl.h new file mode 100644 index 0000000000000000000000000000000000000000..269c6656fb90eac27152892306adf4c211a6fbc4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_H_ +#define ADF_RL_H_ + +#include <linux/mutex.h> +#include <linux/types.h> + +struct adf_accel_dev; +
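+/* + * SLAs form a three-level tree per service: one root node, up to four + * clusters below it and leaf nodes that own the ring pairs. A child's + * CIR/PIR budget is drawn from its parent's remaining budget (illustrative + * summary; see adf_rl.c for the exact rules). + */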
+#define RL_ROOT_MAX 4 +#define RL_CLUSTER_MAX 16 +#define RL_LEAF_MAX 64 +#define RL_NODES_CNT_MAX (RL_ROOT_MAX + RL_CLUSTER_MAX + RL_LEAF_MAX) +#define RL_RP_CNT_PER_LEAF_MAX 4U +#define RL_RP_CNT_MAX 64 +#define RL_SLA_EMPTY_ID -1 +#define RL_PARENT_DEFAULT_ID -1 + +enum rl_node_type { + RL_ROOT, + RL_CLUSTER, + RL_LEAF, +}; + +enum adf_base_services { + ADF_SVC_ASYM = 0, + ADF_SVC_SYM, + ADF_SVC_DC, + ADF_SVC_NONE, +}; + +/** + * struct adf_rl_sla_input_data - ratelimiting user input data structure + * @rp_mask: 64 bit bitmask of ring pair IDs which will be assigned to SLA. + * E.g. 0x5 -> RP0 and RP2 assigned; 0xA005 -> RP0,2,13,15 assigned. + * @sla_id: ID of current SLA for operations update, rm, get. For the add + * operation, this field will be updated with the ID of the newly + * added SLA + * @parent_id: ID of the SLA to which the current one should be assigned. + * Set to -1 to refer to the default parent. + * @cir: Committed information rate. Rate guaranteed to be achieved. Input value + * is expressed in permille scale, i.e. 1000 refers to the maximum + * device throughput for a selected service. + * @pir: Peak information rate. Maximum rate available that the SLA can achieve. + * Input value is expressed in permille scale, i.e. 1000 refers to + * the maximum device throughput for a selected service. + * @type: SLA type: root, cluster, leaf + * @srv: Service associated with the SLA: asym, sym, dc. + * + * This structure is used to perform operations on an SLA. + * Depending on the operation, some of the parameters are ignored. + * The following list reports which parameters should be set for each operation. + * - add: all except sla_id + * - update: cir, pir, sla_id + * - rm: sla_id + * - rm_all: - + * - get: sla_id + * - get_capability_rem: srv, sla_id + */ +struct adf_rl_sla_input_data { + u64 rp_mask; + int sla_id; + int parent_id; + unsigned int cir; + unsigned int pir; + enum rl_node_type type; + enum adf_base_services srv; +}; + +struct rl_slice_cnt { + u8 dcpr_cnt; + u8 pke_cnt; + u8 cph_cnt; +}; + +struct adf_rl_interface_data { + struct adf_rl_sla_input_data input; + enum adf_base_services cap_rem_srv; + struct rw_semaphore lock; + bool sysfs_added; +}; + +struct adf_rl_hw_data { + u32 scale_ref; + u32 scan_interval; + u32 r2l_offset; + u32 l2c_offset; + u32 c2s_offset; + u32 pciin_tb_offset; + u32 pciout_tb_offset; + u32 pcie_scale_mul; + u32 pcie_scale_div; + u32 dcpr_correction; + u32 max_tp[RL_ROOT_MAX]; + struct rl_slice_cnt slices; +}; + +/** + * struct adf_rl - ratelimiting data structure + * @accel_dev: pointer to acceleration device data + * @device_data: pointer to rate limiting data specific to a device type (or revision) + * @sla: array of pointers to SLA objects + * @root: array of pointers to root type SLAs, element number reflects node_id + * @cluster: array of pointers to cluster type SLAs, element number reflects node_id + * @leaf: array of pointers to leaf type SLAs, element number reflects node_id + * @rp_in_use: array of ring pair IDs already used in one of the SLAs + * @rl_lock: mutex protecting the data in this structure + * @user_input: structure holding the data received from the user + */ +struct adf_rl { + struct adf_accel_dev *accel_dev; + struct adf_rl_hw_data *device_data; + /* mapping sla_id to SLA objects */ + struct rl_sla *sla[RL_NODES_CNT_MAX]; + struct rl_sla *root[RL_ROOT_MAX]; + struct rl_sla *cluster[RL_CLUSTER_MAX]; + struct rl_sla *leaf[RL_LEAF_MAX]; + bool rp_in_use[RL_RP_CNT_MAX]; + /* Mutex protecting writing to
SLAs lists */ + struct mutex rl_lock; + struct adf_rl_interface_data user_input; +}; + +/** + * struct rl_sla - SLA object data structure + * @parent: pointer to the parent SLA (root/cluster) + * @type: SLA type + * @srv: service associated with this SLA + * @sla_id: ID of the SLA, used as element number in SLA array and as identifier + * shared with the user + * @node_id: ID of the node; each SLA type has a separate ID list + * @cir: committed information rate + * @pir: peak information rate (PIR >= CIR) + * @rem_cir: if this SLA is a parent, the remaining CIR value available to + * its child SLAs. + * @ring_pairs_ids: array with the numeric ring pair IDs assigned to this SLA + * @ring_pairs_cnt: number of assigned ring pairs listed in the array above + */ +struct rl_sla { + struct rl_sla *parent; + enum rl_node_type type; + enum adf_base_services srv; + u32 sla_id; + u32 node_id; + u32 cir; + u32 pir; + u32 rem_cir; + u16 ring_pairs_ids[RL_RP_CNT_PER_LEAF_MAX]; + u16 ring_pairs_cnt; +}; + +int adf_rl_add_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_update_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_sla(struct adf_accel_dev *accel_dev, + struct adf_rl_sla_input_data *sla_in); +int adf_rl_get_capability_remaining(struct adf_accel_dev *accel_dev, + enum adf_base_services srv, int sla_id); +int adf_rl_remove_sla(struct adf_accel_dev *accel_dev, u32 sla_id); +void adf_rl_remove_sla_all(struct adf_accel_dev *accel_dev, bool incl_default); + +int adf_rl_init(struct adf_accel_dev *accel_dev); +int adf_rl_start(struct adf_accel_dev *accel_dev); +void adf_rl_stop(struct adf_accel_dev *accel_dev); +void adf_rl_exit(struct adf_accel_dev *accel_dev); + +u32 adf_rl_calculate_pci_bw(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type, bool is_bw_out); +u32 adf_rl_calculate_ae_cycles(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); +u32 adf_rl_calculate_slice_tokens(struct adf_accel_dev *accel_dev, u32 sla_val, + enum adf_base_services svc_type); + +#endif /* ADF_RL_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c new file mode 100644 index 0000000000000000000000000000000000000000..698a14f4ce66a831a59f46b1b420b35de0a7e8c6 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.c @@ -0,0 +1,97 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include <linux/dma-mapping.h> +#include <linux/pci.h> + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_rl_admin.h" + +static void +prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, + struct icp_qat_fw_init_admin_sla_config_params *fw_params, + struct icp_qat_fw_init_admin_req *req, bool is_update) +{ + req->cmd_id = is_update ?
ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; + req->init_cfg_ptr = dma_addr; + req->init_cfg_sz = sizeof(*fw_params); + req->node_id = sla->node_id; + req->node_type = sla->type; + req->rp_count = sla->ring_pairs_cnt; + req->svc_type = sla->srv; +} + +static void +prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, + struct icp_qat_fw_init_admin_sla_config_params *fw_params) +{ + fw_params->pcie_in_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, false); + fw_params->pcie_in_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, false); + fw_params->pcie_out_cir = + adf_rl_calculate_pci_bw(accel_dev, sla->cir, sla->srv, true); + fw_params->pcie_out_pir = + adf_rl_calculate_pci_bw(accel_dev, sla->pir, sla->srv, true); + + fw_params->slice_util_cir = + adf_rl_calculate_slice_tokens(accel_dev, sla->cir, sla->srv); + fw_params->slice_util_pir = + adf_rl_calculate_slice_tokens(accel_dev, sla->pir, sla->srv); + + fw_params->ae_util_cir = + adf_rl_calculate_ae_cycles(accel_dev, sla->cir, sla->srv); + fw_params->ae_util_pir = + adf_rl_calculate_ae_cycles(accel_dev, sla->pir, sla->srv); + + memcpy(fw_params->rp_ids, sla->ring_pairs_ids, + sizeof(sla->ring_pairs_ids)); +} + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int) +{ + struct icp_qat_fw_init_admin_slice_cnt slices_resp = { }; + int ret; + + ret = adf_send_admin_rl_init(accel_dev, &slices_resp); + if (ret) + return ret; + + slices_int->dcpr_cnt = slices_resp.dcpr_cnt; + slices_int->pke_cnt = slices_resp.pke_cnt; + /* For symmetric crypto, slice tokens are relative to the UCS slice */ + slices_int->cph_cnt = slices_resp.ucs_cnt; + + return 0; +} + +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update) +{ + struct icp_qat_fw_init_admin_sla_config_params *fw_params; + struct icp_qat_fw_init_admin_req req = { }; + dma_addr_t dma_addr; + int ret; + + fw_params = dma_alloc_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), + &dma_addr, GFP_KERNEL); + if (!fw_params) + return -ENOMEM; + + prep_admin_req_params(accel_dev, sla, fw_params); + prep_admin_req_msg(sla, dma_addr, fw_params, &req, is_update); + ret = adf_send_admin_rl_add_update(accel_dev, &req); + + dma_free_coherent(&GET_DEV(accel_dev), sizeof(*fw_params), fw_params, + dma_addr); + + return ret; +} + +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type) +{ + return adf_send_admin_rl_delete(accel_dev, node_id, node_type); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h new file mode 100644 index 0000000000000000000000000000000000000000..dd5419b7e896b2a4eca5b315ce352a07149b70f2 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_rl_admin.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RL_ADMIN_H_ +#define ADF_RL_ADMIN_H_ + +#include <linux/types.h> + +#include "adf_rl.h" + +int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, + struct rl_slice_cnt *slices_int); +int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, + struct rl_sla *sla, bool is_update); +int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, + u8 node_type); + +#endif /* ADF_RL_ADMIN_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sriov.c b/drivers/crypto/intel/qat/qat_common/adf_sriov.c index
f44025bb6f995d9bdf58bf9d6290fd5566c10192..87a70c00c41ee61c4e3dde0708fe4b3bcb4c8e16 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sriov.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sriov.c @@ -60,7 +60,6 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) /* This ptr will be populated when VFs will be created */ vf_info->accel_dev = accel_dev; vf_info->vf_nr = i; - vf_info->vf_compat_ver = 0; mutex_init(&vf_info->pf2vf_lock); ratelimit_state_init(&vf_info->vf2pf_ratelimit, @@ -84,6 +83,32 @@ static int adf_enable_sriov(struct adf_accel_dev *accel_dev) return pci_enable_sriov(pdev, totalvfs); } +void adf_reenable_sriov(struct adf_accel_dev *accel_dev) +{ + struct pci_dev *pdev = accel_to_pci_dev(accel_dev); + char cfg[ADF_CFG_MAX_VAL_LEN_IN_BYTES] = {0}; + unsigned long val = 0; + + if (adf_cfg_get_param_value(accel_dev, ADF_GENERAL_SEC, + ADF_SRIOV_ENABLED, cfg)) + return; + + if (!accel_dev->pf.vf_info) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_CY, + &val, ADF_DEC)) + return; + + if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC, ADF_NUM_DC, + &val, ADF_DEC)) + return; + + set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status); + dev_dbg(&pdev->dev, "Re-enabling SRIOV\n"); + adf_enable_sriov(accel_dev); +} + /** * adf_disable_sriov() - Disable SRIOV for the device * @accel_dev: Pointer to accel device. @@ -103,6 +128,7 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) return; adf_pf2vf_notify_restarting(accel_dev); + adf_pf2vf_wait_for_restarting_complete(accel_dev); pci_disable_sriov(accel_to_pci_dev(accel_dev)); /* Disable VF to PF interrupts */ @@ -115,8 +141,10 @@ void adf_disable_sriov(struct adf_accel_dev *accel_dev) for (i = 0, vf = accel_dev->pf.vf_info; i < totalvfs; i++, vf++) mutex_destroy(&vf->pf2vf_lock); - kfree(accel_dev->pf.vf_info); - accel_dev->pf.vf_info = NULL; + if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status)) { + kfree(accel_dev->pf.vf_info); + accel_dev->pf.vf_info = NULL; + } } EXPORT_SYMBOL_GPL(adf_disable_sriov); @@ -194,6 +222,10 @@ int adf_sriov_configure(struct pci_dev *pdev, int numvfs) if (ret) return ret; + val = 1; + adf_cfg_add_key_value_param(accel_dev, ADF_GENERAL_SEC, ADF_SRIOV_ENABLED, + &val, ADF_DEC); + return numvfs; } EXPORT_SYMBOL_GPL(adf_sriov_configure); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c index 8f04b0d3c5ac890a987d854bb8af8caf1eedc8b4..4e7f70d4049d354bd1776611dbae079fe61f3511 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_sysfs.c +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs.c @@ -8,6 +8,8 @@ #include "adf_cfg_services.h" #include "adf_common_drv.h" +#define UNSET_RING_NUM -1 + static const char * const state_operations[] = { [DEV_DOWN] = "down", [DEV_UP] = "up", @@ -61,8 +63,8 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr, } ret = adf_dev_down(accel_dev, true); - if (ret < 0) - return -EINVAL; + if (ret) + return ret; break; case DEV_UP: @@ -202,13 +204,130 @@ static ssize_t pm_idle_enabled_store(struct device *dev, struct device_attribute } static DEVICE_ATTR_RW(pm_idle_enabled); +static ssize_t auto_reset_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + char *auto_reset; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + auto_reset = accel_dev->autoreset_on_error ?
"on" : "off"; + + return sysfs_emit(buf, "%s\n", auto_reset); +} + +static ssize_t auto_reset_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + bool enabled = false; + int ret; + + ret = kstrtobool(buf, &enabled); + if (ret) + return ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + accel_dev->autoreset_on_error = enabled; + + return count; +} +static DEVICE_ATTR_RW(auto_reset); + static DEVICE_ATTR_RW(state); static DEVICE_ATTR_RW(cfg_services); +static ssize_t rp2srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_hw_device_data *hw_data; + struct adf_accel_dev *accel_dev; + enum adf_cfg_service_type svc; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + hw_data = GET_HW_DATA(accel_dev); + + if (accel_dev->sysfs.ring_num == UNSET_RING_NUM) + return -EINVAL; + + down_read(&accel_dev->sysfs.lock); + svc = GET_SRV_TYPE(accel_dev, accel_dev->sysfs.ring_num % + hw_data->num_banks_per_vf); + up_read(&accel_dev->sysfs.lock); + + switch (svc) { + case COMP: + return sysfs_emit(buf, "%s\n", ADF_CFG_DC); + case SYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_SYM); + case ASYM: + return sysfs_emit(buf, "%s\n", ADF_CFG_ASYM); + default: + break; + } + return -EINVAL; +} + +static ssize_t rp2srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + int num_rings, ret; + unsigned int ring; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ret = kstrtouint(buf, 10, &ring); + if (ret) + return ret; + + num_rings = GET_MAX_BANKS(accel_dev); + if (ring >= num_rings) { + dev_err(&GET_DEV(accel_dev), + "Device does not support more than %u ring pairs\n", + num_rings); + return -EINVAL; + } + + down_write(&accel_dev->sysfs.lock); + accel_dev->sysfs.ring_num = ring; + up_write(&accel_dev->sysfs.lock); + + return count; +} +static DEVICE_ATTR_RW(rp2srv); + +static ssize_t num_rps_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + return sysfs_emit(buf, "%u\n", GET_MAX_BANKS(accel_dev)); +} +static DEVICE_ATTR_RO(num_rps); + static struct attribute *qat_attrs[] = { &dev_attr_state.attr, &dev_attr_cfg_services.attr, &dev_attr_pm_idle_enabled.attr, + &dev_attr_rp2srv.attr, + &dev_attr_num_rps.attr, + &dev_attr_auto_reset.attr, NULL, }; @@ -227,6 +346,8 @@ int adf_sysfs_init(struct adf_accel_dev *accel_dev) "Failed to create qat attribute group: %d\n", ret); } + accel_dev->sysfs.ring_num = UNSET_RING_NUM; + return ret; } EXPORT_SYMBOL_GPL(adf_sysfs_init); diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c new file mode 100644 index 0000000000000000000000000000000000000000..e97c67c87b3cf17d124a4e97099622e170e30253 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/sysfs.h> + +#include "adf_common_drv.h" +#include "adf_sysfs_ras_counters.h" + +static ssize_t errors_correctable_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev
*accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_CORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_nonfatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_UNCORR); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t errors_fatal_show(struct device *dev, + struct device_attribute *dev_attr, + char *buf) +{ + struct adf_accel_dev *accel_dev; + unsigned long counter; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + counter = ADF_RAS_ERR_CTR_READ(accel_dev->ras_errors, ADF_RAS_FATAL); + return scnprintf(buf, PAGE_SIZE, "%ld\n", counter); +} + +static ssize_t reset_error_counters_store(struct device *dev, + struct device_attribute *dev_attr, + const char *buf, size_t count) +{ + struct adf_accel_dev *accel_dev; + + if (buf[0] != '1' || count != 2) + return -EINVAL; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + return count; +} + +static DEVICE_ATTR_RO(errors_correctable); +static DEVICE_ATTR_RO(errors_nonfatal); +static DEVICE_ATTR_RO(errors_fatal); +static DEVICE_ATTR_WO(reset_error_counters); + +static struct attribute *qat_ras_attrs[] = { + &dev_attr_errors_correctable.attr, + &dev_attr_errors_nonfatal.attr, + &dev_attr_errors_fatal.attr, + &dev_attr_reset_error_counters.attr, + NULL, +}; + +static struct attribute_group qat_ras_group = { + .attrs = qat_ras_attrs, + .name = "qat_ras", +}; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); + + if (device_add_group(&GET_DEV(accel_dev), &qat_ras_group)) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_ras attribute group.\n"); + + accel_dev->ras_errors.sysfs_added = true; +} + +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->ras_errors.enabled) + return; + + if (accel_dev->ras_errors.sysfs_added) { + device_remove_group(&GET_DEV(accel_dev), &qat_ras_group); + accel_dev->ras_errors.sysfs_added = false; + } + + ADF_RAS_ERR_CTR_CLEAR(accel_dev->ras_errors); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h new file mode 100644 index 0000000000000000000000000000000000000000..99e9d9cf57f848d9f4f4915e451aaf7e1c3a1fd4 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_ras_counters.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ + +#ifndef ADF_RAS_H +#define ADF_RAS_H + +#include <linux/atomic.h> +#include <linux/types.h> + +struct adf_accel_dev; + +void adf_sysfs_start_ras(struct adf_accel_dev *accel_dev); +void adf_sysfs_stop_ras(struct adf_accel_dev *accel_dev); + +#define ADF_RAS_ERR_CTR_READ(ras_errors, ERR) \ + atomic_read(&(ras_errors).counter[ERR]) + +#define ADF_RAS_ERR_CTR_CLEAR(ras_errors) \ + do { \ + for (int err = 0; err < ADF_RAS_ERRORS; ++err) \ + atomic_set(&(ras_errors).counter[err], 0); \ + } while (0) + +#define ADF_RAS_ERR_CTR_INC(ras_errors,
ERR) \ + atomic_inc(&(ras_errors).counter[ERR]) + +#endif /* ADF_RAS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c new file mode 100644 index 0000000000000000000000000000000000000000..bedb514d4e30424d23aeb986417b9b838283ef2f --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.c @@ -0,0 +1,459 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2023 Intel Corporation */ + +#define dev_fmt(fmt) "RateLimiting: " fmt + +#include <linux/dev_printk.h> +#include <linux/pci.h> +#include <linux/string.h> +#include <linux/sysfs.h> + +#include "adf_common_drv.h" +#include "adf_rl.h" +#include "adf_sysfs_rl.h" + +#define GET_RL_STRUCT(accel_dev) ((accel_dev)->rate_limiting->user_input) + +enum rl_ops { + ADD, + UPDATE, + RM, + RM_ALL, + GET, +}; + +enum rl_params { + RP_MASK, + ID, + CIR, + PIR, + SRV, + CAP_REM_SRV, +}; + +static const char *const rl_services[] = { + [ADF_SVC_ASYM] = "asym", + [ADF_SVC_SYM] = "sym", + [ADF_SVC_DC] = "dc", +}; + +static const char *const rl_operations[] = { + [ADD] = "add", + [UPDATE] = "update", + [RM] = "rm", + [RM_ALL] = "rm_all", + [GET] = "get", +}; + +static int set_param_u(struct device *dev, enum rl_params param, u64 set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + switch (param) { + case RP_MASK: + data->input.rp_mask = set; + break; + case CIR: + data->input.cir = set; + break; + case PIR: + data->input.pir = set; + break; + case SRV: + data->input.srv = set; + break; + case CAP_REM_SRV: + data->cap_rem_srv = set; + break; + default: + ret = -EINVAL; + break; + } + up_write(&data->lock); + + return ret; +} + +static int set_param_s(struct device *dev, enum rl_params param, int set) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev || param != ID) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_write(&data->lock); + data->input.sla_id = set; + up_write(&data->lock); + + return 0; +} + +static int get_param_u(struct device *dev, enum rl_params param, u64 *get) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + switch (param) { + case RP_MASK: + *get = data->input.rp_mask; + break; + case CIR: + *get = data->input.cir; + break; + case PIR: + *get = data->input.pir; + break; + case SRV: + *get = data->input.srv; + break; + default: + ret = -EINVAL; + } + up_read(&data->lock); + + return ret; +} + +static int get_param_s(struct device *dev, enum rl_params param) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret = 0; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + if (param == ID) + ret = data->input.sla_id; + up_read(&data->lock); + + return ret; +} + +static ssize_t rp_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, RP_MASK, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%#llx\n", get); +} + +static ssize_t rp_store(struct device *dev, struct device_attribute
*attr, + const char *buf, size_t count) +{ + int err; + u64 val; + + err = kstrtou64(buf, 16, &val); + if (err) + return err; + + err = set_param_u(dev, RP_MASK, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(rp); + +static ssize_t id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%d\n", get_param_s(dev, ID)); +} + +static ssize_t id_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int err; + int val; + + err = kstrtoint(buf, 10, &val); + if (err) + return err; + + err = set_param_s(dev, ID, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(id); + +static ssize_t cir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, CIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t cir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, CIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(cir); + +static ssize_t pir_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, PIR, &get); + if (ret) + return ret; + + return sysfs_emit(buf, "%llu\n", get); +} + +static ssize_t pir_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int err; + + err = kstrtouint(buf, 10, &val); + if (err) + return err; + + err = set_param_u(dev, PIR, val); + if (err) + return err; + + return count; +} +static DEVICE_ATTR_RW(pir); + +static ssize_t srv_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int ret; + u64 get; + + ret = get_param_u(dev, SRV, &get); + if (ret) + return ret; + + if (get == ADF_SVC_NONE) + return -EINVAL; + + return sysfs_emit(buf, "%s\n", rl_services[get]); +} + +static ssize_t srv_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(srv); + +static ssize_t cap_rem_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev *accel_dev; + int ret, rem_cap; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + down_read(&data->lock); + rem_cap = adf_rl_get_capability_remaining(accel_dev, data->cap_rem_srv, + RL_SLA_EMPTY_ID); + up_read(&data->lock); + if (rem_cap < 0) + return rem_cap; + + ret = sysfs_emit(buf, "%u\n", rem_cap); + + return ret; +} + +static ssize_t cap_rem_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + unsigned int val; + int ret; + + ret = sysfs_match_string(rl_services, buf); + if (ret < 0) + return ret; + + val = ret; + ret = set_param_u(dev, CAP_REM_SRV, val); + if (ret) + return ret; + + return count; +} +static DEVICE_ATTR_RW(cap_rem); + +static ssize_t sla_op_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct adf_rl_interface_data *data; + struct adf_accel_dev 
*accel_dev; + int ret; + + accel_dev = adf_devmgr_pci_to_accel_dev(to_pci_dev(dev)); + if (!accel_dev) + return -EINVAL; + + data = &GET_RL_STRUCT(accel_dev); + + ret = sysfs_match_string(rl_operations, buf); + if (ret < 0) + return ret; + + down_write(&data->lock); + switch (ret) { + case ADD: + data->input.parent_id = RL_PARENT_DEFAULT_ID; + data->input.type = RL_LEAF; + data->input.sla_id = 0; + ret = adf_rl_add_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case UPDATE: + ret = adf_rl_update_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + case RM: + ret = adf_rl_remove_sla(accel_dev, data->input.sla_id); + if (ret) + goto err_free_lock; + break; + case RM_ALL: + adf_rl_remove_sla_all(accel_dev, false); + break; + case GET: + ret = adf_rl_get_sla(accel_dev, &data->input); + if (ret) + goto err_free_lock; + break; + default: + ret = -EINVAL; + goto err_free_lock; + } + up_write(&data->lock); + + return count; + +err_free_lock: + up_write(&data->lock); + + return ret; +} +static DEVICE_ATTR_WO(sla_op); + +static struct attribute *qat_rl_attrs[] = { + &dev_attr_rp.attr, + &dev_attr_id.attr, + &dev_attr_cir.attr, + &dev_attr_pir.attr, + &dev_attr_srv.attr, + &dev_attr_cap_rem.attr, + &dev_attr_sla_op.attr, + NULL, +}; + +static struct attribute_group qat_rl_group = { + .attrs = qat_rl_attrs, + .name = "qat_rl", +}; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + int ret; + + data = &GET_RL_STRUCT(accel_dev); + + ret = device_add_group(&GET_DEV(accel_dev), &qat_rl_group); + if (ret) + dev_err(&GET_DEV(accel_dev), + "Failed to create qat_rl attribute group\n"); + + data->cap_rem_srv = ADF_SVC_NONE; + data->input.srv = ADF_SVC_NONE; + data->sysfs_added = true; + + return ret; +} + +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_rl_interface_data *data; + + data = &GET_RL_STRUCT(accel_dev); + if (!data->sysfs_added) + return; + + device_remove_group(&GET_DEV(accel_dev), &qat_rl_group); + data->sysfs_added = false; +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h new file mode 100644 index 0000000000000000000000000000000000000000..22d36aa8a757fcf85ed3e5a02061618ffaddca41 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_sysfs_rl.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright(c) 2023 Intel Corporation */ +#ifndef ADF_SYSFS_RL_H_ +#define ADF_SYSFS_RL_H_ + +struct adf_accel_dev; + +int adf_sysfs_rl_add(struct adf_accel_dev *accel_dev); +void adf_sysfs_rl_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_SYSFS_RL_H_ */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.c b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c new file mode 100644 index 0000000000000000000000000000000000000000..2ff714d11bd2f6aa6a634b6d656ae03d83fe4a69 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_telemetry.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. 
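 *
 * (Aside on the qat_rl group implemented above: it is a staged protocol.
 * Writes to the rp, id, cir, pir and srv attributes only fill in
 * data->input under data->lock; nothing reaches the device until an
 * operation name is written to sla_op, which dispatches to
 * adf_rl_add_sla() and friends.  A minimal userspace sketch -- the
 * device address is illustrative and error handling is omitted;
 * needs <fcntl.h>, <unistd.h>, <string.h>:
 *
 *	#define BASE "/sys/bus/pci/devices/0000:6b:00.0/qat_rl/"
 *
 *	static void wr(const char *path, const char *val)
 *	{
 *		int fd = open(path, O_WRONLY);
 *
 *		write(fd, val, strlen(val));
 *		close(fd);
 *	}
 *
 *	wr(BASE "srv", "asym");    // matched against rl_services[]
 *	wr(BASE "rp", "0x1");      // ring pair mask, parsed base 16
 *	wr(BASE "cir", "500");     // committed rate
 *	wr(BASE "pir", "750");     // peak rate
 *	wr(BASE "sla_op", "add");  // commit: calls adf_rl_add_sla()
 *
 * After a successful "add", the id attribute is meant to expose the SLA
 * id assigned by the operation through data->input.sla_id.)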
*/ +#define dev_fmt(fmt) "Telemetry: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_admin.h" +#include "adf_accel_devices.h" +#include "adf_common_drv.h" +#include "adf_telemetry.h" + +#define TL_IS_ZERO(input) ((input) == 0) + +static bool is_tl_supported(struct adf_accel_dev *accel_dev) +{ + u16 fw_caps = GET_HW_DATA(accel_dev)->fw_capabilities; + + return fw_caps & TL_CAPABILITY_BIT; +} + +static int validate_tl_data(struct adf_tl_hw_data *tl_data) +{ + if (!tl_data->dev_counters || + TL_IS_ZERO(tl_data->num_dev_counters) || + !tl_data->sl_util_counters || + !tl_data->sl_exec_counters || + !tl_data->rp_counters || + TL_IS_ZERO(tl_data->num_rp_counters)) + return -EOPNOTSUPP; + + return 0; +} + +static int adf_tl_alloc_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + struct adf_telemetry *telemetry; + int node = dev_to_node(dev); + void *tl_data_regs; + unsigned int i; + + telemetry = kzalloc_node(sizeof(*telemetry), GFP_KERNEL, node); + if (!telemetry) + return -ENOMEM; + + telemetry->rp_num_indexes = kmalloc_array(tl_data->max_rp, + sizeof(*telemetry->rp_num_indexes), + GFP_KERNEL); + if (!telemetry->rp_num_indexes) + goto err_free_tl; + + telemetry->regs_hist_buff = kmalloc_array(tl_data->num_hbuff, + sizeof(*telemetry->regs_hist_buff), + GFP_KERNEL); + if (!telemetry->regs_hist_buff) + goto err_free_rp_indexes; + + telemetry->regs_data = dma_alloc_coherent(dev, regs_sz, + &telemetry->regs_data_p, + GFP_KERNEL); + if (!telemetry->regs_data) + goto err_free_regs_hist_buff; + + for (i = 0; i < tl_data->num_hbuff; i++) { + tl_data_regs = kzalloc_node(regs_sz, GFP_KERNEL, node); + if (!tl_data_regs) + goto err_free_dma; + + telemetry->regs_hist_buff[i] = tl_data_regs; + } + + accel_dev->telemetry = telemetry; + + return 0; + +err_free_dma: + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + while (i--) + kfree(telemetry->regs_hist_buff[i]); + +err_free_regs_hist_buff: + kfree(telemetry->regs_hist_buff); +err_free_rp_indexes: + kfree(telemetry->rp_num_indexes); +err_free_tl: + kfree(telemetry); + + return -ENOMEM; +} + +static void adf_tl_free_mem(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t regs_sz = tl_data->layout_sz; + unsigned int i; + + for (i = 0; i < tl_data->num_hbuff; i++) + kfree(telemetry->regs_hist_buff[i]); + + dma_free_coherent(dev, regs_sz, telemetry->regs_data, + telemetry->regs_data_p); + + kfree(telemetry->regs_hist_buff); + kfree(telemetry->rp_num_indexes); + kfree(telemetry); + accel_dev->telemetry = NULL; +} + +static unsigned long get_next_timeout(void) +{ + return msecs_to_jiffies(ADF_TL_TIMER_INT_MS); +} + +static void snapshot_regs(struct adf_telemetry *telemetry, size_t size) +{ + void *dst = telemetry->regs_hist_buff[telemetry->hb_num]; + void *src = telemetry->regs_data; + + memcpy(dst, src, size); +} + +static void tl_work_handler(struct work_struct *work) +{ + struct delayed_work *delayed_work; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + u32 msg_cnt, old_msg_cnt; + size_t layout_sz; + u32 *regs_data; + size_t id; + + delayed_work = to_delayed_work(work); + telemetry = container_of(delayed_work, struct adf_telemetry, 
work_ctx); + tl_data = &GET_TL_DATA(telemetry->accel_dev); + regs_data = telemetry->regs_data; + + id = tl_data->msg_cnt_off / sizeof(*regs_data); + layout_sz = tl_data->layout_sz; + + if (!atomic_read(&telemetry->state)) { + cancel_delayed_work_sync(&telemetry->work_ctx); + return; + } + + msg_cnt = regs_data[id]; + old_msg_cnt = msg_cnt; + if (msg_cnt == telemetry->msg_cnt) + goto out; + + mutex_lock(&telemetry->regs_hist_lock); + + snapshot_regs(telemetry, layout_sz); + + /* Check if data changed while updating it */ + msg_cnt = regs_data[id]; + if (old_msg_cnt != msg_cnt) + snapshot_regs(telemetry, layout_sz); + + telemetry->msg_cnt = msg_cnt; + telemetry->hb_num++; + telemetry->hb_num %= telemetry->hbuffs; + + mutex_unlock(&telemetry->regs_hist_lock); + +out: + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); +} + +int adf_tl_halt(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + int ret; + + cancel_delayed_work_sync(&telemetry->work_ctx); + atomic_set(&telemetry->state, 0); + + ret = adf_send_admin_tl_stop(accel_dev); + if (ret) + dev_err(dev, "failed to stop telemetry\n"); + + return ret; +} + +int adf_tl_run(struct adf_accel_dev *accel_dev, int state) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + size_t layout_sz = tl_data->layout_sz; + int ret; + + ret = adf_send_admin_tl_start(accel_dev, telemetry->regs_data_p, + layout_sz, telemetry->rp_num_indexes, + &telemetry->slice_cnt); + if (ret) { + dev_err(dev, "failed to start telemetry\n"); + return ret; + } + + telemetry->hbuffs = state; + atomic_set(&telemetry->state, state); + + adf_misc_wq_queue_delayed_work(&telemetry->work_ctx, get_next_timeout()); + + return 0; +} + +int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + struct device *dev = &GET_DEV(accel_dev); + struct adf_telemetry *telemetry; + unsigned int i; + int ret; + + ret = validate_tl_data(tl_data); + if (ret) + return ret; + + ret = adf_tl_alloc_mem(accel_dev); + if (ret) { + dev_err(dev, "failed to initialize: %d\n", ret); + return ret; + } + + telemetry = accel_dev->telemetry; + telemetry->accel_dev = accel_dev; + + mutex_init(&telemetry->wr_lock); + mutex_init(&telemetry->regs_hist_lock); + INIT_DELAYED_WORK(&telemetry->work_ctx, tl_work_handler); + + for (i = 0; i < max_rp; i++) + telemetry->rp_num_indexes[i] = ADF_TL_RP_REGS_DISABLED; + + return 0; +} + +int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + struct device *dev = &GET_DEV(accel_dev); + + if (!accel_dev->telemetry) + return -EOPNOTSUPP; + + if (!is_tl_supported(accel_dev)) { + dev_info(dev, "feature not supported by FW\n"); + adf_tl_free_mem(accel_dev); + return -EOPNOTSUPP; + } + + return 0; +} + +void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + if (atomic_read(&accel_dev->telemetry->state)) + adf_tl_halt(accel_dev); +} + +void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ + if (!accel_dev->telemetry) + return; + + adf_tl_free_mem(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_telemetry.h b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h new file mode 100644 index 0000000000000000000000000000000000000000..9be81cd3b886064ca24cd946346e0f344e1f4251 --- /dev/null +++ 
b/drivers/crypto/intel/qat/qat_common/adf_telemetry.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. */ +#ifndef ADF_TELEMETRY_H +#define ADF_TELEMETRY_H + +#include +#include +#include +#include + +#include "icp_qat_fw_init_admin.h" + +struct adf_accel_dev; +struct adf_tl_dbg_counter; +struct dentry; + +#define ADF_TL_SL_CNT_COUNT \ + (sizeof(struct icp_qat_fw_init_admin_slice_cnt) / sizeof(__u8)) + +#define TL_CAPABILITY_BIT BIT(1) +/* Interval within device writes data to DMA region. Value in milliseconds. */ +#define ADF_TL_DATA_WR_INTERVAL_MS 1000 +/* Interval within timer interrupt should be handled. Value in milliseconds. */ +#define ADF_TL_TIMER_INT_MS (ADF_TL_DATA_WR_INTERVAL_MS / 2) + +#define ADF_TL_RP_REGS_DISABLED (0xff) + +struct adf_tl_hw_data { + size_t layout_sz; + size_t slice_reg_sz; + size_t rp_reg_sz; + size_t msg_cnt_off; + const struct adf_tl_dbg_counter *dev_counters; + const struct adf_tl_dbg_counter *sl_util_counters; + const struct adf_tl_dbg_counter *sl_exec_counters; + const struct adf_tl_dbg_counter *rp_counters; + u8 num_hbuff; + u8 cpp_ns_per_cycle; + u8 bw_units_to_bytes; + u8 num_dev_counters; + u8 num_rp_counters; + u8 max_rp; +}; + +struct adf_telemetry { + struct adf_accel_dev *accel_dev; + atomic_t state; + u32 hbuffs; + int hb_num; + u32 msg_cnt; + dma_addr_t regs_data_p; /* bus address for DMA mapping */ + void *regs_data; /* virtual address for DMA mapping */ + /** + * @regs_hist_buff: array of pointers to copies of the last @hbuffs + * values of @regs_data + */ + void **regs_hist_buff; + struct dentry *dbg_dir; + u8 *rp_num_indexes; + /** + * @regs_hist_lock: protects from race conditions between write and read + * to the copies referenced by @regs_hist_buff + */ + struct mutex regs_hist_lock; + /** + * @wr_lock: protects from concurrent writes to debugfs telemetry files + */ + struct mutex wr_lock; + struct delayed_work work_ctx; + struct icp_qat_fw_init_admin_slice_cnt slice_cnt; +}; + +#ifdef CONFIG_DEBUG_FS +int adf_tl_init(struct adf_accel_dev *accel_dev); +int adf_tl_start(struct adf_accel_dev *accel_dev); +void adf_tl_stop(struct adf_accel_dev *accel_dev); +void adf_tl_shutdown(struct adf_accel_dev *accel_dev); +int adf_tl_run(struct adf_accel_dev *accel_dev, int state); +int adf_tl_halt(struct adf_accel_dev *accel_dev); +#else +static inline int adf_tl_init(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline int adf_tl_start(struct adf_accel_dev *accel_dev) +{ + return 0; +} + +static inline void adf_tl_stop(struct adf_accel_dev *accel_dev) +{ +} + +static inline void adf_tl_shutdown(struct adf_accel_dev *accel_dev) +{ +} +#endif /* CONFIG_DEBUG_FS */ +#endif /* ADF_TELEMETRY_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c new file mode 100644 index 0000000000000000000000000000000000000000..c8241f5a0a26ee996525a0038a5d74b3e15ec094 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.c @@ -0,0 +1,710 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2023 Intel Corporation. 
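 *
 * (Note on the history ring used by the collectors below: the telemetry
 * worker stores up to telemetry->hbuffs snapshots in regs_hist_buff[],
 * with hb_num always pointing at the slot to be overwritten next.  The
 * tl_collect_values_*() helpers therefore read back the last
 * min(msg_cnt, hbuffs) snapshots oldest-first, starting at
 * hb_num + hbuffs - samples and wrapping modulo hbuffs.  For example,
 * with hbuffs = 4, hb_num = 1 and at least four messages received, the
 * walk visits slots 1, 2, 3, 0 -- oldest to newest.)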
*/ +#define dev_fmt(fmt) "Telemetry debugfs: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "adf_accel_devices.h" +#include "adf_cfg_strings.h" +#include "adf_telemetry.h" +#include "adf_tl_debugfs.h" + +#define TL_VALUE_MIN_PADDING 20 +#define TL_KEY_MIN_PADDING 23 +#define TL_RP_SRV_UNKNOWN "Unknown" + +static int tl_collect_values_u32(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u32 *regs_hist_buff; + u32 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +static int tl_collect_values_u64(struct adf_telemetry *telemetry, + size_t counter_offset, u64 *arr) +{ + unsigned int samples, hb_idx, i; + u64 *regs_hist_buff; + u64 counter_val; + + samples = min(telemetry->msg_cnt, telemetry->hbuffs); + hb_idx = telemetry->hb_num + telemetry->hbuffs - samples; + + mutex_lock(&telemetry->regs_hist_lock); + + for (i = 0; i < samples; i++) { + regs_hist_buff = telemetry->regs_hist_buff[hb_idx % telemetry->hbuffs]; + counter_val = regs_hist_buff[counter_offset / sizeof(counter_val)]; + arr[i] = counter_val; + hb_idx++; + } + + mutex_unlock(&telemetry->regs_hist_lock); + + return samples; +} + +/** + * avg_array() - Return average of values within an array. + * @array: Array of values. + * @len: Number of elements. + * + * This algorithm computes average of an array without running into overflow. + * + * Return: average of values. + */ +#define avg_array(array, len) ( \ +{ \ + typeof(&(array)[0]) _array = (array); \ + __unqual_scalar_typeof(_array[0]) _x = 0; \ + __unqual_scalar_typeof(_array[0]) _y = 0; \ + __unqual_scalar_typeof(_array[0]) _a, _b; \ + typeof(len) _len = (len); \ + size_t _i; \ + \ + for (_i = 0; _i < _len; _i++) { \ + _a = _array[_i]; \ + _b = do_div(_a, _len); \ + _x += _a; \ + if (_y >= _len - _b) { \ + _x++; \ + _y -= _len - _b; \ + } else { \ + _y += _b; \ + } \ + } \ + do_div(_y, _len); \ + (_x + _y); \ +}) + +/* Calculation function for simple counter. */ +static int tl_calc_count(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert CPP bus cycles to ns. 
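 *
 * (Aside on the avg_array() helper above: instead of summing raw samples
 * -- which could overflow u64 -- it accumulates each element's quotient
 * and remainder with respect to len separately, carrying a unit into the
 * quotient sum whenever the remainders complete another multiple of len.
 * The same logic in plain C, for illustration only, with do_div()
 * spelled as / and %:
 *
 *	u64 avg(const u64 *a, size_t len)
 *	{
 *		u64 q = 0, r = 0;	// quotient sum; remainder, kept < len
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			q += a[i] / len;
 *			if (r >= len - a[i] % len) {
 *				q++;
 *				r -= len - a[i] % len;
 *			} else {
 *				r += a[i] % len;
 *			}
 *		}
 *		return q + r / len;	// r < len, so r / len is 0
 *	}
 *
 * E.g. for {7, 8, 10} with len = 3: the quotients contribute
 * 2 + 2 + 3 = 7 and the remainders 1, 2, 1 complete one extra multiple
 * of 3, giving 8 -- i.e. floor(25 / 3).)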
*/ +static int tl_cycles_to_ns(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + int ret; + + ret = tl_calc_count(telemetry, ctr, vals); + if (ret) + return ret; + + vals->curr *= cpp_ns_per_cycle; + vals->min *= cpp_ns_per_cycle; + vals->max *= cpp_ns_per_cycle; + vals->avg *= cpp_ns_per_cycle; + + return 0; +} + +/* + * Compute latency cumulative average with division of accumulated value + * by sample count. Returned value is in ns. + */ +static int tl_lat_acc_avg(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u8 cpp_ns_per_cycle = tl_data->cpp_ns_per_cycle; + u8 num_hbuff = tl_data->num_hbuff; + int sample_cnt, i; + u64 *hist_vals; + u64 *hist_cnt; + int ret = 0; + + hist_vals = kmalloc_array(num_hbuff, sizeof(*hist_vals), GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + hist_cnt = kmalloc_array(num_hbuff, sizeof(*hist_cnt), GFP_KERNEL); + if (!hist_cnt) { + ret = -ENOMEM; + goto out_free_hist_vals; + } + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u64(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_cnt; + + tl_collect_values_u32(telemetry, ctr->offset2, hist_cnt); + + for (i = 0; i < sample_cnt; i++) { + /* Avoid division by 0 if count is 0. */ + if (hist_cnt[i]) + hist_vals[i] = div_u64(hist_vals[i] * cpp_ns_per_cycle, + hist_cnt[i]); + else + hist_vals[i] = 0; + } + + vals->curr = hist_vals[sample_cnt - 1]; + vals->min = min_array(hist_vals, sample_cnt); + vals->max = max_array(hist_vals, sample_cnt); + vals->avg = avg_array(hist_vals, sample_cnt); + +out_free_hist_cnt: + kfree(hist_cnt); +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +/* Convert HW raw bandwidth units to Mbps. 
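 *
 * (The device reports bw_in/bw_out in hardware units of
 * tl_data->bw_units_to_bytes bytes, sampled over the 1000 ms reporting
 * interval, so one raw unit per interval is bw_units_to_bytes * 8 bits
 * per second.  Assuming, for illustration, bw_units_to_bytes = 64: a raw
 * sample of 1,000,000 units is 64,000,000 bytes, i.e. 512,000,000 bits,
 * and the function below prints 512 Mbps after dividing by MEGA.)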
*/ +static int tl_bw_hw_units_to_mbps(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct adf_tl_dbg_aggr_values *vals) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(telemetry->accel_dev); + u16 bw_hw_2_bits = tl_data->bw_units_to_bytes * BITS_PER_BYTE; + u64 *hist_vals; + int sample_cnt; + int ret = 0; + + hist_vals = kmalloc_array(tl_data->num_hbuff, sizeof(*hist_vals), + GFP_KERNEL); + if (!hist_vals) + return -ENOMEM; + + memset(vals, 0, sizeof(*vals)); + sample_cnt = tl_collect_values_u32(telemetry, ctr->offset1, hist_vals); + if (!sample_cnt) + goto out_free_hist_vals; + + vals->curr = div_u64(hist_vals[sample_cnt - 1] * bw_hw_2_bits, MEGA); + vals->min = div_u64(min_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->max = div_u64(max_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + vals->avg = div_u64(avg_array(hist_vals, sample_cnt) * bw_hw_2_bits, MEGA); + +out_free_hist_vals: + kfree(hist_vals); + return ret; +} + +static void tl_seq_printf_counter(struct adf_telemetry *telemetry, + struct seq_file *s, const char *name, + struct adf_tl_dbg_aggr_values *vals) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, name); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->curr); + if (atomic_read(&telemetry->state) > 1) { + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->min); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->max); + seq_printf(s, "%*llu", TL_VALUE_MIN_PADDING, vals->avg); + } + seq_puts(s, "\n"); +} + +static int tl_calc_and_print_counter(struct adf_telemetry *telemetry, + struct seq_file *s, + const struct adf_tl_dbg_counter *ctr, + const char *name) +{ + const char *counter_name = name ? name : ctr->name; + enum adf_tl_counter_type type = ctr->type; + struct adf_tl_dbg_aggr_values vals; + int ret; + + switch (type) { + case ADF_TL_SIMPLE_COUNT: + ret = tl_calc_count(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS: + ret = tl_cycles_to_ns(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_NS_AVG: + ret = tl_lat_acc_avg(telemetry, ctr, &vals); + break; + case ADF_TL_COUNTER_MBPS: + ret = tl_bw_hw_units_to_mbps(telemetry, ctr, &vals); + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + tl_seq_printf_counter(telemetry, s, counter_name, &vals); + + return 0; +} + +static int tl_print_sl_counter(struct adf_telemetry *telemetry, + const struct adf_tl_dbg_counter *ctr, + struct seq_file *s, u8 cnt_id) +{ + size_t sl_regs_sz = GET_TL_DATA(telemetry->accel_dev).slice_reg_sz; + struct adf_tl_dbg_counter slice_ctr; + size_t offset_inc = cnt_id * sl_regs_sz; + char cnt_name[MAX_COUNT_NAME_SIZE]; + + snprintf(cnt_name, MAX_COUNT_NAME_SIZE, "%s%d", ctr->name, cnt_id); + slice_ctr = *ctr; + slice_ctr.offset1 += offset_inc; + + return tl_calc_and_print_counter(telemetry, s, &slice_ctr, cnt_name); +} + +static int tl_calc_and_print_sl_counters(struct adf_accel_dev *accel_dev, + struct seq_file *s, u8 cnt_type, u8 cnt_id) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *sl_tl_util_counters; + const struct adf_tl_dbg_counter *sl_tl_exec_counters; + const struct adf_tl_dbg_counter *ctr; + int ret; + + sl_tl_util_counters = tl_data->sl_util_counters; + sl_tl_exec_counters = tl_data->sl_exec_counters; + + ctr = &sl_tl_util_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice utilization counter type\n"); + 
return ret; + } + + ctr = &sl_tl_exec_counters[cnt_type]; + + ret = tl_print_sl_counter(telemetry, ctr, s, cnt_id); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid slice execution counter type\n"); + return ret; + } + + return 0; +} + +static void tl_print_msg_cnt(struct seq_file *s, u32 msg_cnt) +{ + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, SNAPSHOT_CNT_MSG); + seq_printf(s, "%*u\n", TL_VALUE_MIN_PADDING, msg_cnt); +} + +static int tl_print_dev_data(struct adf_accel_dev *accel_dev, + struct seq_file *s) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *dev_tl_counters; + u8 num_dev_counters = tl_data->num_dev_counters; + u8 *sl_cnt = (u8 *)&telemetry->slice_cnt; + const struct adf_tl_dbg_counter *ctr; + unsigned int i; + int ret; + u8 j; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + dev_tl_counters = tl_data->dev_counters; + + tl_print_msg_cnt(s, telemetry->msg_cnt); + + /* Print device level telemetry. */ + for (i = 0; i < num_dev_counters; i++) { + ctr = &dev_tl_counters[i]; + ret = tl_calc_and_print_counter(telemetry, s, ctr, NULL); + if (ret) { + dev_notice(&GET_DEV(accel_dev), + "invalid counter type\n"); + return ret; + } + } + + /* Print per slice telemetry. */ + for (i = 0; i < ADF_TL_SL_CNT_COUNT; i++) { + for (j = 0; j < sl_cnt[i]; j++) { + ret = tl_calc_and_print_sl_counters(accel_dev, s, i, j); + if (ret) + return ret; + } + } + + return 0; +} + +static int tl_dev_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + return tl_print_dev_data(accel_dev, s); +} +DEFINE_SHOW_ATTRIBUTE(tl_dev_data); + +static int tl_control_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + + if (!accel_dev) + return -EINVAL; + + seq_printf(s, "%d\n", atomic_read(&accel_dev->telemetry->state)); + + return 0; +} + +static ssize_t tl_control_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + struct adf_tl_hw_data *tl_data; + struct device *dev; + u32 input; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + tl_data = &GET_TL_DATA(accel_dev); + telemetry = accel_dev->telemetry; + dev = &GET_DEV(accel_dev); + + mutex_lock(&telemetry->wr_lock); + + ret = kstrtou32_from_user(userbuf, count, 10, &input); + if (ret) + goto unlock_and_exit; + + if (input > tl_data->num_hbuff) { + dev_info(dev, "invalid control input\n"); + ret = -EINVAL; + goto unlock_and_exit; + } + + /* If input is 0, just stop telemetry. */ + if (!input) { + ret = adf_tl_halt(accel_dev); + if (!ret) + ret = count; + + goto unlock_and_exit; + } + + /* If TL is already enabled, stop it. 
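	 * A non-zero write while running is treated as stop + start rather
	 * than an in-place update, because adf_tl_run() re-arms the
	 * firmware and uses the new value as the history-buffer depth
	 * (telemetry->hbuffs).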
*/ + if (atomic_read(&telemetry->state)) { + dev_info(dev, "already enabled, restarting.\n"); + ret = adf_tl_halt(accel_dev); + if (ret) + goto unlock_and_exit; + } + + ret = adf_tl_run(accel_dev, input); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_control); + +static int get_rp_index_from_file(const struct file *f, u8 *rp_id, u8 rp_num) +{ + char alpha; + u8 index; + int ret; + + ret = sscanf(f->f_path.dentry->d_name.name, ADF_TL_RP_REGS_FNAME, &alpha); + if (ret != 1) + return -EINVAL; + + index = ADF_TL_DBG_RP_INDEX_ALPHA(alpha); + *rp_id = index; + + return 0; +} + +static int adf_tl_dbg_change_rp_index(struct adf_accel_dev *accel_dev, + unsigned int new_rp_num, + unsigned int rp_regs_index) +{ + struct adf_hw_device_data *hw_data = GET_HW_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct device *dev = &GET_DEV(accel_dev); + unsigned int i; + u8 curr_state; + int ret; + + if (new_rp_num >= hw_data->num_rps) { + dev_info(dev, "invalid Ring Pair number selected\n"); + return -EINVAL; + } + + for (i = 0; i < hw_data->tl_data.max_rp; i++) { + if (telemetry->rp_num_indexes[i] == new_rp_num) { + dev_info(dev, "RP nr: %d is already selected in slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(i)); + return 0; + } + } + + dev_dbg(dev, "selecting RP nr %u into slot rp_%c_data\n", + new_rp_num, ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + + curr_state = atomic_read(&telemetry->state); + + if (curr_state) { + ret = adf_tl_halt(accel_dev); + if (ret) + return ret; + + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + + ret = adf_tl_run(accel_dev, curr_state); + if (ret) + return ret; + } else { + telemetry->rp_num_indexes[rp_regs_index] = new_rp_num; + } + + return 0; +} + +static void tl_print_rp_srv(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_idx) +{ + u32 banks_per_vf = GET_HW_DATA(accel_dev)->num_banks_per_vf; + enum adf_cfg_service_type svc; + + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_SERVICE_TYPE); + + svc = GET_SRV_TYPE(accel_dev, rp_idx % banks_per_vf); + switch (svc) { + case COMP: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_DC); + break; + case SYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_SYM); + break; + case ASYM: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, ADF_CFG_ASYM); + break; + default: + seq_printf(s, "%*s\n", TL_VALUE_MIN_PADDING, TL_RP_SRV_UNKNOWN); + break; + } +} + +static int tl_print_rp_data(struct adf_accel_dev *accel_dev, struct seq_file *s, + u8 rp_regs_index) +{ + struct adf_tl_hw_data *tl_data = &GET_TL_DATA(accel_dev); + struct adf_telemetry *telemetry = accel_dev->telemetry; + const struct adf_tl_dbg_counter *rp_tl_counters; + u8 num_rp_counters = tl_data->num_rp_counters; + size_t rp_regs_sz = tl_data->rp_reg_sz; + struct adf_tl_dbg_counter ctr; + unsigned int i; + u8 rp_idx; + int ret; + + if (!atomic_read(&telemetry->state)) { + dev_info(&GET_DEV(accel_dev), "not enabled\n"); + return -EPERM; + } + + rp_tl_counters = tl_data->rp_counters; + rp_idx = telemetry->rp_num_indexes[rp_regs_index]; + + if (rp_idx == ADF_TL_RP_REGS_DISABLED) { + dev_info(&GET_DEV(accel_dev), "no RP number selected in rp_%c_data\n", + ADF_TL_DBG_RP_ALPHA_INDEX(rp_regs_index)); + return -EPERM; + } + + tl_print_msg_cnt(s, telemetry->msg_cnt); + seq_printf(s, "%-*s", TL_KEY_MIN_PADDING, RP_NUM_INDEX); + seq_printf(s, "%*d\n", TL_VALUE_MIN_PADDING, rp_idx); + 
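	/*
	 * The service line printed next is inferred from the ring pair's
	 * position within a bank window: tl_print_rp_srv() above resolves
	 * GET_SRV_TYPE() with rp_idx % num_banks_per_vf.
	 */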
tl_print_rp_srv(accel_dev, s, rp_idx); + + for (i = 0; i < num_rp_counters; i++) { + ctr = rp_tl_counters[i]; + ctr.offset1 += rp_regs_sz * rp_regs_index; + ctr.offset2 += rp_regs_sz * rp_regs_index; + ret = tl_calc_and_print_counter(telemetry, s, &ctr, NULL); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), + "invalid RP counter type\n"); + return ret; + } + } + + return 0; +} + +static int tl_rp_data_show(struct seq_file *s, void *unused) +{ + struct adf_accel_dev *accel_dev = s->private; + u8 rp_regs_index; + u8 max_rp; + int ret; + + if (!accel_dev) + return -EINVAL; + + max_rp = GET_TL_DATA(accel_dev).max_rp; + ret = get_rp_index_from_file(s->file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + return ret; + } + + return tl_print_rp_data(accel_dev, s, rp_regs_index); +} + +static ssize_t tl_rp_data_write(struct file *file, const char __user *userbuf, + size_t count, loff_t *ppos) +{ + struct seq_file *seq_f = file->private_data; + struct adf_accel_dev *accel_dev; + struct adf_telemetry *telemetry; + unsigned int new_rp_num; + u8 rp_regs_index; + u8 max_rp; + int ret; + + accel_dev = seq_f->private; + if (!accel_dev) + return -EINVAL; + + telemetry = accel_dev->telemetry; + max_rp = GET_TL_DATA(accel_dev).max_rp; + + mutex_lock(&telemetry->wr_lock); + + ret = get_rp_index_from_file(file, &rp_regs_index, max_rp); + if (ret) { + dev_dbg(&GET_DEV(accel_dev), "invalid RP data file name\n"); + goto unlock_and_exit; + } + + ret = kstrtou32_from_user(userbuf, count, 10, &new_rp_num); + if (ret) + goto unlock_and_exit; + + ret = adf_tl_dbg_change_rp_index(accel_dev, new_rp_num, rp_regs_index); + if (ret) + goto unlock_and_exit; + + ret = count; + +unlock_and_exit: + mutex_unlock(&telemetry->wr_lock); + return ret; +} +DEFINE_SHOW_STORE_ATTRIBUTE(tl_rp_data); + +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *parent = accel_dev->debugfs_dir; + u8 max_rp = GET_TL_DATA(accel_dev).max_rp; + char name[ADF_TL_RP_REGS_FNAME_SIZE]; + struct dentry *dir; + unsigned int i; + + if (!telemetry) + return; + + dir = debugfs_create_dir("telemetry", parent); + accel_dev->telemetry->dbg_dir = dir; + debugfs_create_file("device_data", 0444, dir, accel_dev, &tl_dev_data_fops); + debugfs_create_file("control", 0644, dir, accel_dev, &tl_control_fops); + + for (i = 0; i < max_rp; i++) { + snprintf(name, sizeof(name), ADF_TL_RP_REGS_FNAME, + ADF_TL_DBG_RP_ALPHA_INDEX(i)); + debugfs_create_file(name, 0644, dir, accel_dev, &tl_rp_data_fops); + } +} + +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev) +{ + struct adf_telemetry *telemetry = accel_dev->telemetry; + struct dentry *dbg_dir; + + if (!telemetry) + return; + + dbg_dir = telemetry->dbg_dir; + + debugfs_remove_recursive(dbg_dir); + + if (atomic_read(&telemetry->state)) + adf_tl_halt(accel_dev); +} diff --git a/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h new file mode 100644 index 0000000000000000000000000000000000000000..11cc9eae19b37af1b5b484456af49f646617fc72 --- /dev/null +++ b/drivers/crypto/intel/qat/qat_common/adf_tl_debugfs.h @@ -0,0 +1,117 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (c) 2023 Intel Corporation. 
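 *
 * (Resulting layout, for a hypothetical device with max_rp = 4:
 * adf_tl_dbgfs_add() above creates, under the device's debugfs root,
 *
 *	telemetry/
 *	    control                  <- tl_control_fops, 0644
 *	    device_data              <- tl_dev_data_fops, 0444
 *	    rp_A_data ... rp_D_data  <- tl_rp_data_fops, 0644
 *
 * with the rp_X_data names generated from ADF_TL_RP_REGS_FNAME and
 * ADF_TL_DBG_RP_ALPHA_INDEX().)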
*/ +#ifndef ADF_TL_DEBUGFS_H +#define ADF_TL_DEBUGFS_H + +#include + +struct adf_accel_dev; + +#define MAX_COUNT_NAME_SIZE 32 +#define SNAPSHOT_CNT_MSG "sample_cnt" +#define RP_NUM_INDEX "rp_num" +#define PCI_TRANS_CNT_NAME "pci_trans_cnt" +#define MAX_RD_LAT_NAME "max_rd_lat" +#define RD_LAT_ACC_NAME "rd_lat_acc_avg" +#define MAX_LAT_NAME "max_gp_lat" +#define LAT_ACC_NAME "gp_lat_acc_avg" +#define BW_IN_NAME "bw_in" +#define BW_OUT_NAME "bw_out" +#define PAGE_REQ_LAT_NAME "at_page_req_lat_avg" +#define AT_TRANS_LAT_NAME "at_trans_lat_avg" +#define AT_MAX_UTLB_USED_NAME "at_max_tlb_used" +#define AT_GLOB_DTLB_HIT_NAME "at_glob_devtlb_hit" +#define AT_GLOB_DTLB_MISS_NAME "at_glob_devtlb_miss" +#define AT_PAYLD_DTLB_HIT_NAME "tl_at_payld_devtlb_hit" +#define AT_PAYLD_DTLB_MISS_NAME "tl_at_payld_devtlb_miss" +#define RP_SERVICE_TYPE "service_type" + +#define ADF_TL_DBG_RP_ALPHA_INDEX(index) ((index) + 'A') +#define ADF_TL_DBG_RP_INDEX_ALPHA(alpha) ((alpha) - 'A') + +#define ADF_TL_RP_REGS_FNAME "rp_%c_data" +#define ADF_TL_RP_REGS_FNAME_SIZE 16 + +#define ADF_TL_DATA_REG_OFF(reg, qat_gen) \ + offsetof(struct adf_##qat_gen##_tl_layout, reg) + +#define ADF_TL_DEV_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_device_data_regs, qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_device_data_regs, reg)) + +#define ADF_TL_SLICE_REG_OFF(slice, reg, qat_gen) \ + (ADF_TL_DEV_REG_OFF(slice##_slices[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_slice_data_regs, reg)) + +#define ADF_TL_RP_REG_OFF(reg, qat_gen) \ + (ADF_TL_DATA_REG_OFF(tl_ring_pairs_data_regs[0], qat_gen) + \ + offsetof(struct adf_##qat_gen##_tl_ring_pair_data_regs, reg)) + +/** + * enum adf_tl_counter_type - telemetry counter types + * @ADF_TL_COUNTER_UNSUPPORTED: unsupported counter + * @ADF_TL_SIMPLE_COUNT: simple counter + * @ADF_TL_COUNTER_NS: latency counter, value in ns + * @ADF_TL_COUNTER_NS_AVG: accumulated average latency counter, value in ns + * @ADF_TL_COUNTER_MBPS: bandwidth, value in MBps + */ +enum adf_tl_counter_type { + ADF_TL_COUNTER_UNSUPPORTED, + ADF_TL_SIMPLE_COUNT, + ADF_TL_COUNTER_NS, + ADF_TL_COUNTER_NS_AVG, + ADF_TL_COUNTER_MBPS, +}; + +/** + * struct adf_tl_dbg_counter - telemetry counter definition + * @name: name of the counter as printed in the report + * @adf_tl_counter_type: type of the counter + * @offset1: offset of 1st register + * @offset2: offset of 2nd optional register + */ +struct adf_tl_dbg_counter { + const char *name; + enum adf_tl_counter_type type; + size_t offset1; + size_t offset2; +}; + +#define ADF_TL_COUNTER(_name, _type, _offset) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset \ +} + +#define ADF_TL_COUNTER_LATENCY(_name, _type, _offset1, _offset2) \ +{ .name = _name, \ + .type = _type, \ + .offset1 = _offset1, \ + .offset2 = _offset2 \ +} + +/* Telemetry counter aggregated values. */ +struct adf_tl_dbg_aggr_values { + u64 curr; + u64 min; + u64 max; + u64 avg; +}; + +/** + * adf_tl_dbgfs_add() - Add telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Creates telemetry's debug fs folder and attributes in QAT debug fs root. + */ +void adf_tl_dbgfs_add(struct adf_accel_dev *accel_dev); + +/** + * adf_tl_dbgfs_rm() - Remove telemetry's debug fs entries. + * @accel_dev: Pointer to acceleration device. + * + * Removes telemetry's debug fs folder and attributes from QAT debug fs root. 
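 *
 * (For reference, how the offset helpers earlier in this header compose;
 * the struct and field names here are hypothetical:
 *
 *	ADF_TL_COUNTER(PCI_TRANS_CNT_NAME, ADF_TL_SIMPLE_COUNT,
 *		       ADF_TL_DEV_REG_OFF(reg_tl_pci_trans_cnt, gen4))
 *
 * builds an adf_tl_dbg_counter whose offset1 is
 * offsetof(struct adf_gen4_tl_layout, tl_device_data_regs) +
 * offsetof(struct adf_gen4_tl_device_data_regs, reg_tl_pci_trans_cnt),
 * i.e. the counter's byte offset inside the DMA-mapped layout that
 * tl_collect_values_u32() indexes.)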
+ */ +void adf_tl_dbgfs_rm(struct adf_accel_dev *accel_dev); + +#endif /* ADF_TL_DEBUGFS_H */ diff --git a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c index b05c3957a16019a63d3c6568c9573cef22d838b5..cdbb2d687b1b0dfc65226c7e058bcb24b2d430ec 100644 --- a/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c +++ b/drivers/crypto/intel/qat/qat_common/adf_vf_isr.c @@ -293,8 +293,6 @@ EXPORT_SYMBOL_GPL(adf_flush_vf_wq); /** * adf_init_vf_wq() - Init workqueue for VF * - * Function init workqueue 'adf_vf_stop_wq' for VF. - * * Return: 0 on success, error code otherwise. */ int __init adf_init_vf_wq(void) diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h index 019a6443834e0b11ca395e565f7ae410303ff5fc..63cf18e2a4e57d4d9e24b53754895825f7de83d2 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_init_admin.h @@ -5,6 +5,8 @@ #include "icp_qat_fw.h" +#define RL_MAX_RP_IDS 16 + enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_INIT_AE = 0, ICP_QAT_FW_TRNG_ENABLE = 1, @@ -16,10 +18,19 @@ enum icp_qat_fw_init_admin_cmd_id { ICP_QAT_FW_HEARTBEAT_SYNC = 7, ICP_QAT_FW_HEARTBEAT_GET = 8, ICP_QAT_FW_COMP_CAPABILITY_GET = 9, + ICP_QAT_FW_CRYPTO_CAPABILITY_GET = 10, ICP_QAT_FW_DC_CHAIN_INIT = 11, ICP_QAT_FW_HEARTBEAT_TIMER_SET = 13, + ICP_QAT_FW_RL_INIT = 15, ICP_QAT_FW_TIMER_GET = 19, + ICP_QAT_FW_CNV_STATS_GET = 20, ICP_QAT_FW_PM_STATE_CONFIG = 128, + ICP_QAT_FW_PM_INFO = 129, + ICP_QAT_FW_RL_ADD = 134, + ICP_QAT_FW_RL_UPDATE = 135, + ICP_QAT_FW_RL_REMOVE = 136, + ICP_QAT_FW_TL_START = 137, + ICP_QAT_FW_TL_STOP = 138, }; enum icp_qat_fw_init_admin_resp_status { @@ -27,6 +38,37 @@ enum icp_qat_fw_init_admin_resp_status { ICP_QAT_FW_INIT_RESP_STATUS_FAIL }; +struct icp_qat_fw_init_admin_tl_rp_indexes { + __u8 rp_num_index_0; + __u8 rp_num_index_1; + __u8 rp_num_index_2; + __u8 rp_num_index_3; +}; + +struct icp_qat_fw_init_admin_slice_cnt { + __u8 cpr_cnt; + __u8 xlt_cnt; + __u8 dcpr_cnt; + __u8 pke_cnt; + __u8 wat_cnt; + __u8 wcp_cnt; + __u8 ucs_cnt; + __u8 cph_cnt; + __u8 ath_cnt; +}; + +struct icp_qat_fw_init_admin_sla_config_params { + __u32 pcie_in_cir; + __u32 pcie_in_pir; + __u32 pcie_out_cir; + __u32 pcie_out_pir; + __u32 slice_util_cir; + __u32 slice_util_pir; + __u32 ae_util_cir; + __u32 ae_util_pir; + __u16 rp_ids[RL_MAX_RP_IDS]; +}; + struct icp_qat_fw_init_admin_req { __u16 init_cfg_sz; __u8 resrvd1; @@ -46,7 +88,15 @@ struct icp_qat_fw_init_admin_req { struct { __u32 heartbeat_ticks; }; + struct { + __u16 node_id; + __u8 node_type; + __u8 svc_type; + __u8 resrvd5[3]; + __u8 rp_count; + }; __u32 idle_filter; + struct icp_qat_fw_init_admin_tl_rp_indexes rp_indexes; }; __u32 resrvd4; @@ -64,6 +114,10 @@ struct icp_qat_fw_init_admin_resp { __u16 version_major_num; }; __u32 extended_features; + struct { + __u16 error_count; + __u16 latest_error; + }; }; __u64 opaque_data; union { @@ -103,9 +157,46 @@ struct icp_qat_fw_init_admin_resp { __u32 unsuccessful_count; __u64 resrvd8; }; + struct icp_qat_fw_init_admin_slice_cnt slices; + __u16 fw_capabilities; }; } __packed; #define ICP_QAT_FW_SYNC ICP_QAT_FW_HEARTBEAT_SYNC +#define ICP_QAT_FW_CAPABILITIES_GET ICP_QAT_FW_CRYPTO_CAPABILITY_GET + +#define ICP_QAT_NUMBER_OF_PM_EVENTS 8 + +struct icp_qat_fw_init_admin_pm_info { + __u16 max_pwrreq; + __u16 min_pwrreq; + __u16 resvrd1; + __u8 pwr_state; + __u8 resvrd2; + __u32 fusectl0; + 
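	/*
	 * struct_group() (from linux/stddef.h) wraps the members that
	 * follow in an anonymous union of a tagged struct and the bare
	 * fields: the group can be handled as one object (sized with
	 * sizeof_field(), copied with a single memcpy()) while every
	 * counter stays addressable by name, e.g. both pm_info->sys_pm
	 * and pm_info->event_counters are valid.
	 */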
struct_group(event_counters, + __u32 sys_pm; + __u32 host_msg; + __u32 unknown; + __u32 local_ssm; + __u32 timer; + ); + __u32 event_log[ICP_QAT_NUMBER_OF_PM_EVENTS]; + struct_group(pm, + __u32 fw_init; + __u32 pwrreq; + __u32 status; + __u32 main; + __u32 thread; + ); + struct_group(ssm, + __u32 pm_enable; + __u32 pm_active_status; + __u32 pm_managed_status; + __u32 pm_domain_status; + __u32 active_constraint; + ); + __u32 resvrd3[6]; +}; #endif diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h index 0c8883e2ccc6dc1979ac32811b2c72de38a27a3e..b8f1c4ffb8b5a7fc21a29c5a413bad192ca62d44 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_hw.h @@ -3,6 +3,8 @@ #ifndef _ICP_QAT_HW_H_ #define _ICP_QAT_HW_H_ +#include + enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_0 = 0, ICP_QAT_HW_AE_1 = 1, @@ -16,7 +18,12 @@ enum icp_qat_hw_ae_id { ICP_QAT_HW_AE_9 = 9, ICP_QAT_HW_AE_10 = 10, ICP_QAT_HW_AE_11 = 11, - ICP_QAT_HW_AE_DELIMITER = 12 + ICP_QAT_HW_AE_12 = 12, + ICP_QAT_HW_AE_13 = 13, + ICP_QAT_HW_AE_14 = 14, + ICP_QAT_HW_AE_15 = 15, + ICP_QAT_HW_AE_16 = 16, + ICP_QAT_HW_AE_DELIMITER = 17 }; enum icp_qat_hw_qat_id { @@ -93,7 +100,7 @@ enum icp_qat_capabilities_mask { /* Bits 10-11 are currently reserved */ ICP_ACCEL_CAPABILITIES_HKDF = BIT(12), ICP_ACCEL_CAPABILITIES_ECEDMONT = BIT(13), - /* Bit 14 is currently reserved */ + ICP_ACCEL_CAPABILITIES_EXT_ALGCHAIN = BIT(14), ICP_ACCEL_CAPABILITIES_SHA3_EXT = BIT(15), ICP_ACCEL_CAPABILITIES_AESGCM_SPC = BIT(16), ICP_ACCEL_CAPABILITIES_CHACHA_POLY = BIT(17), @@ -105,7 +112,10 @@ enum icp_qat_capabilities_mask { ICP_ACCEL_CAPABILITIES_CNV_INTEGRITY64 = BIT(23), ICP_ACCEL_CAPABILITIES_LZ4_COMPRESSION = BIT(24), ICP_ACCEL_CAPABILITIES_LZ4S_COMPRESSION = BIT(25), - ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26) + ICP_ACCEL_CAPABILITIES_AES_V2 = BIT(26), + /* Bits 27-28 are currently reserved */ + ICP_ACCEL_CAPABILITIES_ZUC_256 = BIT(29), + ICP_ACCEL_CAPABILITIES_WIRELESS_CRYPTO_EXT = BIT(30), }; #define QAT_AUTH_MODE_BITPOS 4 diff --git a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h index 69482abdb8b936c5c8ad194d9263f0bc136b2871..e28241bdd0f4efe045bfa8f326af71f68f383239 100644 --- a/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h +++ b/drivers/crypto/intel/qat/qat_common/icp_qat_uclo.h @@ -7,7 +7,7 @@ #define ICP_QAT_AC_C62X_DEV_TYPE 0x01000000 #define ICP_QAT_AC_C3XXX_DEV_TYPE 0x02000000 #define ICP_QAT_AC_4XXX_A_DEV_TYPE 0x08000000 -#define ICP_QAT_UCLO_MAX_AE 12 +#define ICP_QAT_UCLO_MAX_AE 17 #define ICP_QAT_UCLO_MAX_CTX 8 #define ICP_QAT_UCLO_MAX_UIMAGE (ICP_QAT_UCLO_MAX_AE * ICP_QAT_UCLO_MAX_CTX) #define ICP_QAT_UCLO_MAX_USTORE 0x4000 diff --git a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c index b533984906ece67a5a6a27a5e8b5f331403861ff..2ba4aa22e09279bbbd5cdb213fb3991b0cca297a 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c +++ b/drivers/crypto/intel/qat/qat_common/qat_comp_algs.c @@ -13,15 +13,6 @@ #include "qat_compression.h" #include "qat_algs_send.h" -#define QAT_RFC_1950_HDR_SIZE 2 -#define QAT_RFC_1950_FOOTER_SIZE 4 -#define QAT_RFC_1950_CM_DEFLATE 8 -#define QAT_RFC_1950_CM_DEFLATE_CINFO_32K 7 -#define QAT_RFC_1950_CM_MASK 0x0f -#define QAT_RFC_1950_CM_OFFSET 4 -#define QAT_RFC_1950_DICT_MASK 0x20 -#define QAT_RFC_1950_COMP_HDR 0x785e - static DEFINE_MUTEX(algs_lock); static unsigned int active_devs; @@ 
-109,69 +100,6 @@ static void qat_comp_resubmit(struct work_struct *work) acomp_request_complete(areq, ret); } -static int parse_zlib_header(u16 zlib_h) -{ - int ret = -EINVAL; - __be16 header; - u8 *header_p; - u8 cmf, flg; - - header = cpu_to_be16(zlib_h); - header_p = (u8 *)&header; - - flg = header_p[0]; - cmf = header_p[1]; - - if (cmf >> QAT_RFC_1950_CM_OFFSET > QAT_RFC_1950_CM_DEFLATE_CINFO_32K) - return ret; - - if ((cmf & QAT_RFC_1950_CM_MASK) != QAT_RFC_1950_CM_DEFLATE) - return ret; - - if (flg & QAT_RFC_1950_DICT_MASK) - return ret; - - return 0; -} - -static int qat_comp_rfc1950_callback(struct qat_compression_req *qat_req, - void *resp) -{ - struct acomp_req *areq = qat_req->acompress_req; - enum direction dir = qat_req->dir; - __be32 qat_produced_adler; - - qat_produced_adler = cpu_to_be32(qat_comp_get_produced_adler32(resp)); - - if (dir == COMPRESSION) { - __be16 zlib_header; - - zlib_header = cpu_to_be16(QAT_RFC_1950_COMP_HDR); - scatterwalk_map_and_copy(&zlib_header, areq->dst, 0, QAT_RFC_1950_HDR_SIZE, 1); - areq->dlen += QAT_RFC_1950_HDR_SIZE; - - scatterwalk_map_and_copy(&qat_produced_adler, areq->dst, areq->dlen, - QAT_RFC_1950_FOOTER_SIZE, 1); - areq->dlen += QAT_RFC_1950_FOOTER_SIZE; - } else { - __be32 decomp_adler; - int footer_offset; - int consumed; - - consumed = qat_comp_get_consumed_ctr(resp); - footer_offset = consumed + QAT_RFC_1950_HDR_SIZE; - if (footer_offset + QAT_RFC_1950_FOOTER_SIZE > areq->slen) - return -EBADMSG; - - scatterwalk_map_and_copy(&decomp_adler, areq->src, footer_offset, - QAT_RFC_1950_FOOTER_SIZE, 0); - - if (qat_produced_adler != decomp_adler) - return -EBADMSG; - } - return 0; -} - static void qat_comp_generic_callback(struct qat_compression_req *qat_req, void *resp) { @@ -293,18 +221,6 @@ static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm) memset(ctx, 0, sizeof(*ctx)); } -static int qat_comp_alg_rfc1950_init_tfm(struct crypto_acomp *acomp_tfm) -{ - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - int ret; - - ret = qat_comp_alg_init_tfm(acomp_tfm); - ctx->qat_comp_callback = &qat_comp_rfc1950_callback; - - return ret; -} - static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir, unsigned int shdr, unsigned int sftr, unsigned int dhdr, unsigned int dftr) @@ -400,43 +316,6 @@ static int qat_comp_alg_decompress(struct acomp_req *req) return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0); } -static int qat_comp_alg_rfc1950_compress(struct acomp_req *req) -{ - if (!req->dst && req->dlen != 0) - return -EINVAL; - - if (req->dst && req->dlen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EINVAL; - - return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, - QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE); -} - -static int qat_comp_alg_rfc1950_decompress(struct acomp_req *req) -{ - struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(req); - struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm); - struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm); - struct adf_accel_dev *accel_dev = ctx->inst->accel_dev; - u16 zlib_header; - int ret; - - if (req->slen <= QAT_RFC_1950_HDR_SIZE + QAT_RFC_1950_FOOTER_SIZE) - return -EBADMSG; - - scatterwalk_map_and_copy(&zlib_header, req->src, 0, QAT_RFC_1950_HDR_SIZE, 0); - - ret = parse_zlib_header(zlib_header); - if (ret) { - dev_dbg(&GET_DEV(accel_dev), "Error parsing zlib header\n"); - return ret; - } - - return qat_comp_alg_compress_decompress(req, 
DECOMPRESSION, QAT_RFC_1950_HDR_SIZE, - QAT_RFC_1950_FOOTER_SIZE, 0, 0); -} - static struct acomp_alg qat_acomp[] = { { .base = { .cra_name = "deflate", @@ -452,22 +331,7 @@ static struct acomp_alg qat_acomp[] = { { .decompress = qat_comp_alg_decompress, .dst_free = sgl_free, .reqsize = sizeof(struct qat_compression_req), -}, { - .base = { - .cra_name = "zlib-deflate", - .cra_driver_name = "qat_zlib_deflate", - .cra_priority = 4001, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_ctxsize = sizeof(struct qat_compression_ctx), - .cra_module = THIS_MODULE, - }, - .init = qat_comp_alg_rfc1950_init_tfm, - .exit = qat_comp_alg_exit_tfm, - .compress = qat_comp_alg_rfc1950_compress, - .decompress = qat_comp_alg_rfc1950_decompress, - .dst_free = sgl_free, - .reqsize = sizeof(struct qat_compression_req), -} }; +}}; int qat_comp_algs_register(void) { diff --git a/drivers/crypto/intel/qat/qat_common/qat_crypto.c b/drivers/crypto/intel/qat/qat_common/qat_crypto.c index 40c8e74d1cf9ed0d1dfaf98d0375a34c002ee980..101c6ea4167389a27814f4ac6cd7924570d9aaf6 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/intel/qat/qat_common/qat_crypto.c @@ -105,8 +105,8 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node) } /** - * qat_crypto_vf_dev_config() - * create dev config required to create crypto inst. + * qat_crypto_vf_dev_config() - create dev config required to create + * crypto inst. * * @accel_dev: Pointer to acceleration device. * diff --git a/drivers/crypto/intel/qat/qat_common/qat_hal.c b/drivers/crypto/intel/qat/qat_common/qat_hal.c index cbb946a800761d600b30e47a4bf1dcfd5839aafe..317cafa9d11f9eb19940a302f0668fba5e9acad3 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_hal.c +++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c @@ -697,12 +697,16 @@ static int qat_hal_chip_init(struct icp_qat_fw_loader_handle *handle, case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: handle->chip_info->mmp_sram_size = 0; handle->chip_info->nn = false; handle->chip_info->lm2lm3 = true; handle->chip_info->lm_size = ICP_QAT_UCLO_MAX_LMEM_REG_2X; handle->chip_info->icp_rst_csr = ICP_RESET_CPP0; - handle->chip_info->icp_rst_mask = 0x100015; + if (handle->pci_dev->device == ADF_420XX_PCI_DEVICE_ID) + handle->chip_info->icp_rst_mask = 0x100155; + else + handle->chip_info->icp_rst_mask = 0x100015; handle->chip_info->glb_clk_enable_csr = ICP_GLOBAL_CLK_ENABLE_CPP0; handle->chip_info->misc_ctl_csr = MISC_CONTROL_C4XXX; handle->chip_info->wakeup_event_val = 0x80000000; diff --git a/drivers/crypto/intel/qat/qat_common/qat_uclo.c b/drivers/crypto/intel/qat/qat_common/qat_uclo.c index 4bd150d1441a02aecb9b4d81e97327fd3af8f538..ad2c64af7427ee7c68ee2c259ea876fad4336778 100644 --- a/drivers/crypto/intel/qat/qat_common/qat_uclo.c +++ b/drivers/crypto/intel/qat/qat_common/qat_uclo.c @@ -200,7 +200,7 @@ static int qat_uclo_parse_num(char *str, unsigned int *num) unsigned long ae = 0; int i; - strncpy(buf, str, 15); + strscpy(buf, str, sizeof(buf)); for (i = 0; i < 16; i++) { if (!isdigit(buf[i])) { buf[i] = '\0'; @@ -733,6 +733,7 @@ qat_uclo_get_dev_type(struct icp_qat_fw_loader_handle *handle) case ADF_4XXX_PCI_DEVICE_ID: case ADF_401XX_PCI_DEVICE_ID: case ADF_402XX_PCI_DEVICE_ID: + case ADF_420XX_PCI_DEVICE_ID: return ICP_QAT_AC_4XXX_A_DEV_TYPE; default: pr_err("QAT: unsupported device 0x%x\n", diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c 
index 09551f949126530807db7f785b4a49cd189a3202..af14090cc4be311a3d7fbe5ad6fb9b1074687e06 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) /* Copyright(c) 2014 - 2021 Intel Corporation */ #include +#include #include #include #include diff --git a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c index 1e748e8ce12d5df17d92d7f921c1ffb16f8598cf..40b456b8035b5a242efd103dbf04359f49495e3d 100644 --- a/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xcc/adf_drv.c @@ -252,3 +252,4 @@ MODULE_FIRMWARE(ADF_DH895XCC_FW); MODULE_FIRMWARE(ADF_DH895XCC_MMP); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c index fefb85ceaeb9a2b261a06a1b8a1702e019c26d4a..d59cb1ba2ad5994b8f3b5b1c48ea21014953ec2b 100644 --- a/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c +++ b/drivers/crypto/intel/qat/qat_dh895xccvf/adf_drv.c @@ -226,3 +226,4 @@ MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Intel"); MODULE_DESCRIPTION("Intel(R) QuickAssist Technology"); MODULE_VERSION(ADF_DRV_VERSION); +MODULE_IMPORT_NS(CRYPTO_QAT); diff --git a/drivers/crypto/montage/Kconfig b/drivers/crypto/montage/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..e8e4b287a7927f3d29bc92bc7b7f449f88c3603e --- /dev/null +++ b/drivers/crypto/montage/Kconfig @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +source "drivers/crypto/montage/tsse/Kconfig" diff --git a/drivers/crypto/montage/Makefile b/drivers/crypto/montage/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a50415fe10c702c7ee41b58e44c6539a80fc9668 --- /dev/null +++ b/drivers/crypto/montage/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_CRYPTO_DEV_TSSE) += tsse/ diff --git a/drivers/crypto/montage/tsse/Kconfig b/drivers/crypto/montage/tsse/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..5854f8e4525cf2fbd2dd61c81f16d1fb30d51a4b --- /dev/null +++ b/drivers/crypto/montage/tsse/Kconfig @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +config CRYPTO_DEV_TSSE + tristate "Support for Montage(R) TSSE" + depends on X86 && PCI + select FW_LOADER + help + Support for Montage(R) TSSE for accelerating crypto workloads. + + To compile this as a module, choose M here. \ No newline at end of file diff --git a/drivers/crypto/montage/tsse/Makefile b/drivers/crypto/montage/tsse/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..d67ffde3a5b046c114fb6953bb042265539fb07d --- /dev/null +++ b/drivers/crypto/montage/tsse/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# This file is part of tsse driver for Linux +# +# Copyright © 2023 Montage Technology. All rights reserved. 
+ +obj-m += tsse.o + +tsse-objs := tsse_dev_mgr.o \ + tsse_ipc.o \ + tsse_fw_service.o \ + tsse_service.o \ + tsse_irq.o \ + tsse_dev_drv.o \ + tsse_vuart.o diff --git a/drivers/crypto/montage/tsse/tsse_dev.h b/drivers/crypto/montage/tsse/tsse_dev.h new file mode 100644 index 0000000000000000000000000000000000000000..c16d2ae7c414088aa585c698b776f2e9bb4acf03 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_DEV_H__ +#define __TSSE_DEV_H__ +#include +#include +#include +#include +#include +#include +#include +#include "tsse_ipc.h" + +#define TSSE_PCI_MAX_BARS 4 +#define TSSE_FW_VERSION_LEN 32 + +struct tsse_bar { + void __iomem *virt_addr; + resource_size_t addr; + resource_size_t size; +}; +struct tsse_dev_pci { + struct pci_dev *pci_dev; + struct tsse_bar bars[TSSE_PCI_MAX_BARS]; + u8 revid; +}; +enum tsse_dev_status_bit { + TSSE_DEV_STATUS_STARTING = 0, + TSSE_DEV_STATUS_STARTED = 1 + +}; +struct tsse_qpairs_bank { + struct tsse_dev *tsse_dev; + void __iomem *reg_base; + + u32 num_qparis; + u32 irq_vec; +}; +struct tsse_dev { + struct module *owner; + struct dentry *debugfs_dir; + unsigned long status; + struct list_head list; + struct tsse_dev_pci tsse_pci_dev; + struct tsse_qpairs_bank qpairs_bank; + atomic_t ref_count; + bool is_vf; + int id; + u32 num_irqs; + u32 num_vfs; + struct uart_port *port; + struct tsse_ipc *ipc; + void *adi; + void *mbx_hw; + const struct firmware *fw; + char fw_version[TSSE_FW_VERSION_LEN]; + bool fw_version_exist; +}; +#define TSSEDEV_TO_DEV(tssedev) (&((tssedev)->tsse_pci_dev.pci_dev->dev)) +#define TSSE_DEV_BARS(tssedev) ((tssedev)->tsse_pci_dev.bars) + +#include "tsse_log.h" + +struct list_head *tsse_devmgr_get_head(void); + +int tsse_dev_get(struct tsse_dev *tsse_dev); +void tsse_dev_put(struct tsse_dev *tsse_dev); +int tsse_devmgr_add_dev(struct tsse_dev *tsse_dev); +void tsse_devmgr_rm_dev(struct tsse_dev *tdev); +int tsse_prepare_restart_dev(struct tsse_dev *tdev); +int tsse_start_dev(struct tsse_dev *tdev); + +static inline struct tsse_dev *pci_to_tsse_dev(struct pci_dev *pci_dev) +{ + return (struct tsse_dev *)pci_get_drvdata(pci_dev); +} + +static inline int tsse_get_cur_node(void) +{ + int cpu, node; + + cpu = get_cpu(); + node = topology_physical_package_id(cpu); + put_cpu(); + + return node; +} + +static inline int tsse_dev_started(struct tsse_dev *tdev) +{ + return test_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); +} +static inline int tsse_dev_in_use(struct tsse_dev *tdev) +{ + return atomic_read(&tdev->ref_count) != 0; +} +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.c b/drivers/crypto/montage/tsse/tsse_dev_drv.c new file mode 100644 index 0000000000000000000000000000000000000000..86c619d64f5ee3e6a76ab05ef607c424c40bd396 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.c @@ -0,0 +1,383 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
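 *
 * (Note on device_probe() below: it attempts a 48-bit streaming DMA mask
 * first and only falls back to 32-bit if that fails, setting a matching
 * coherent mask in either case -- the usual pattern for hardware whose
 * descriptors carry less-than-64-bit addresses.)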
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev_drv.h" +#include "tsse_vuart.h" +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +#define CLUSTER_SLOT_CONFIG_OFFSET 0x5780000 +#define QPAIR_SETTING_OFFSET 0x50000 +#define BAR_START 2 +#define BAR_END 4 + +static DEFINE_IDA(tsse_ida); + +static inline void tsse_qpair_enable_pf(struct tsse_dev *tdev, bool enable) +{ + writel(enable ? 1 : 0, + TSSE_DEV_BARS(tdev)[2].virt_addr + + CLUSTER_SLOT_CONFIG_OFFSET + QPAIR_SETTING_OFFSET); +} +static int tsse_sriov_disable(struct tsse_dev *tdev) +{ + pci_disable_sriov(tdev->tsse_pci_dev.pci_dev); + tsse_qpair_enable_pf(tdev, true); + + return 0; +} + +static int tsse_sriov_configure(struct pci_dev *pdev, int num_vfs_param) +{ + int totalvfs = pci_sriov_get_totalvfs(pdev); + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + int ret = 0; + + if ((!tdev) || (num_vfs_param < 0) || (totalvfs <= 0)) { + dev_err(&pdev->dev, + "%s %d: failed to config sriov, tdev=%p totalvfs=%d num_vfs_param=%d\n", + __func__, __LINE__, tdev, totalvfs, num_vfs_param); + return -EBADE; + } + + if (num_vfs_param > totalvfs) + num_vfs_param = totalvfs; + + dev_info(&pdev->dev, "%s %d: has total %d vfs, and enable %d vfs\n", + __func__, __LINE__, totalvfs, num_vfs_param); + + if ((num_vfs_param > TSSE_PF_MAX_IRQ_NUM) || + (num_vfs_param > TSSE_PF_MAX_QPAIR_NUM)) { + tsse_dev_err( + tdev, + "vfs number is greater than pf's \"max_irq_num=%d or max_qpairs_num=%d\"\n", + TSSE_PF_MAX_IRQ_NUM, TSSE_PF_MAX_QPAIR_NUM); + return -EBADE; + } + + if (!tsse_dev_started(tdev)) { + dev_err(&pdev->dev, "%s %d: device is not started\n", __func__, + __LINE__); + return -EBADE; + } + + if (tsse_dev_in_use(tdev)) { + dev_err(&pdev->dev, "%s %d: device is busy\n", __func__, + __LINE__); + return -EBUSY; + } + + tsse_sriov_disable(tdev); + + tsse_prepare_restart_dev(tdev); + + tdev->num_vfs = num_vfs_param; + + if (tdev->num_vfs > 0) { + tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM; + } else { + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + } + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + ret = tsse_start_dev(tdev); + if (ret) { + dev_err(&pdev->dev, "%s %d: failed to start the device\n", + __func__, __LINE__); + return ret; + } + + if (num_vfs_param > 0) { + tsse_qpair_enable_pf(tdev, false); + pci_enable_sriov(pdev, num_vfs_param); + } + + return num_vfs_param; +} + +/** + * tsse_image_load_store() - This function will be called when user + * writes string to /sys/bus/pci/devices/.../tsse_image_load. + * Driver will always loads /lib/firmware/tsse_firmware.bin. + * @dev: device + * @attr: device attribute + * @buf: string that user writes + * @count: string length that user writes + * Return: the number of bytes used from the buffer, here it is just the count argument. 
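tsse_sriov_configure() above repartitions PF resources around a full stop/start cycle: with VFs enabled, the queue pairs are handed to the VFs (the PF keeps zero) and the PF IRQ budget shrinks from 96 to 16. A sketch of that repartitioning step in isolation (the helper name is hypothetical; the values mirror the constants in tsse_dev_drv.h):

	static void tsse_repartition_pf(struct tsse_dev *tdev, int num_vfs)
	{
		if (num_vfs > 0) {
			/* VFs own the queue pairs; the PF keeps management IRQs */
			tdev->num_irqs = TSSE_SRIOV_PF_MAX_IRQ_NUM;
			tdev->qpairs_bank.num_qparis = TSSE_SRIOV_PF_MAX_QPAIR_NUM;
		} else {
			tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM;
			tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM;
		}
	}

Userspace drives this path through the standard sriov_numvfs sysfs attribute.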
+ */ +static ssize_t tsse_image_load_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct pci_dev *pdev = NULL; + struct tsse_dev *tdev = NULL; + + pdev = container_of(dev, struct pci_dev, dev); + if (pdev) + tdev = pci_to_tsse_dev(pdev); + if (buf && count && tdev) { + tsse_dev_info(tdev, "receive command to load firmware %s\n", TSSE_FIRMWARE); + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + if (tsse_fw_manual_load_ipc(pdev)) + dev_err(&pdev->dev, "%s %d: firmware update failed\n", + __func__, __LINE__); + } + } + return count; +} + +DEVICE_ATTR_WO(tsse_image_load); + +static int device_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + int status = 0; + int bar; + u32 tmp_val; + struct tsse_dev *tdev; + + if (!pdev->is_physfn) { + dev_err(&pdev->dev, "%s %d: this is not Physical fn\n", + __func__, __LINE__); + return -EPERM; + } + + if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) { + dev_err(&pdev->dev, + "%s %d: invalid numa configuration for tsse\n", + __func__, __LINE__); + return -EINVAL; + } + + tdev = kzalloc_node(sizeof(*tdev), GFP_KERNEL, dev_to_node(&pdev->dev)); + + if (!tdev) + return -ENOMEM; + + status = pcim_enable_device(pdev); + + if (status) { + dev_err(&pdev->dev, "pcim_enable_device failed\n"); + goto out_err; + } + + pci_set_master(pdev); + + if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48))) { + if ((dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) { + dev_err(&pdev->dev, + "failed to set tsse dma address width\n"); + status = -EFAULT; + goto out_err; + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); + } + + } else { + dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(48)); + } + + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + + status = pcim_iomap_regions(pdev, BIT(0) | BIT(2), TSSE_DEV_NAME); + if (status) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + goto out_err; + } + + for (bar = BAR_START; bar < BAR_END;) { + TSSE_DEV_BARS(tdev)[bar].addr = pci_resource_start(pdev, bar); + TSSE_DEV_BARS(tdev)[bar].size = pci_resource_len(pdev, bar); + TSSE_DEV_BARS(tdev) + [bar].virt_addr = pcim_iomap_table(pdev)[bar]; + + dev_info(&pdev->dev, + "bar[%d]: addr=0x%llx, size=0x%llx, virt_addr=0x%lx\n", + bar, TSSE_DEV_BARS(tdev)[bar].addr, + TSSE_DEV_BARS(tdev)[bar].size, + (ulong)TSSE_DEV_BARS(tdev)[bar].virt_addr); + + bar += 2; + } + + tdev->owner = THIS_MODULE; + tdev->is_vf = false; + tdev->tsse_pci_dev.pci_dev = pdev; + tdev->id = ida_alloc(&tsse_ida, GFP_KERNEL); + if (tdev->id < 0) { + dev_err(&pdev->dev, "Unable to get id\n"); + status = tdev->id; + goto out_err; + } + + pci_set_drvdata(pdev, tdev); + + tdev->num_irqs = TSSE_PF_MAX_IRQ_NUM; + tdev->qpairs_bank.num_qparis = TSSE_PF_MAX_QPAIR_NUM; + tdev->qpairs_bank.irq_vec = TSSE_PF_QPAIR_START_IRQ_VECTOR; + tdev->qpairs_bank.reg_base = + TSSE_DEV_BARS(tdev)[2].virt_addr + TSSE_PF_QPAIR_REG_BASE; + + tsse_qpair_enable_pf(tdev, true); + + tsse_dev_info( + tdev, + "num_irqs:%u num_qparis:%u qpairs' start irq vector index:%u qpairs' reg base:0x%lx\n", + tdev->num_irqs, tdev->qpairs_bank.num_qparis, + tdev->qpairs_bank.irq_vec, (ulong)tdev->qpairs_bank.reg_base); + + if (tsse_devmgr_add_dev(tdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_devmgr failed to add new device\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_ida_free; + } + + if (vuart_init_port(pdev)) { + dev_err(&pdev->dev, + "%s %d: vuart_init_port failed to 
init vuart.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_port_init; + } + + tdev->fw_version_exist = false; + /* Its result not break driver init process */ + if (!tsse_fw_load(pdev, TSSE_FIRMWARE, &tdev->fw)) { + if (!get_firmware_version(tdev->fw, tdev->fw_version)) + tdev->fw_version_exist = true; + } + + if (tsse_ipc_init(pdev)) { + dev_err(&pdev->dev, + "%s %d: tsse_ipc_init failed to tsse_ipc.\n", __func__, + __LINE__); + status = -EFAULT; + goto out_err_ipc; + } + + if (sysfs_create_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr)) { + dev_err(&pdev->dev, + "%s %d: sysfs_create_file failed for tsse image load.\n", + __func__, __LINE__); + status = -EFAULT; + goto out_err_image_load; + } + + tsse_dev_info(tdev, "successful\n"); + + pci_read_config_dword(pdev, 0x720, &tmp_val); + tsse_dev_dbg(tdev, "the value of FILTER_MASK_2_REG is 0x%x\n", tmp_val); + + return 0; +out_err_image_load: + tsse_ipc_deinit(tdev); +out_err_ipc: + vuart_uninit_port(pdev); +out_err_port_init: + tsse_devmgr_rm_dev(tdev); +out_err_ida_free: + ida_free(&tsse_ida, tdev->id); +out_err: + kfree(tdev); + return status; +} + +static void device_remove(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + + pr_info("%s %d: pci_dev 0x%lx tsse_dev 0x%lx\n", __func__, __LINE__, + (ulong)pdev, (ulong)tdev); + + tsse_sriov_disable(tdev); + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + } + sysfs_remove_file(&pdev->dev.kobj, &dev_attr_tsse_image_load.attr); + tsse_ipc_deinit(tdev); + vuart_uninit_port(pdev); + tsse_devmgr_rm_dev(tdev); + ida_free(&tsse_ida, tdev->id); + kfree(tdev); + dev_info(&pdev->dev, "%s %d: successful\n", __func__, __LINE__); +} + +static const struct pci_device_id pci_ids[] = { + { + PCI_DEVICE(0x1b00, 0xc011), + }, + { + PCI_DEVICE(0x1b00, 0xd011), + }, + { 0 } +}; + +static struct pci_driver pci_driver = { + .name = TSSE_DEV_NAME, + .id_table = pci_ids, + .probe = device_probe, + .remove = device_remove, + .sriov_configure = tsse_sriov_configure, +}; + +MODULE_DEVICE_TABLE(pci, pci_ids); + +static int __init tsse_init(void) +{ + int status; + + status = vuart_register(); + if (status) { + pr_err("vuart_register failed[%d].\n", status); + return status; + } + + status = pci_register_driver(&pci_driver); + if (status) { + vuart_unregister(); + return status; + } + + pr_info(KBUILD_MODNAME ": loaded.\n"); + + return 0; +} + +static void __exit tsse_exit(void) +{ + pci_unregister_driver(&pci_driver); + vuart_unregister(); + + pr_info(KBUILD_MODNAME ": unloaded.\n"); +} + +module_init(tsse_init); +module_exit(tsse_exit); + +MODULE_AUTHOR("montage-tech.com"); +MODULE_DESCRIPTION("TSSE device driver"); +MODULE_VERSION("1.0.0"); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TSSE_FIRMWARE); diff --git a/drivers/crypto/montage/tsse/tsse_dev_drv.h b/drivers/crypto/montage/tsse/tsse_dev_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..6a05572a3849457d698f221f947056a7956f6ce0 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_drv.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
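The probe path above sets the streaming and coherent DMA masks in separate steps, with a 48-bit to 32-bit fallback. dma_set_mask_and_coherent() expresses the same policy more compactly; a sketch of the equivalent setup (behavior unchanged, helper name assumed):

	static int tsse_set_dma_masks(struct pci_dev *pdev)
	{
		int err;

		/* Prefer the device's native 48-bit addressing, else 32-bit */
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
		if (err)
			err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		return err;
	}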
+ */ + +#ifndef __TSSE_DEV_DRV_H__ +#define __TSSE_DEV_DRV_H__ +#define TSSE_DEV_NAME "tsse" + +// TODO: need to support full qpairs +#define TSSE_PF_MAX_QPAIR_NUM 16 + +#define TSSE_PF_MAX_IRQ_NUM 96 +#define TSSE_PF_QPAIR_START_IRQ_VECTOR 32 + +#define TSSE_SRIOV_PF_MAX_QPAIR_NUM 0 +#define TSSE_SRIOV_PF_MAX_IRQ_NUM 16 + +#define TSSE_PF_QPAIR_REG_BASE 0x5700000 + +#include "tsse_dev.h" + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_dev_mgr.c b/drivers/crypto/montage/tsse/tsse_dev_mgr.c new file mode 100644 index 0000000000000000000000000000000000000000..39553eb96832380480a138593941c3c0f8bf26fa --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_dev_mgr.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" +static DEFINE_MUTEX(tsse_dev_table_lock); +static LIST_HEAD(tsse_dev_table); + +static DEFINE_MUTEX(algs_lock); + +static inline void tsse_list_del(struct list_head *entry) +{ + WRITE_ONCE(entry->next->prev, entry->prev); + WRITE_ONCE(entry->prev->next, entry->next); +} +static inline void tsse_list_add(struct list_head *new, struct list_head *prev, + struct list_head *next) +{ + WRITE_ONCE(new->next, next); + WRITE_ONCE(new->prev, prev); + mb(); /* Make sure new node updates first */ + WRITE_ONCE(next->prev, new); + WRITE_ONCE(prev->next, new); +} + +static int tsse_dev_pf_get(struct tsse_dev *vf_tsse_dev) +{ + int ret = 0; + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return 0; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_add_return(1, &pf_tsse_dev->ref_count) == 1) { + if (!try_module_get(pf_tsse_dev->owner)) + ret = -EFAULT; + } + } + return ret; +} + +static void tsse_dev_pf_put(struct tsse_dev *vf_tsse_dev) +{ + struct tsse_dev *pf_tsse_dev = NULL; + struct pci_dev *pf_pci_dev = NULL; + + pf_pci_dev = vf_tsse_dev->tsse_pci_dev.pci_dev->physfn; + + if (!pf_pci_dev) + return; + + pf_tsse_dev = pci_to_tsse_dev(pf_pci_dev); + if (pf_tsse_dev) { + if (atomic_sub_return(1, &pf_tsse_dev->ref_count) == 0) + module_put(pf_tsse_dev->owner); + } +} + +int tsse_dev_get(struct tsse_dev *tdev) +{ + int ref_count = atomic_add_return(1, &tdev->ref_count); + + if (!tsse_dev_started(tdev)) { + atomic_sub(1, &tdev->ref_count); + return -EAGAIN; + } + + if (ref_count == 1) { + if (!try_module_get(tdev->owner)) + return -EFAULT; + if (tdev->is_vf) + return tsse_dev_pf_get(tdev); + } + return 0; +} +void tsse_dev_put(struct tsse_dev *tdev) +{ + if (atomic_sub_return(1, &tdev->ref_count) == 0) { + module_put(tdev->owner); + if (tdev->is_vf) + tsse_dev_pf_put(tdev); + } +} + +static int tsse_stop_dev(struct tsse_dev *tdev, bool busy_exit) +{ + int times, max_retry = 150; + + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + + for (times = 0; times < max_retry; times++) { + if (!tsse_dev_in_use(tdev)) + break; + msleep(100); + } + + if (times >= max_retry) { + tsse_dev_err(tdev, "Failed to stop busy device\n"); + if (busy_exit) + return -EBUSY; + } + if (tdev->qpairs_bank.num_qparis != 0) { + mutex_lock(&tsse_dev_table_lock); + tsse_list_del(&tdev->list); + mutex_unlock(&tsse_dev_table_lock); + tsse_dev_info(tdev, "removed from active dev table 
list\n"); + } + + tsse_dev_info(tdev, "device stopped\n"); + + return 0; +} + +int tsse_start_dev(struct tsse_dev *tdev) +{ + struct tsse_dev *tmp_dev; + struct list_head *prev_node = &tsse_dev_table; + int ret = 0; + + if (tdev->qpairs_bank.num_qparis == 0) { + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_dev_info(tdev, "device started\n"); + return 0; + } + + set_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + + mutex_lock(&tsse_dev_table_lock); + + list_for_each_entry(tmp_dev, &tsse_dev_table, list) { + if (tmp_dev == tdev) { + ret = -EEXIST; + tsse_dev_err(tdev, + "The device cannot be added repeatedly\n"); + goto clear_status; + } + } + + set_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + tsse_list_add(&tdev->list, prev_node, prev_node->next); + + tsse_dev_info(tdev, "device started\n"); + mutex_unlock(&tsse_dev_table_lock); + + return 0; +clear_status: + mutex_unlock(&tsse_dev_table_lock); + clear_bit(TSSE_DEV_STATUS_STARTING, &tdev->status); + clear_bit(TSSE_DEV_STATUS_STARTED, &tdev->status); + return ret; +} + +int tsse_prepare_restart_dev(struct tsse_dev *tdev) +{ + return tsse_stop_dev(tdev, false); +} + +void tsse_devmgr_rm_dev(struct tsse_dev *tdev) +{ + tsse_stop_dev(tdev, false); + tsse_dev_free_irq_vectors(tdev); + msleep(300); +} + +int tsse_devmgr_add_dev(struct tsse_dev *tdev) +{ + int ret; + + ret = tsse_dev_alloc_irq_vectors(tdev); + if (ret == 0) { + atomic_set(&tdev->ref_count, 0); + tdev->status = 0; + ret = tsse_start_dev(tdev); + + if (ret != 0) + tsse_dev_free_irq_vectors(tdev); + } + return ret; +} + +struct list_head *tsse_devmgr_get_head(void) +{ + return &tsse_dev_table; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.c b/drivers/crypto/montage/tsse/tsse_fw_service.c new file mode 100644 index 0000000000000000000000000000000000000000..486352bc8f84af626f36325e008030174e0a8862 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
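All mutation of tsse_dev_table above is serialized by tsse_dev_table_lock, and tsse_start_dev()/tsse_stop_dev() only link a device into the table while it owns queue pairs. The patch itself exports just tsse_devmgr_get_head(); a lookup that also takes a reference would look like this hypothetical helper:

	static struct tsse_dev *tsse_devmgr_find(int id)
	{
		struct tsse_dev *tdev, *found = NULL;

		mutex_lock(&tsse_dev_table_lock);
		list_for_each_entry(tdev, &tsse_dev_table, list) {
			if (tdev->id == id && !tsse_dev_get(tdev)) {
				found = tdev;	/* caller must tsse_dev_put() */
				break;
			}
		}
		mutex_unlock(&tsse_dev_table_lock);
		return found;
	}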
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "tsse_dev.h"
+#include "tsse_service.h"
+
+#define SEARCH_PATTERN "MT_CFG_BUILD_VERSION_DETAIL"
+#define SPACE_CH ' '
+
+static int fw_send_msg(struct tsse_ipc *tsseipc, struct ipc_msg *msg)
+{
+	u8 *h2d;
+	u32 int_reg;
+
+	mutex_lock(&tsseipc->list_lock);
+
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) != 0) {
+		mutex_unlock(&tsseipc->list_lock);
+		return -EFAULT;
+	}
+	if (msg->header.i_len < sizeof(struct ipc_header) +
+		sizeof(struct msg_info) + sizeof(struct fw_load)) {
+		dev_err(tsseipc->dev, "msg format error\n");
+		mutex_unlock(&tsseipc->list_lock);
+		return -EFAULT;
+	}
+	h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET);
+	memcpy_toio(h2d, msg, sizeof(struct ipc_header));
+	memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data,
+		    msg->header.i_len - sizeof(struct ipc_header));
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+
+	dev_info(tsseipc->dev, "notify device to get firmware\n");
+	mutex_unlock(&tsseipc->list_lock);
+	return 0;
+}
+
+/**
+ * get_firmware_version() - Get version information from firmware
+ * @fw: firmware pointer
+ * @fw_version_out: firmware version string output
+ * Return: 0 on success, error code otherwise
+ */
+int get_firmware_version(const struct firmware *fw, char *fw_version_out)
+{
+	const char *pattern = SEARCH_PATTERN;
+	const uint8_t *fw_buffer = fw->data;
+	uint32_t pattern_i = 0, buffer_i = 0;
+	uint32_t pattern_len = strlen(pattern); /* not including the trailing '\0' */
+	uint32_t version_start = 0;
+	uint32_t version_len = 0;
+
+	while (buffer_i < fw->size) {
+		if (pattern[pattern_i] == (char)fw_buffer[buffer_i]) {
+			buffer_i++;
+			pattern_i++;
+		}
+		if (pattern_i == pattern_len) {
+			break; /* pattern found */
+		} else if ((buffer_i < fw->size) &&
+			   (pattern[pattern_i] != (char)fw_buffer[buffer_i])) {
+			/* Mismatch after a partial match: the pattern has no
+			 * repeated prefix, so matching restarts from its
+			 * beginning rather than backtracking.
+			 */
+			if (pattern_i != 0)
+				pattern_i = 0;
+			else
+				buffer_i++;
+		}
+	}
+	if (pattern_i == pattern_len) {
+		buffer_i++;
+		version_start = buffer_i;
+		while (buffer_i < fw->size) {
+			if (fw_buffer[buffer_i] == SPACE_CH) {
+				version_len = buffer_i - version_start;
+				if (version_len >= TSSE_FW_VERSION_LEN - 1)
+					version_len = TSSE_FW_VERSION_LEN - 2;
+				strscpy(fw_version_out,
+					(const char *)fw_buffer + version_start,
+					version_len + 1);
+				return 0;
+			}
+			buffer_i++;
+		}
+	}
+	return -EINVAL;
+}
+
+/**
+ * fw_service() - Firmware service to handle IPC messages from the mainCPU.
+ * It will write the init or manually loaded firmware to the PCIe BAR and
+ * send a message back.
+ * @tsseipc_t: pointer to a structure used for IPC + * @msg_t: pointer to IPC message + */ +void fw_service(void *tsseipc_t, void *msg_t) +{ + void __iomem *fw; + uint32_t size; + uint32_t task_offset; + struct fw_load *fw_task; + struct tsse_dev *tdev; + struct tsse_ipc *tsseipc = (struct tsse_ipc *)tsseipc_t; + struct ipc_msg *msg = (struct ipc_msg *)msg_t; + + task_offset = sizeof(struct msg_info); + fw_task = (struct fw_load *)((uint8_t *)msg->i_data + task_offset); + tdev = pci_to_tsse_dev(tsseipc->pdev); + + if (!tdev || !tdev->fw) { + fw_task->result = 1; + fw_task->size = 0; + dev_info(tsseipc->dev, "firmware loading failed\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + return; + } + + fw_task->result = 0; + fw_task->size = tdev->fw->size; + size = tdev->fw->size; + fw = tsseipc->virt_addr + fw_task->offset + FW_BASE; + + memcpy_toio((u8 *)fw, tdev->fw->data, size); + dev_info(tsseipc->dev, "firmware loading done\n"); + if (fw_send_msg(tsseipc, msg)) + dev_err(tsseipc->dev, "notify device failed\n"); + + if (tdev->fw_version_exist) + dev_info(tsseipc->dev, "firmware version: %s\n", tdev->fw_version); + + if (tdev->fw) { + release_firmware(tdev->fw); + tdev->fw = NULL; + memset(tdev->fw_version, 0, TSSE_FW_VERSION_LEN); + tdev->fw_version_exist = false; + } +} + +/** + * tsse_fw_load() - Load firmware from /lib/firmware + * @pdev: pci device + * @name: firmware file name + * @fw: pointer to firmware pointer + * Return: 0 on success, error code otherwise + */ +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw) +{ + int result; + + result = request_firmware(fw, name, &pdev->dev); + if (result) + dev_err(&pdev->dev, "%s failed for %s\n", __func__, name); + return result; +} diff --git a/drivers/crypto/montage/tsse/tsse_fw_service.h b/drivers/crypto/montage/tsse/tsse_fw_service.h new file mode 100644 index 0000000000000000000000000000000000000000..706ea6d297696cf1e49e51bc1150f63a141af1e0 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_fw_service.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_FW_SERVICE_H__ +#define __TSSE_FW_SERVICE_H__ + +#include + +#define FW_BASE 0x7000000 +#define TSSE_FIRMWARE "tsse_firmware.bin" + +void fw_service(void *tsseipc_t, void *msg_t); +int tsse_fw_load(struct pci_dev *pdev, const char *name, const struct firmware **fw); +int get_firmware_version(const struct firmware *fw, char *fw_version_out); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_ipc.c b/drivers/crypto/montage/tsse/tsse_ipc.c new file mode 100644 index 0000000000000000000000000000000000000000..b75ca97db6b67afa3c3159427a454c865e01a7f7 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_ipc.c @@ -0,0 +1,216 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include + +#include "tsse_ipc.h" +#include "tsse_dev.h" +#include "tsse_service.h" + +/** + * get_msginf() - Create ipc_msg and read message from BAR. + * Return the pointer to ipc_msg, the caller is responsible for free it. 
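get_firmware_version() above walks the image byte by byte with an explicit two-index match. Because SEARCH_PATTERN has no repeated prefix, the same lookup can lean on the kernel's strnstr()/memchr() helpers; a sketch under that assumption (same single-separator, SPACE_CH-terminated format; the function name is hypothetical):

	static int tsse_fw_version_lookup(const struct firmware *fw, char *out)
	{
		const char *base = (const char *)fw->data;
		const char *p, *end;
		size_t remaining, len;

		p = strnstr(base, SEARCH_PATTERN, fw->size);
		if (!p)
			return -EINVAL;
		p += strlen(SEARCH_PATTERN) + 1;	/* skip pattern and separator */
		if (p >= base + fw->size)
			return -EINVAL;
		remaining = fw->size - (p - base);
		end = memchr(p, SPACE_CH, remaining);
		if (!end)
			return -EINVAL;
		len = min_t(size_t, end - p, TSSE_FW_VERSION_LEN - 2);
		strscpy(out, p, len + 1);
		return 0;
	}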
+ * @d2h: device2host memory pointer
+ * Return: new ipc_msg pointer, which points to the message read from the device
+ */
+static struct ipc_msg *get_msginf(void __iomem *d2h)
+{
+	uint32_t u_len = 0;
+	struct ipc_msg *msg = NULL;
+
+	uint8_t *device_msg_data = NULL;
+	struct ipc_header *ipc_info = (struct ipc_header *)d2h;
+
+	/* The memory layout in d2h must at least contain:
+	 * ipc_header, msg_info and fw_load (message body)
+	 */
+	if (ipc_info->i_len < sizeof(struct ipc_header) +
+		sizeof(struct msg_info) + sizeof(struct fw_load)) {
+		pr_err("%s(): msg format error\n", __func__);
+		return NULL;
+	}
+	u_len = ipc_info->i_len - sizeof(struct ipc_header);
+	msg = (struct ipc_msg *)(kzalloc(sizeof(struct ipc_msg) + u_len,
+					 GFP_ATOMIC));
+	if (!msg) {
+		pr_err("%s(): ipc_msg kzalloc failed\n", __func__);
+		return NULL;
+	}
+
+	msg->header.inst_id = ipc_info->inst_id;
+	msg->header.tgid = ipc_info->tgid;
+	msg->header.i_len = ipc_info->i_len;
+
+	device_msg_data = (uint8_t *)(d2h + sizeof(struct ipc_header));
+	memcpy_fromio((uint8_t *)msg->i_data, device_msg_data, u_len);
+
+	return msg;
+}
+
+static irqreturn_t tsse_ipc_d2h_irqhandler(int irq, void *dev_id)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)dev_id;
+
+	writel(0x0, tsseipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+	tasklet_hi_schedule(&tsseipc->ipc_handle);
+	dev_dbg(tsseipc->dev, "irq%d\n", irq);
+	return IRQ_HANDLED;
+}
+
+bool check_send_enbit(struct tsse_ipc *tsseipc)
+{
+	u32 int_reg;
+
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	return (int_reg & IPC_REGISTER_INT_SET) == 0;
+}
+
+void notify_device(struct tsse_ipc *tsseipc)
+{
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+}
+
+/**
+ * ipc_hw_init() - Enable the main2host interrupt and clear the interrupt
+ * set values in both host2main and main2host.
+ * @hw_ipc: pointer to a structure used for IPC
+ */
+static void ipc_hw_init(struct tsse_ipc *hw_ipc)
+{
+	writel(0x1, hw_ipc->virt_addr + MAIN2HOST_INTR_ENABLE_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	writel(0x0, hw_ipc->virt_addr + MAIN2HOST_INTR_SET_OFFSET);
+}
+
+static int ipc_init_msg(struct tsse_ipc *tsseipc)
+{
+	u8 *h2d;
+	u32 int_reg;
+	u32 cmd_len;
+	u32 i_len;
+	struct ipc_msg *msg;
+	struct msg_info *info_msg;
+
+	cmd_len = sizeof(uint32_t);
+	i_len = sizeof(struct ipc_header) + sizeof(struct msg_info) + cmd_len;
+	msg = (struct ipc_msg *)(kzalloc(i_len, GFP_ATOMIC));
+	if (!msg) {
+		pr_err("%s(): msg kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+	msg->header.i_len = i_len;
+	info_msg = (struct msg_info *)msg->i_data;
+	info_msg->msg_class = IPC_MESSAGE_BASIC;
+	*(uint32_t *)((uint8_t *)msg->i_data + sizeof(struct msg_info)) = IPC_BASIC_CMD_HOST_INIT;
+
+	mutex_lock(&tsseipc->list_lock);
+	int_reg = readl(tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	if ((int_reg & IPC_REGISTER_INT_SET) != 0) {
+		mutex_unlock(&tsseipc->list_lock);
+		kfree(msg);
+		return -EFAULT;
+	}
+	h2d = (u8 *)(tsseipc->virt_addr + HOST2MAIN_IPC_OFFSET);
+
+	memcpy_toio(h2d, msg, sizeof(struct ipc_header));
+	memcpy_toio(h2d + sizeof(struct ipc_header), (u8 *)msg->i_data,
+		    sizeof(struct msg_info) + sizeof(uint32_t));
+
+	writel(0x1, tsseipc->virt_addr + HOST2MAIN_INTR_SET_OFFSET);
+	mutex_unlock(&tsseipc->list_lock);
+	kfree(msg);
+
+	return 0;
+}
+
+static void tsse_ipc_bh_handler(unsigned long data)
+{
+	struct tsse_ipc *tsseipc = (struct tsse_ipc *)data;
+
+	void __iomem *d2h_payload = tsseipc->virt_addr + MAIN2HOST_IPC_OFFSET;
+	struct ipc_msg *msg = get_msginf(d2h_payload);
+
+	if (!msg) {
+		dev_err(tsseipc->dev, "get_msginf returned NULL\n");
+		return;
+	}
+	if (service_rout(tsseipc, msg))
+		dev_err(tsseipc->dev, "illegal message class\n");
+	kfree(msg);
+}
+
+int tsse_ipc_init(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc;
+	int rc;
+
+	ipc = devm_kzalloc(&pdev->dev, sizeof(*ipc), GFP_KERNEL);
+	if (ipc == NULL)
+		return -ENOMEM;
+	tdev->ipc = ipc;
+	ipc->pdev = pdev;
+	ipc->dev = &pdev->dev;
+	ipc->virt_addr = TSSE_DEV_BARS(tdev)[2].virt_addr;
+
+	mutex_init(&ipc->list_lock);
+	tasklet_init(&(ipc->ipc_handle), tsse_ipc_bh_handler,
+		     (ulong)(ipc));
+
+	rc = request_threaded_irq(pci_irq_vector(pdev, 0), NULL,
+				  tsse_ipc_d2h_irqhandler, IRQF_SHARED,
+				  "pf-ipc", ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "request_threaded_irq failed\n");
+		return rc;
+	}
+	ipc_hw_init(ipc);
+	rc = ipc_init_msg(ipc);
+	if (rc) {
+		dev_err(&pdev->dev, "ipc_init_msg failed\n");
+		tsse_ipc_deinit(tdev);
+	}
+	return rc;
+}
+
+void tsse_ipc_deinit(void *tdev_t)
+{
+	struct tsse_dev *tdev = tdev_t;
+	struct tsse_ipc *tsseipc = tdev->ipc;
+
+	if (tsseipc) {
+		free_irq(pci_irq_vector(tsseipc->pdev, 0), tsseipc);
+		tdev->ipc = NULL;
+	}
+}
+
+int tsse_fw_manual_load_ipc(struct pci_dev *pdev)
+{
+	struct tsse_dev *tdev = pci_to_tsse_dev(pdev);
+	struct tsse_ipc *ipc = tdev->ipc;
+	int rc = -EFAULT;
+
+	if (ipc) {
+		ipc_hw_init(ipc);
+		rc = ipc_init_msg(ipc);
+		if (rc)
+			dev_err(&pdev->dev, "ipc_init_msg failed\n");
+	}
+	return rc;
+}
diff --git a/drivers/crypto/montage/tsse/tsse_ipc.h b/drivers/crypto/montage/tsse/tsse_ipc.h
new file mode 100644
index 0000000000000000000000000000000000000000..82f8df71c98371de379f57b3cba1b936309fbec7
--- /dev/null
+++ 
b/drivers/crypto/montage/tsse/tsse_ipc.h @@ -0,0 +1,103 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TM_HOST_IPC_H__ +#define __TM_HOST_IPC_H__ + +#include +#include +#include + +#define TSSE_PASID_SVA + +#define HOST2MAIN_INTR_SET_OFFSET 0x2000 +#define HOST2MAIN_INTR_ENABLE_OFFSET 0x2004 +#define HOST2MAIN_ACK_INTR_CLR_OFFSET 0x2008 +#define HOST2MAIN_ACK_INTR_ENABLE_OFFSET 0x200c +#define HOST2MAIN_VLD_INTR_STATUS_OFFSET 0x2010 +#define HOST2MAIN_ACK_INTR_STATUS_OFFSET 0x2014 +#define MSIX_MASK_EN_REG_OFFSET 0x2020 +#define INTR_MASK_BIT_OFFSET 0x2024 +#define INTR_PENDING_BIT_OFFSET 0x2028 +#define HOST2MAIN_IPC_OFFSET 0x2400 + +#define MAIN2HOST_INTR_SET_OFFSET 0x3000 +#define MAIN2HOST_INTR_ENABLE_OFFSET 0x3004 +#define MAIN2HOST_ACK_INTR_CLR_OFFSET 0x3008 +#define MAIN2HOST_ACK_INTR_ENABLE_OFFSET 0x300c +#define MAIN2HOST_VEN_MSI_FUNC_NUM_OFFSET 0x3010 +#define MAIN2HOST_VEN_MSI_VFUNC_ACTIVE_OFFSET 0x3014 +#define MAIN2HOST_IPC_OFFSET 0x3400 + +#define IPC_REGISTER_INT_SET BIT(0) +#define IPC_REGISTER_INT_MASK BIT(1) + +enum IPC_BASIC_CMD { + IPC_BASIC_CMD_HOST_INIT = 0x1, + IPC_BASIC_CMD_PING = 0x2 +}; + +enum IPC_BOOT_CMD { + IPC_BOOT_CMD_GET_FIRMWARE = 0x1 +}; + +enum IPC_MESSAGE_CLASS { + IPC_MESSAGE_BASIC = 1, + IPC_MESSAGE_BOOT, + IPC_MESSAGE_CLASS_NUM, +}; + +struct ipc_header { + uint32_t inst_id; + pid_t tgid; + uint32_t i_len; + uint32_t pasid : 20; + uint32_t reserved_1 : 4; + uint32_t pasid_en : 8; + + uint32_t reserved[2]; +}; + +struct ipc_msg { + struct ipc_header header; + uint32_t i_data[]; +}; + +struct fw_load { + uint32_t command; + uint32_t result; + uint8_t name[32]; + uint32_t offset; + uint32_t size; +}; + +struct msg_info { + uint32_t host_id; + uint32_t msg_class; + uint32_t flags; + uint32_t reserved[3]; +}; + +struct ipc_layout { + struct ipc_header header; + struct msg_info info; +}; + +struct tsse_ipc { + struct device *dev; + struct pci_dev *pdev; + void __iomem *virt_addr; + struct mutex list_lock; + struct tasklet_struct ipc_handle; +}; + +int tsse_ipc_init(struct pci_dev *pdev); +void tsse_ipc_deinit(void *tdev); +int tsse_fw_manual_load_ipc(struct pci_dev *pdev); +bool check_send_enbit(struct tsse_ipc *tsseipc); +void notify_device(struct tsse_ipc *tsseipc); +#endif diff --git a/drivers/crypto/montage/tsse/tsse_irq.c b/drivers/crypto/montage/tsse/tsse_irq.c new file mode 100644 index 0000000000000000000000000000000000000000..8cb94fea3da4e2c82a755ae61968fddbb9f6aa70 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.c @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
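Every host2main message follows the layout fixed by tsse_ipc.h above: a struct ipc_header, then a struct msg_info selecting the class, then a class-specific body. ipc_init_msg() builds the one-word IPC_BASIC_CMD_HOST_INIT instance of this layout; a sketch of the same construction with the offsets spelled out (builder name hypothetical, the caller is assumed to have allocated i_len bytes):

	static u32 tsse_build_host_init(struct ipc_msg *msg)
	{
		struct msg_info *info = (struct msg_info *)msg->i_data;
		u32 *cmd = (u32 *)((u8 *)msg->i_data + sizeof(struct msg_info));

		msg->header.i_len = sizeof(struct ipc_header) +
				    sizeof(struct msg_info) + sizeof(u32);
		info->msg_class = IPC_MESSAGE_BASIC;
		*cmd = IPC_BASIC_CMD_HOST_INIT;
		return msg->header.i_len;
	}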
+ */ + +#include +#include +#include "tsse_dev.h" +#include "tsse_irq.h" + +#undef TSSE_IRQ_DBG + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev) +{ + int request_num = tdev->num_irqs; + int irq_num = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev, + request_num, request_num, + PCI_IRQ_MSIX); + + if (irq_num < 0) { + dev_err(TSSEDEV_TO_DEV(tdev), + "%s %d :failed to alloc MSIX interrupt vectors\n", + __func__, __LINE__); + return irq_num; + } + + return 0; +} diff --git a/drivers/crypto/montage/tsse/tsse_irq.h b/drivers/crypto/montage/tsse/tsse_irq.h new file mode 100644 index 0000000000000000000000000000000000000000..09bed4e6d58a6f9ffe4af746122b79c2a8af95ad --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_irq.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_IRQ_H__ +#define __TSSE_IRQ_H__ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include "tsse_dev.h" + +static inline void tsse_dev_free_irq_vectors(struct tsse_dev *tdev) +{ + pci_free_irq_vectors(tdev->tsse_pci_dev.pci_dev); +} + +int tsse_dev_alloc_irq_vectors(struct tsse_dev *tdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_log.h b/drivers/crypto/montage/tsse/tsse_log.h new file mode 100644 index 0000000000000000000000000000000000000000..153cbe16374e71456efc3075c3dae6dcd9bd60ac --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_log.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_LOG_H__ +#define __TSSE_LOG_H__ + +#define tsse_dev_err(tssedev, fmt, ...) \ + dev_err(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_warn(tssedev, fmt, ...) \ + dev_warn(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_info(tssedev, fmt, ...) \ + dev_info(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) +#define tsse_dev_dbg(tssedev, fmt, ...) \ + dev_dbg(TSSEDEV_TO_DEV(tssedev), "%s %d: " fmt, __func__, __LINE__, \ + ##__VA_ARGS__) + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_service.c b/drivers/crypto/montage/tsse/tsse_service.c new file mode 100644 index 0000000000000000000000000000000000000000..e4be85535b7765f6dc3db73642b7d519950bd715 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.c @@ -0,0 +1,28 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. 
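tsse_dev_alloc_irq_vectors() above requests an exact MSI-X count (min and max both set to the device's num_irqs), so allocation fails outright when the platform cannot grant every vector. If partial allocation were ever acceptable, the range form would look like the following sketch; this is not what the patch does, and the helper name is hypothetical:

	static int tsse_alloc_irqs_ranged(struct tsse_dev *tdev, int min_vecs)
	{
		int got = pci_alloc_irq_vectors(tdev->tsse_pci_dev.pci_dev,
						min_vecs, tdev->num_irqs,
						PCI_IRQ_MSIX);

		if (got < 0)
			return got;
		tdev->num_irqs = got;	/* record what was actually granted */
		return 0;
	}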
+ */ +#include +#include "tsse_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg) +{ + struct msg_info *info; + uint32_t msg_class; + int ret = 0; + + info = (struct msg_info *)msg->i_data; + msg_class = info->msg_class; + switch (msg_class) { + case IPC_MESSAGE_BOOT: + fw_service(tsseipc, msg); + break; + + default: + ret = -EINVAL; + break; + } + return ret; +} diff --git a/drivers/crypto/montage/tsse/tsse_service.h b/drivers/crypto/montage/tsse/tsse_service.h new file mode 100644 index 0000000000000000000000000000000000000000..d5fd87ee7dce430146e0560973c8a405a0ef0947 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_service.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_SERVICE_H__ +#define __TSSE_SERVICE_H__ + +#include "tsse_ipc.h" +#include "tsse_fw_service.h" + +int service_rout(struct tsse_ipc *tsseipc, struct ipc_msg *msg); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart.c b/drivers/crypto/montage/tsse/tsse_vuart.c new file mode 100644 index 0000000000000000000000000000000000000000..f49d4ffc9f3c7da2d24fa471b19613109f512191 --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.c @@ -0,0 +1,596 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tsse_dev.h" +#include "tsse_vuart_regs.h" +#include "tsse_vuart.h" + +#ifdef DEBUG +#define VUART_PRINT(fmt, ...) pr_info(fmt, ##__VA_ARGS__) +#else +#define VUART_PRINT(fmt, ...) 
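service_rout() above is the single demultiplex point for main2host messages, and only IPC_MESSAGE_BOOT is wired up so far. Adding a class is a one-case change; a sketch of the extended switch (basic_service() is an assumed handler, not implemented by the patch):

	switch (msg_class) {
	case IPC_MESSAGE_BOOT:
		fw_service(tsseipc, msg);
		break;
	case IPC_MESSAGE_BASIC:
		basic_service(tsseipc, msg);	/* hypothetical handler */
		break;
	default:
		ret = -EINVAL;
		break;
	}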
+#endif + +#define TSSE_VUART_BAUD (38400) +#define TSSE_VUART_MAX_RX_COUNT (256) +#define BOTH_EMPTY (VUART_FSR_TXFIFOE | VUART_FSR_RXFIFO) +struct tsse_vuart { + struct uart_port port; + unsigned int tx_threshold; + unsigned int rx_threshold; + unsigned int tx_loadsz; + unsigned char shutdown; + unsigned char confige_done; +}; + +#define SERIAL_LSR_NAME "tsse_vuart" + +static struct uart_driver g_vuart_reg = { + .owner = THIS_MODULE, + .driver_name = SERIAL_LSR_NAME, + .dev_name = "ttyTSSE", + .nr = TSSE_VUART_MAX_DEV, +}; + +static unsigned int g_trigger_level[4] = { 0, 31, 63, 111 }; +static unsigned long g_line[TSSE_VUART_BITMAP_SIZE]; + +static unsigned int vuart_serial_in(struct uart_port *port, int offset) +{ + unsigned int ret = le32_to_cpu(readl(port->membase + offset)); +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, ret); +#endif + return ret; +} + +static void vuart_serial_out(struct uart_port *port, int offset, int value) +{ +#ifdef DEBUG + pr_debug("%s offset 0x%x, v 0x%x\n", __func__, offset, value); +#endif + value = cpu_to_le32(value); + writel(value, port->membase + offset); +} + +static void vuart_wait_for_xmitr(struct uart_port *port) +{ + unsigned int status, tmout = 10000; + + for (;;) { + status = vuart_serial_in(port, VUART_FSR); + if (FIELD_GET(VUART_FSR_TXFIFOE, status)) + break; + if (--tmout == 0) { + pr_err("%s:timeout(10ms), TX is not empty.\n", + __func__); + break; + } + udelay(1); + touch_nmi_watchdog(); + } +} + +static unsigned int vuart_tx_empty(struct uart_port *port) +{ + unsigned long flags; + unsigned int lsr; + + spin_lock_irqsave(&port->lock, flags); + lsr = vuart_serial_in(port, VUART_FSR); + spin_unlock_irqrestore(&port->lock, flags); + + return (lsr & BOTH_EMPTY) == BOTH_EMPTY ? TIOCSER_TEMT : 0; +} + +static void vuart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ +} + +static unsigned int vuart_get_mctrl(struct uart_port *port) +{ + return 0; +} + +static void vuart_stop_tx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~VUART_IER_HETXEI; + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_tx_chars(struct uart_port *port) +{ + struct circ_buf *xmit = &port->state->xmit; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + int count; + + if (port->x_char) { + pr_err("x_char %d\n", port->x_char); + return; + } + + if (uart_tx_stopped(port) || uart_circ_empty(xmit)) { + vuart_stop_tx(port); + return; + } + + count = vuart->tx_loadsz; + do { + vuart_serial_out(port, VUART_TX, xmit->buf[xmit->tail]); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } while (--count > 0); + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); +} + +static void vuart_start_tx(struct uart_port *port) +{ + unsigned int ier, fsr; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + if (uart_tx_stopped(port)) { + vuart_stop_tx(port); + return; + } + + fsr = vuart_serial_in(port, VUART_FSR); + VUART_PRINT("==>Existing Data number in TX FIFO %ld\n", + FIELD_GET(VUART_FSR_TFIFODN, fsr)); + VUART_PRINT("==>Existing Data number in RX FIFO %ld\n", + FIELD_GET(VUART_FSR_RFIFODN, fsr)); + if (fsr & VUART_FSR_TXFIFOE) + vuart_tx_chars(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_HETXEI | VUART_IER_HETXUI; + 
vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_throttle(struct uart_port *port) +{ +} + +static void vuart_unthrottle(struct uart_port *port) +{ +} + +static void vuart_stop_rx(struct uart_port *port) +{ + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (!vuart->confige_done) + return; + + ier = vuart_serial_in(port, VUART_IER); + ier &= ~(VUART_IER_HERXTOI | VUART_IER_HETXDRI | VUART_IER_HERXOI); + vuart_serial_out(port, VUART_IER, ier); +} + +static void vuart_enable_ms(struct uart_port *port) +{ +} + +static void vuart_break_ctl(struct uart_port *port, int ctl) +{ +} + +static irqreturn_t vuart_interrupt(int irq, void *port) +{ + int handled = 0; + struct uart_port *p = (struct uart_port *)port; + + if (p->handle_irq(p)) + handled = 1; + + return IRQ_RETVAL(handled); +} + +static void vuart_check_config_done(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (vuart_serial_in(port, VUART_CFG) == 1) + vuart->confige_done = 1; +} + +static int vuart_startup(struct uart_port *port) +{ + unsigned int ret, hcr, ier, fcr = 0; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (port->flags & UPF_SHARE_IRQ) + port->irqflags |= IRQF_SHARED; + ret = request_irq(port->irq, vuart_interrupt, port->irqflags, + "tsse_uart", port); + if (ret) + return ret; + + hcr = vuart_serial_in(port, VUART_HCR); + vuart->rx_threshold = FIELD_GET(VUART_HCR_RFIFOT, hcr); + vuart->tx_threshold = FIELD_GET(VUART_HCR_TFIFOT, hcr); + fcr |= FIELD_PREP(VUART_FCR_RFIFOT, vuart->rx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFOT, vuart->tx_threshold); + fcr |= FIELD_PREP(VUART_FCR_TFIFORST, 1); + fcr |= FIELD_PREP(VUART_FCR_RFIFORST, 1); + vuart_serial_out(port, VUART_FCR, fcr); + + vuart->rx_threshold = g_trigger_level[vuart->rx_threshold]; + vuart->tx_threshold = g_trigger_level[vuart->tx_threshold]; + + vuart_check_config_done(port); + ier = vuart_serial_in(port, VUART_IER); + ier |= VUART_IER_CCFGDI | VUART_IER_HETXDRI | VUART_IER_HERXTOI; + vuart_serial_out(port, VUART_IER, ier); + + vuart_serial_out(port, VUART_SCR, FIELD_PREP(VUART_SCR_SCR, 1)); + + vuart->shutdown = 0; + + return 0; +} + +static void vuart_shutdown(struct uart_port *port) +{ + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + vuart->shutdown = 1; + vuart_stop_rx(port); + vuart_stop_tx(port); + free_irq(port->irq, port); + vuart_serial_out(port, VUART_SCR, 0); +} + +static void vuart_set_termios(struct uart_port *port, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud; + unsigned long flags; + + if ((termios->c_cflag & CSIZE) != CS8) + pr_err("Warning:termios is not CS8.\n"); + + baud = uart_get_baud_rate(port, termios, old, 0, TSSE_VUART_BAUD); + + spin_lock_irqsave(&port->lock, flags); + uart_update_timeout(port, termios->c_cflag, baud); + + port->read_status_mask = + VUART_FSR_TXFIFOE | VUART_FSR_TXOE | VUART_FSR_RXDR; + if (termios->c_iflag & INPCK) + port->read_status_mask |= VUART_FSR_RXUE; + + port->ignore_status_mask = 0; + if (termios->c_iflag & IGNPAR) + port->ignore_status_mask |= VUART_FSR_RXUE; + if (termios->c_iflag & (IGNBRK | IGNPAR)) + port->ignore_status_mask |= VUART_FSR_TXFIFOE; + + if ((termios->c_cflag & CREAD) == 0) { + port->ignore_status_mask |= VUART_FSR_RXDR; + pr_err("Warning:termios is not set CREAD.\n"); + } + + spin_unlock_irqrestore(&port->lock, flags); + + if (tty_termios_baud_rate(termios)) + tty_termios_encode_baud_rate(termios, baud, baud); +} + +static void 
vuart_set_ldisc(struct uart_port *port, struct ktermios *ktermios) +{ +} + +static void vuart_pm(struct uart_port *port, unsigned int state, + unsigned int oldstate) +{ +} + +static void vuart_release_port(struct uart_port *port) +{ +} + +static int vuart_request_port(struct uart_port *port) +{ + return 0; +} + +static void vuart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) + port->type = PORT_16550A; +} + +static int vuart_verify_port(struct uart_port *port, struct serial_struct *ser) +{ + if (port->type != PORT_16550A) + return -EINVAL; + return 0; +} + +#ifdef CONFIG_CONSOLE_POLL +static void vuart_poll_put_char(struct uart_port *port, unsigned char c) +{ + unsigned int ier_save; + + ier_save = vuart_serial_in(port, VUART_IER); + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_TX, c); + + vuart_wait_for_xmitr(port); + vuart_serial_out(port, VUART_IER, ier_save); +} + +static int vuart_poll_get_char(struct uart_port *port) +{ + int status; + + status = vuart_serial_in(port, VUART_FSR); + if (!FIELD_GET(VUART_FSR_RXDR, status)) + return NO_POLL_CHAR; + + return vuart_serial_in(port, VUART_RX); +} + +#endif + +static const char *vuart_type(struct uart_port *port) +{ + return "tsse_vuart"; +} + +static const struct uart_ops vuart_ops = { + .tx_empty = vuart_tx_empty, + .set_mctrl = vuart_set_mctrl, + .get_mctrl = vuart_get_mctrl, + .stop_tx = vuart_stop_tx, + .start_tx = vuart_start_tx, + .throttle = vuart_throttle, + .unthrottle = vuart_unthrottle, + .stop_rx = vuart_stop_rx, + .enable_ms = vuart_enable_ms, + .break_ctl = vuart_break_ctl, + .startup = vuart_startup, + .shutdown = vuart_shutdown, + .set_termios = vuart_set_termios, + .set_ldisc = vuart_set_ldisc, + .pm = vuart_pm, + .type = vuart_type, + .release_port = vuart_release_port, + .request_port = vuart_request_port, + .config_port = vuart_config_port, + .verify_port = vuart_verify_port, +#ifdef CONFIG_CONSOLE_POLL + .poll_get_char = vuart_poll_get_char, + .poll_put_char = vuart_poll_put_char, +#endif +}; + +static unsigned int vuart_rx_chars(struct uart_port *port, unsigned int lsr) +{ + int max_count = TSSE_VUART_MAX_RX_COUNT; + unsigned char ch; + struct tty_port *tport = &port->state->port; + + do { + if (lsr & VUART_FSR_RXDR) + ch = vuart_serial_in(port, VUART_RX); + else + ch = 0; + port->icount.rx++; + if (lsr & VUART_FSR_RXUE) { + port->icount.overrun++; + pr_err("income byte underflow, record and clear int.\n"); + vuart_serial_out(port, VUART_IIR, VUART_IIR_RXUE); + } + + if (!uart_prepare_sysrq_char(port, ch)) { + if (tty_insert_flip_char(tport, ch, TTY_NORMAL) == 0) + ++port->icount.buf_overrun; + } + + if (--max_count == 0) + break; + lsr = vuart_serial_in(port, VUART_FSR); + } while (lsr & VUART_FSR_RXDR); + + tty_flip_buffer_push(&port->state->port); + return lsr; +} + +static int vuart_deal_irq(struct uart_port *port, unsigned int iir) +{ + unsigned char status; + unsigned int ier; + struct tsse_vuart *vuart = (struct tsse_vuart *)port; + + if (iir & VUART_IIR_CPUCD) + vuart->confige_done = 1; + + status = vuart_serial_in(port, VUART_FSR); + if (port->read_status_mask & VUART_FSR_RXDR) + vuart_rx_chars(port, status); + else + pr_err("read_status_mask not set VUART_FSR_RXDR, ignor rx.\n"); + + ier = vuart_serial_in(port, VUART_IER); + if (!(status & VUART_FSR_TXOE) && (status & VUART_FSR_TXFIFOE) && + (ier & VUART_IER_HETXEI)) + vuart_tx_chars(port); + + return 1; +} + +#ifdef DEBUG +static void vuart_debug_iir(unsigned int iir) +{ + VUART_PRINT("%s called iir %u.\n", 
__func__, iir); + if (iir & VUART_IIR_TXEI) + pr_err("TX FIFO empty interrupt.\n"); + + if (iir & VUART_IIR_RXTOI) + pr_err("Host RX FIFO character timeout interrupt.\n"); + + if (iir & VUART_IIR_RXDAI) + pr_err("Host RX FIFO data available interrupt.\n"); + + if (iir & VUART_IIR_RXUE) + pr_err("HOST RX FIFO Underflow error.\n"); + + if (iir & VUART_IIR_TXOE) + pr_err("HOST TX FIFO Overrun error.\n"); + + if (iir & VUART_IIR_CPUCD) + pr_err("CPU has finished configuration for virtual UART"); + + if (iir & VUART_IIR_TXFI) + pr_err("Host TX FIFO full interrupt.\n"); +} +#endif + +static int vuart_handle_irq(struct uart_port *port) +{ + unsigned int iir; + unsigned long flags; + int ret; + + iir = vuart_serial_in(port, VUART_IIR); + vuart_serial_out(port, VUART_IIR, iir); +#ifdef DEBUG + vuart_debug_iir(iir); +#endif + spin_lock_irqsave(&port->lock, flags); + ret = vuart_deal_irq(port, iir); + + uart_unlock_and_check_sysrq_irqrestore(port, flags); + + return ret; +} + +static int vuart_get_line(void) +{ + int bit = 0; + + bit = find_first_zero_bit(&g_line[0], TSSE_VUART_MAX_DEV); + if (bit >= TSSE_VUART_MAX_DEV) + return -ENOSPC; + set_bit(bit, &g_line[0]); + return bit; +} + +static void vuart_free_line(int line) +{ + clear_bit(line, &g_line[0]); +} + +int vuart_init_port(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_vuart *vuart = NULL; + struct uart_port *p = NULL; + int ret = 0; + int line = vuart_get_line(); + + if (line == -ENOSPC) { + dev_err(&pdev->dev, "device too more, max is 64.\n"); + return -ENOMEM; + } + + vuart = kzalloc_node(sizeof(struct tsse_vuart), GFP_KERNEL, + dev_to_node(&pdev->dev)); + if (!vuart) { + ret = -ENOMEM; + goto zalloc_fail; + } + vuart->shutdown = 1; + p = &(vuart->port); + p->mapbase = 0; + p->mapsize = 0; + p->membase = TSSE_DEV_BARS(tdev)[2].virt_addr + RLS_VUART_OFFSET; + p->irq = pci_irq_vector(pdev, RLS_VUART_IRQ_NUM); + p->handle_irq = vuart_handle_irq; + spin_lock_init(&p->lock); + p->line = line; + p->type = PORT_16550A; + p->uartclk = TSSE_VUART_BAUD * 16; + p->iotype = UPIO_MEM; + p->ops = &vuart_ops; + p->fifosize = 128; + vuart->tx_loadsz = 128; + p->flags = UPF_BOOT_AUTOCONF | UPF_FIXED_TYPE | UPF_FIXED_PORT | + UPF_SHARE_IRQ; + p->dev = &pdev->dev; + p->private_data = tdev; + + tdev->port = (struct uart_port *)vuart; + ret = uart_add_one_port(&g_vuart_reg, p); + if (ret != 0) { + dev_err(&pdev->dev, "add port fialed.[%d]\n", ret); + goto add_port_fail; + } + return 0; +add_port_fail: + kfree(vuart); +zalloc_fail: + vuart_free_line(line); + + return ret; +} + +void vuart_uninit_port(struct pci_dev *pdev) +{ + struct tsse_dev *tdev = pci_to_tsse_dev(pdev); + struct tsse_vuart *vuart = (struct tsse_vuart *)(tdev->port); + + if (tdev->port) { + if (!vuart->shutdown) + free_irq(tdev->port->irq, tdev->port); + vuart_free_line(tdev->port->line); + uart_remove_one_port(&g_vuart_reg, tdev->port); + kfree(vuart); + } +} + +int vuart_register(void) +{ + return uart_register_driver(&g_vuart_reg); +} + +void vuart_unregister(void) +{ + uart_unregister_driver(&g_vuart_reg); +} diff --git a/drivers/crypto/montage/tsse/tsse_vuart.h b/drivers/crypto/montage/tsse/tsse_vuart.h new file mode 100644 index 0000000000000000000000000000000000000000..1ed43368751af9218a4ce279adde3332e2c10c0c --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. 
All rights reserved. + */ + +#ifndef __TSSE_VUART_H__ +#define __TSSE_VUART_H__ + +#include + +#define RLS_VUART_OFFSET (0x680000) +#define RLS_VUART_IRQ_NUM (10) +#define TSSE_VUART_MAX_DEV (64) +#define TSSE_VUART_BITMAP_SIZE (ALIGN(TSSE_VUART_MAX_DEV, 64) / 64) + +int vuart_register(void); +void vuart_unregister(void); +int vuart_init_port(struct pci_dev *pdev); +void vuart_uninit_port(struct pci_dev *pdev); + +#endif diff --git a/drivers/crypto/montage/tsse/tsse_vuart_regs.h b/drivers/crypto/montage/tsse/tsse_vuart_regs.h new file mode 100644 index 0000000000000000000000000000000000000000..26fa62f5014a573dd4c78e23ebebc46568b0aedd --- /dev/null +++ b/drivers/crypto/montage/tsse/tsse_vuart_regs.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * This file is part of tsse driver for Linux + * + * Copyright © 2023 Montage Technology. All rights reserved. + */ + +#ifndef __TSSE_VUART_REGS_H__ +#define __TSSE_VUART_REGS_H__ + +#include +#include + +#define VUART_ID 0x0 +#define VUART_ID_MASK GENMASK(31, 0) + +#define VUART_HCR 0x10 +#define VUART_HCR_RFIFOT GENMASK(3, 2) +#define VUART_HCR_TFIFOT GENMASK(5, 4) + +#define INTRID_NONE BIT(0) +#define INTRID_CPU_LSR (BIT(2) | BIT(1)) +#define INTRID_TRIGGER_LEVEL BIT(2) +#define INTRID_RX_TIMEOUT (BIT(2) | BIT(3)) +#define INTRID_TX_EMPTY BIT(1) + +#define VUART_IIR 0x28 +#define VUART_IIR_TXEI GENMASK(0, 0) +#define VUART_IIR_RXTOI GENMASK(1, 1) +#define VUART_IIR_RXDAI GENMASK(2, 2) +#define VUART_IIR_CPUCD GENMASK(3, 3) +#define VUART_IIR_TXFI GENMASK(4, 4) +#define VUART_IIR_RXUE GENMASK(5, 5) +#define VUART_IIR_TXOE GENMASK(6, 6) + +#define VUART_FCR 0x30 +#define VUART_FCR_TFIFORST GENMASK(0, 0) +#define VUART_FCR_RFIFORST GENMASK(1, 1) +#define VUART_FCR_RFIFOT GENMASK(3, 2) +#define VUART_FCR_TFIFOT GENMASK(5, 4) + +#define VUART_FSR 0x34 +#define VUART_FSR_TXDR GENMASK(0, 0) +#define VUART_FSR_RXDR GENMASK(1, 1) +#define VUART_FSR_RXFIFO GENMASK(2, 2) +#define VUART_FSR_TXFIFOE GENMASK(3, 3) +#define VUART_FSR_RXFIFOF GENMASK(4, 4) +#define VUART_FSR_TXFIFOF GENMASK(5, 5) +#define VUART_FSR_TFIFODN GENMASK(13, 6) +#define VUART_FSR_RFIFODN GENMASK(21, 14) +#define VUART_FSR_TXOE GENMASK(23, 23) +#define VUART_FSR_RXUE GENMASK(24, 24) + +#define VUART_SCR 0x3c +#define VUART_SCR_SCR GENMASK(7, 0) + +#define VUART_TX 0x40 +#define VUART_RX 0x40 + +#define VUART_IER 0x48 +#define VUART_IER_HETXEI GENMASK(0, 0) +#define VUART_IER_HERXTOI GENMASK(1, 1) +#define VUART_IER_HETXDRI GENMASK(2, 2) +#define VUART_IER_CCFGDI GENMASK(3, 3) +#define VUART_IER_HETXFI GENMASK(4, 4) +#define VUART_IER_HETXUI GENMASK(5, 5) +#define VUART_IER_HERXOI GENMASK(6, 6) + +#define VUART_CFG 0x4c +#define VUART_CFG_CCFGD GENMASK(0, 0) + +#endif diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c index 1be549a07a21976dc20ef45ca1ad8c3c40030ee4..f0c3127941ae2f5cea71a4041aafc3e617af2df9 100644 --- a/drivers/crypto/padlock-aes.c +++ b/drivers/crypto/padlock-aes.c @@ -475,7 +475,7 @@ static struct skcipher_alg cbc_aes_alg = { }; static const struct x86_cpu_id padlock_cpu_id[] = { - X86_MATCH_FEATURE(X86_FEATURE_XCRYPT, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_XCRYPT }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_cpu_id); diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c index 6865c7f1fc1a2343611ed56fd561461bf5d7ec23..04858dc8b59794beabd98dbdfe59a2b1e1305a1a 100644 --- a/drivers/crypto/padlock-sha.c +++ b/drivers/crypto/padlock-sha.c @@ -491,7 +491,7 @@ static struct shash_alg 
sha256_alg_nano = { }; static const struct x86_cpu_id padlock_sha_ids[] = { - X86_MATCH_FEATURE(X86_FEATURE_PHE, NULL), + { X86_VENDOR_CENTAUR, 6, X86_MODEL_ANY, X86_FEATURE_PHE }, {} }; MODULE_DEVICE_TABLE(x86cpu, padlock_sha_ids); diff --git a/drivers/crypto/zhaoxin-aes.c b/drivers/crypto/zhaoxin-aes.c new file mode 100644 index 0000000000000000000000000000000000000000..e1d029fa9d1ab84df36ef5e6ec9b49d9cb2308be --- /dev/null +++ b/drivers/crypto/zhaoxin-aes.c @@ -0,0 +1,523 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Support for ACE hardware crypto engine. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define DRIVER_VERSION "1.0.0" + +/* + * Number of data blocks actually fetched for each xcrypt insn. + * Processors with prefetch errata will fetch extra blocks. + */ +static unsigned int ecb_fetch_blocks = 2; +#define MAX_ECB_FETCH_BLOCKS (8) +#define ecb_fetch_bytes (ecb_fetch_blocks * AES_BLOCK_SIZE) + +static unsigned int cbc_fetch_blocks = 1; +#define MAX_CBC_FETCH_BLOCKS (4) +#define cbc_fetch_bytes (cbc_fetch_blocks * AES_BLOCK_SIZE) + +/* Control word. */ +struct cword { + unsigned int __packed + rounds:4, + algo:3, + keygen:1, + interm:1, + encdec:1, + ksize:2; +} __aligned(PADLOCK_ALIGNMENT); + +/* + * Whenever making any changes to the following structure *make sure* you keep E, d_data and cword + * aligned on 16 Bytes boundaries and the Hardware can access 16 * 16 bytes of E and d_data (only + * the first 15 * 16 bytes matter but the HW reads more). + */ +struct aes_ctx { + u32 E[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + u32 d_data[AES_MAX_KEYLENGTH_U32] __aligned(PADLOCK_ALIGNMENT); + struct { + struct cword encrypt; + struct cword decrypt; + } cword; + u32 *D; +}; + +static DEFINE_PER_CPU(struct cword *, zx_paes_last_cword); + +/* Tells whether the ACE is capable to generate the extended key for a given key_len. */ +static inline int aes_hw_extkey_available(uint8_t key_len) +{ + /* + * TODO: We should check the actual CPU model/stepping as it's possible that the + * capability will be added in the next CPU revisions. + */ + if (key_len == 16) + return 1; + return 0; +} + +static inline struct aes_ctx *aes_ctx_common(void *ctx) +{ + unsigned long addr = (unsigned long)ctx; + unsigned long align = PADLOCK_ALIGNMENT; + + if (align <= crypto_tfm_ctx_alignment()) + align = 1; + return (struct aes_ctx *)ALIGN(addr, align); +} + +static inline struct aes_ctx *aes_ctx(struct crypto_tfm *tfm) +{ + return aes_ctx_common(crypto_tfm_ctx(tfm)); +} + +static inline struct aes_ctx *skcipher_aes_ctx(struct crypto_skcipher *tfm) +{ + return aes_ctx_common(crypto_skcipher_ctx(tfm)); +} + +static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + const __le32 *key = (const __le32 *)in_key; + struct crypto_aes_ctx gen_aes; + int cpu; + + if (key_len % 8) + return -EINVAL; + + /* + * If the hardware is capable of generating the extended key itself we must supply the + * plain key for both encryption and decryption. + */ + ctx->D = ctx->E; + + ctx->E[0] = le32_to_cpu(key[0]); + ctx->E[1] = le32_to_cpu(key[1]); + ctx->E[2] = le32_to_cpu(key[2]); + ctx->E[3] = le32_to_cpu(key[3]); + + /* Prepare control words. 
*/ + memset(&ctx->cword, 0, sizeof(ctx->cword)); + + ctx->cword.decrypt.encdec = 1; + ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; + ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; + ctx->cword.encrypt.ksize = (key_len - 16) / 8; + ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; + + /* Don't generate extended keys if the hardware can do it. */ + if (aes_hw_extkey_available(key_len)) + goto ok; + + ctx->D = ctx->d_data; + ctx->cword.encrypt.keygen = 1; + ctx->cword.decrypt.keygen = 1; + + if (aes_expandkey(&gen_aes, in_key, key_len)) + return -EINVAL; + + memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); + memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); + +ok: + for_each_online_cpu(cpu) + if (&ctx->cword.encrypt == per_cpu(zx_paes_last_cword, cpu) || + &ctx->cword.decrypt == per_cpu(zx_paes_last_cword, cpu)) + per_cpu(zx_paes_last_cword, cpu) = NULL; + + return 0; +} + +static int aes_set_key_skcipher(struct crypto_skcipher *tfm, const u8 *in_key, + unsigned int key_len) +{ + return aes_set_key(crypto_skcipher_tfm(tfm), in_key, key_len); +} + +/* ====== Encryption/decryption routines ====== */ + +/* These are the real call to PadLock. */ +static inline void padlock_reset_key(struct cword *cword) +{ + int cpu = raw_smp_processor_id(); + + if (cword != per_cpu(zx_paes_last_cword, cpu)) +#ifndef CONFIG_X86_64 + asm volatile ("pushfl; popfl"); +#else + asm volatile ("pushfq; popfq"); +#endif +} + +static inline void padlock_store_cword(struct cword *cword) +{ + per_cpu(zx_paes_last_cword, raw_smp_processor_id()) = cword; +} + +/* + * While the padlock instructions don't use FP/SSE registers, they generate a spurious DNA fault + * when CR0.TS is '1'. Fortunately, the kernel doesn't use CR0.TS. + */ +static inline void rep_xcrypt_ecb(const u8 *input, u8 *output, void *key, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *rep_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + struct cword *control_word, int count) +{ + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void ecb_crypt_copy(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_ECB_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + rep_xcrypt_ecb(tmp, out, key, cword, count); +} + +static u8 *cbc_crypt_copy(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, int count) +{ + /* + * Padlock prefetches extra data so we must provide mapped input buffers. + * Assume there are at least 16 bytes of stack already in use. + */ + u8 buf[AES_BLOCK_SIZE * (MAX_CBC_FETCH_BLOCKS - 1) + PADLOCK_ALIGNMENT - 1]; + u8 *tmp = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT); + + memcpy(tmp, in, count * AES_BLOCK_SIZE); + return rep_xcrypt_cbc(tmp, out, key, iv, cword, count); +} + +static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key, struct cword *cword, int count) +{ + /* + * Padlock in ECB mode fetches at least ecb_fetch_bytes of data. + * We could avoid some copying here but it's probably not worth it. 
+ */ + if (unlikely(offset_in_page(in) + ecb_fetch_bytes > PAGE_SIZE)) { + ecb_crypt_copy(in, out, key, cword, count); + return; + } + + rep_xcrypt_ecb(in, out, key, cword, count); +} + +static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key, u8 *iv, struct cword *cword, + int count) +{ + /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */ + if (unlikely(offset_in_page(in) + cbc_fetch_bytes > PAGE_SIZE)) + return cbc_crypt_copy(in, out, key, iv, cword, count); + + return rep_xcrypt_cbc(in, out, key, iv, cword, count); +} + +static inline void padlock_xcrypt_ecb(const u8 *input, u8 *output, void *key, void *control_word, + u32 count) +{ + u32 initial = count & (ecb_fetch_blocks - 1); + + if (count < ecb_fetch_blocks) { + ecb_crypt(input, output, key, control_word, count); + return; + } + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xc8" /* rep xcryptecb */ + : "+S"(input), "+D"(output) + : "d"(control_word), "b"(key), "c"(count)); +} + +static inline u8 *padlock_xcrypt_cbc(const u8 *input, u8 *output, void *key, u8 *iv, + void *control_word, u32 count) +{ + u32 initial = count & (cbc_fetch_blocks - 1); + + if (count < cbc_fetch_blocks) + return cbc_crypt(input, output, key, iv, control_word, count); + + count -= initial; + + if (initial) + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (initial)); + + asm volatile (".byte 0xf3,0x0f,0xa7,0xd0" /* rep xcryptcbc */ + : "+S" (input), "+D" (output), "+a" (iv) + : "d" (control_word), "b" (key), "c" (count)); + return iv; +} + +static void padlock_aes_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static void padlock_aes_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in) +{ + struct aes_ctx *ctx = aes_ctx(tfm); + + padlock_reset_key(&ctx->cword.encrypt); + ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); + padlock_store_cword(&ctx->cword.encrypt); +} + +static struct crypto_alg aes_alg = { + .cra_name = "aes", + .cra_driver_name = "aes-padlock", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_flags = CRYPTO_ALG_TYPE_CIPHER, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct aes_ctx), + .cra_alignmask = PADLOCK_ALIGNMENT - 1, + .cra_module = THIS_MODULE, + .cra_u = { + .cipher = { + .cia_min_keysize = AES_MIN_KEY_SIZE, + .cia_max_keysize = AES_MAX_KEY_SIZE, + .cia_setkey = aes_set_key, + .cia_encrypt = padlock_aes_encrypt, + .cia_decrypt = padlock_aes_decrypt, + } + } +}; + +static int ecb_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->E, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static int ecb_aes_decrypt(struct skcipher_request 
*req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.decrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_ecb(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg ecb_aes_alg = { + .base.cra_name = "ecb(aes)", + .base.cra_driver_name = "ecb-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = ecb_aes_encrypt, + .decrypt = ecb_aes_decrypt, +}; + +static int cbc_aes_encrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + u8 *iv = padlock_xcrypt_cbc(walk.src.virt.addr, + walk.dst.virt.addr, ctx->E, + walk.iv, &ctx->cword.encrypt, + nbytes / AES_BLOCK_SIZE); + memcpy(walk.iv, iv, AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.decrypt); + + return err; +} + +static int cbc_aes_decrypt(struct skcipher_request *req) +{ + struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); + struct aes_ctx *ctx = skcipher_aes_ctx(tfm); + struct skcipher_walk walk; + unsigned int nbytes; + int err; + + padlock_reset_key(&ctx->cword.encrypt); + + err = skcipher_walk_virt(&walk, req, false); + + while ((nbytes = walk.nbytes) != 0) { + padlock_xcrypt_cbc(walk.src.virt.addr, walk.dst.virt.addr, + ctx->D, walk.iv, &ctx->cword.decrypt, + nbytes / AES_BLOCK_SIZE); + nbytes &= AES_BLOCK_SIZE - 1; + err = skcipher_walk_done(&walk, nbytes); + } + + padlock_store_cword(&ctx->cword.encrypt); + + return err; +} + +static struct skcipher_alg cbc_aes_alg = { + .base.cra_name = "cbc(aes)", + .base.cra_driver_name = "cbc-aes-padlock", + .base.cra_priority = PADLOCK_COMPOSITE_PRIORITY, + .base.cra_blocksize = AES_BLOCK_SIZE, + .base.cra_ctxsize = sizeof(struct aes_ctx), + .base.cra_alignmask = PADLOCK_ALIGNMENT - 1, + .base.cra_module = THIS_MODULE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .ivsize = AES_BLOCK_SIZE, + .setkey = aes_set_key_skcipher, + .encrypt = cbc_aes_encrypt, + .decrypt = cbc_aes_decrypt, +}; + +static const struct x86_cpu_id zhaoxin_cpu_id[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_XCRYPT }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_cpu_id); + +static int __init padlock_init(void) +{ + int ret; + + if (!x86_match_cpu(zhaoxin_cpu_id)) + return -ENODEV; + + if (!boot_cpu_has(X86_FEATURE_XCRYPT_EN)) { + pr_notice("ACE detected, but not enabled. 
Hmm, strange...\n");
+		return -ENODEV;
+	}
+
+	ret = crypto_register_alg(&aes_alg);
+	if (ret)
+		goto aes_err;
+
+	ret = crypto_register_skcipher(&ecb_aes_alg);
+	if (ret)
+		goto ecb_aes_err;
+
+	ret = crypto_register_skcipher(&cbc_aes_alg);
+	if (ret)
+		goto cbc_aes_err;
+
+	pr_notice("Using ACE for AES algorithm.\n");
+
+out:
+	return ret;
+
+cbc_aes_err:
+	crypto_unregister_skcipher(&ecb_aes_alg);
+ecb_aes_err:
+	crypto_unregister_alg(&aes_alg);
+aes_err:
+	pr_err("ACE AES initialization failed.\n");
+	goto out;
+}
+
+static void __exit padlock_fini(void)
+{
+	crypto_unregister_skcipher(&cbc_aes_alg);
+	crypto_unregister_skcipher(&ecb_aes_alg);
+	crypto_unregister_alg(&aes_alg);
+}
+
+module_init(padlock_init);
+module_exit(padlock_fini);
+
+MODULE_DESCRIPTION("ACE AES algorithm support");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Michal Ludvig");
+MODULE_VERSION(DRIVER_VERSION);
+
+MODULE_ALIAS_CRYPTO("aes");
diff --git a/drivers/crypto/zhaoxin-sha.c b/drivers/crypto/zhaoxin-sha.c
new file mode 100644
index 0000000000000000000000000000000000000000..840805f36838e5a2bf6791684e67f35d8698da90
--- /dev/null
+++ b/drivers/crypto/zhaoxin-sha.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for ACE hardware crypto engine.
+ */
+
+#include <crypto/internal/hash.h>
+#include <crypto/padlock.h>
+#include <crypto/sha1.h>
+#include <crypto/sha2.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/scatterlist.h>
+#include <asm/cpu_device_id.h>
+#include <asm/fpu/api.h>
+
+#define DRIVER_VERSION "1.0.0"
+
+static inline void padlock_output_block(uint32_t *src, uint32_t *dst, size_t count)
+{
+	while (count--)
+		*dst++ = swab32(*src++);
+}
+
+/*
+ * Add two shash_alg instances for the hardware-implemented multi-part hashes
+ * supported by the Zhaoxin processor.
+ */
+static int padlock_sha1_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha1_state){
+		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
+	};
+
+	return 0;
+}
+
+static int padlock_sha1_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha1_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA1_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA1_BLOCK_SIZE) {
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buffer + partial, data, done + SHA1_BLOCK_SIZE);
+			src = sctx->buffer;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"(1UL));
+			done += SHA1_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining bytes from the input data */
+		if (len - done >= SHA1_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xc8"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / SHA1_BLOCK_SIZE)));
+			done += ((len - done) - (len - done) % SHA1_BLOCK_SIZE);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA1_DIGEST_SIZE);
+	memcpy(sctx->buffer + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha1_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80,
};
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ? (56 - partial) : ((64+56) - partial);
+	padlock_sha1_update_zhaoxin(desc, padding, padlen);
+
+	/* Append length field bytes */
+	padlock_sha1_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits));
+
+	/* Swap to output */
+	padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 5);
+
+	return 0;
+}
+
+static int padlock_sha256_init_zhaoxin(struct shash_desc *desc)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+
+	*sctx = (struct sha256_state) {
+		.state = {
+			SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
+			SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7
+		},
+	};
+
+	return 0;
+}
+
+static int padlock_sha256_update_zhaoxin(struct shash_desc *desc, const u8 *data, unsigned int len)
+{
+	struct sha256_state *sctx = shash_desc_ctx(desc);
+	unsigned int partial, done;
+	const u8 *src;
+
+	/* PHE requires the output buffer to be 128 bytes long and 16-byte aligned */
+	u8 buf[128 + PADLOCK_ALIGNMENT - STACK_ALIGN] __aligned(STACK_ALIGN);
+	u8 *dst = PTR_ALIGN(&buf[0], PADLOCK_ALIGNMENT);
+
+	partial = sctx->count & 0x3f;
+	sctx->count += len;
+	done = 0;
+	src = data;
+	memcpy(dst, (u8 *)(sctx->state), SHA256_DIGEST_SIZE);
+
+	if ((partial + len) >= SHA256_BLOCK_SIZE) {
+		/* Append the bytes in state's buffer to a block to handle */
+		if (partial) {
+			done = -partial;
+			memcpy(sctx->buf + partial, data, done + SHA256_BLOCK_SIZE);
+			src = sctx->buf;
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"(1UL));
+			done += SHA256_BLOCK_SIZE;
+			src = data + done;
+		}
+
+		/* Process the remaining bytes from the input data */
+		if (len - done >= SHA256_BLOCK_SIZE) {
+			asm volatile (".byte 0xf3,0x0f,0xa6,0xd0"
+				: "+S"(src), "+D"(dst)
+				: "a"((long)-1), "c"((unsigned long)((len - done) / 64)));
+			done += ((len - done) - (len - done) % 64);
+			src = data + done;
+		}
+		partial = 0;
+	}
+	memcpy((u8 *)(sctx->state), dst, SHA256_DIGEST_SIZE);
+	memcpy(sctx->buf + partial, src, len - done);
+
+	return 0;
+}
+
+static int padlock_sha256_final_zhaoxin(struct shash_desc *desc, u8 *out)
+{
+	struct sha256_state *state = (struct sha256_state *)shash_desc_ctx(desc);
+	unsigned int partial, padlen;
+	__be64 bits;
+	static const u8 padding[64] = { 0x80, };
+
+	bits = cpu_to_be64(state->count << 3);
+
+	/* Pad out to 56 mod 64 */
+	partial = state->count & 0x3f;
+	padlen = (partial < 56) ?
(56 - partial) : ((64+56) - partial); + padlock_sha256_update_zhaoxin(desc, padding, padlen); + + /* Append length field bytes */ + padlock_sha256_update_zhaoxin(desc, (const u8 *)&bits, sizeof(bits)); + + /* Swap to output */ + padlock_output_block((uint32_t *)(state->state), (uint32_t *)out, 8); + + return 0; +} + +static int padlock_sha_export_zhaoxin(struct shash_desc *desc, void *out) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(out, sctx, statesize); + return 0; +} + +static int padlock_sha_import_zhaoxin(struct shash_desc *desc, const void *in) +{ + int statesize = crypto_shash_statesize(desc->tfm); + void *sctx = shash_desc_ctx(desc); + + memcpy(sctx, in, statesize); + return 0; +} + +static struct shash_alg sha1_alg_zhaoxin = { + .digestsize = SHA1_DIGEST_SIZE, + .init = padlock_sha1_init_zhaoxin, + .update = padlock_sha1_update_zhaoxin, + .final = padlock_sha1_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha1_state), + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "sha1-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static struct shash_alg sha256_alg_zhaoxin = { + .digestsize = SHA256_DIGEST_SIZE, + .init = padlock_sha256_init_zhaoxin, + .update = padlock_sha256_update_zhaoxin, + .final = padlock_sha256_final_zhaoxin, + .export = padlock_sha_export_zhaoxin, + .import = padlock_sha_import_zhaoxin, + .descsize = sizeof(struct sha256_state), + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "sha256-padlock-zhaoxin", + .cra_priority = PADLOCK_CRA_PRIORITY, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_module = THIS_MODULE, + } +}; + +static const struct x86_cpu_id zhaoxin_sha_ids[] = { + { X86_VENDOR_CENTAUR, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + { X86_VENDOR_ZHAOXIN, 7, X86_MODEL_ANY, X86_STEPPING_ANY, X86_FEATURE_PHE }, + {} +}; +MODULE_DEVICE_TABLE(x86cpu, zhaoxin_sha_ids); + +static int __init padlock_init(void) +{ + int rc = -ENODEV; + struct shash_alg *sha1; + struct shash_alg *sha256; + + if (!x86_match_cpu(zhaoxin_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN)) + return -ENODEV; + + sha1 = &sha1_alg_zhaoxin; + sha256 = &sha256_alg_zhaoxin; + + rc = crypto_register_shash(sha1); + if (rc) + goto out; + + rc = crypto_register_shash(sha256); + if (rc) + goto out_unreg1; + + pr_notice("Using ACE for SHA1/SHA256 algorithms.\n"); + + return 0; + +out_unreg1: + crypto_unregister_shash(sha1); + +out: + pr_err("ACE SHA1/SHA256 initialization failed.\n"); + return rc; +} + +static void __exit padlock_fini(void) +{ + crypto_unregister_shash(&sha1_alg_zhaoxin); + crypto_unregister_shash(&sha256_alg_zhaoxin); +} + +module_init(padlock_init); +module_exit(padlock_fini); + +MODULE_DESCRIPTION("ACE SHA1/SHA256 algorithms support."); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Michal Ludvig"); +MODULE_VERSION(DRIVER_VERSION); + +MODULE_ALIAS_CRYPTO("sha1-all"); +MODULE_ALIAS_CRYPTO("sha256-all"); +MODULE_ALIAS_CRYPTO("sha1-padlock"); +MODULE_ALIAS_CRYPTO("sha256-padlock"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9b6642d0087130ca7450c8838878b525686e68c0..f02b70c911f19951ffc694b46aaeeb8313227b8a 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -96,6 +96,17 @@ int __amd64_write_pci_cfg_dword(struct pci_dev 
*pdev, int offset,
 	return err;
 }
 
+static u32 get_umc_base_f18h_m4h(u16 node, u8 channel)
+{
+	struct pci_dev *f3 = node_to_amd_nb(node)->misc;
+	u8 df_id;
+
+	get_df_id(f3, &df_id);
+	df_id -= 4;
+
+	return get_umc_base(channel) + (0x80000000 + (0x10000000 * df_id));
+}
+
 /*
  * Select DCT to which PCI cfg accesses are routed
  */
@@ -1135,8 +1146,11 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 	ctx.nid = nid;
 	ctx.inst_id = umc;
 
-	/* Read D18F0x1B4 (DramOffset), check if base 1 is used. */
-	if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
+	/* Read DramOffset, check if base 1 is used. */
+	if (hygon_f18h_m4h()) {
+		if (df_indirect_read_instance(nid, 0, 0x214, umc, &ctx.tmp))
+			goto out_err;
+	} else if (df_indirect_read_instance(nid, 0, 0x1B4, umc, &ctx.tmp))
 		goto out_err;
 
 	/* Remove HiAddrOffset from normalized address, if enabled: */
@@ -1160,6 +1174,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		goto out_err;
 	}
 
+	intlv_num_sockets = 0;
+	if (hygon_f18h_m4h())
+		intlv_num_sockets = (ctx.tmp >> 2) & 0x3;
 	lgcy_mmio_hole_en = ctx.tmp & BIT(1);
 	intlv_num_chan = (ctx.tmp >> 4) & 0xF;
 	intlv_addr_sel = (ctx.tmp >> 8) & 0x7;
@@ -1176,7 +1193,8 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 	if (df_indirect_read_instance(nid, 0, 0x114 + (8 * base), umc, &ctx.tmp))
 		goto out_err;
 
-	intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
+	if (!hygon_f18h_m4h())
+		intlv_num_sockets = (ctx.tmp >> 8) & 0x1;
 	intlv_num_dies = (ctx.tmp >> 10) & 0x3;
 	dram_limit_addr = ((ctx.tmp & GENMASK_ULL(31, 12)) << 16) | GENMASK_ULL(27, 0);
 
@@ -1194,6 +1212,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		hash_enabled = true;
 		break;
 	default:
+		if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x4 &&
+		    intlv_num_chan == 2)
+			break;
 		pr_err("%s: Invalid number of interleaved channels %d.\n",
 		       __func__, intlv_num_chan);
 		goto out_err;
@@ -1212,8 +1233,9 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 	/* Add a bit if sockets are interleaved. */
 	num_intlv_bits += intlv_num_sockets;
 
-	/* Assert num_intlv_bits <= 4 */
-	if (num_intlv_bits > 4) {
+	/* Assert num_intlv_bits in the correct range. */
+	if ((hygon_f18h_m4h() && num_intlv_bits > 7) ||
+	    (!hygon_f18h_m4h() && num_intlv_bits > 4)) {
 		pr_err("%s: Invalid interleave bits %d.\n",
 		       __func__, num_intlv_bits);
 		goto out_err;
@@ -1232,7 +1254,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		if (df_indirect_read_instance(nid, 0, 0x50, umc, &ctx.tmp))
 			goto out_err;
 
-		cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
+		if (hygon_f18h_m4h())
+			cs_fabric_id = (ctx.tmp >> 8) & 0x7FF;
+		else
+			cs_fabric_id = (ctx.tmp >> 8) & 0xFF;
 		die_id_bit = 0;
 
 		/* If interleaved over more than 1 channel: */
@@ -1252,8 +1277,13 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		/* If interleaved over more than 1 die. */
 		if (intlv_num_dies) {
 			sock_id_bit = die_id_bit + intlv_num_dies;
-			die_id_shift = (ctx.tmp >> 24) & 0xF;
-			die_id_mask = (ctx.tmp >> 8) & 0xFF;
+			if (hygon_f18h_m4h()) {
+				die_id_shift = (ctx.tmp >> 12) & 0xF;
+				die_id_mask = ctx.tmp & 0x7FF;
+			} else {
+				die_id_shift = (ctx.tmp >> 24) & 0xF;
+				die_id_mask = (ctx.tmp >> 8) & 0xFF;
+			}
 
 			cs_id |= ((cs_fabric_id & die_id_mask) >> die_id_shift) << die_id_bit;
 		}
@@ -1261,7 +1291,10 @@ static int umc_normaddr_to_sysaddr(u64 norm_addr, u16 nid, u8 umc, u64 *sys_addr
 		/* If interleaved over more than 1 socket.
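 		 * On Hygon family 18h model 4h the fabric IDs are 11 bits wide,
 		 * which is why the masks here widen from 0xFF to 0x7FF.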
*/ if (intlv_num_sockets) { socket_id_shift = (ctx.tmp >> 28) & 0xF; - socket_id_mask = (ctx.tmp >> 16) & 0xFF; + if (hygon_f18h_m4h()) + socket_id_mask = (ctx.tmp >> 16) & 0x7FF; + else + socket_id_mask = (ctx.tmp >> 16) & 0xFF; cs_id |= ((cs_fabric_id & socket_id_mask) >> socket_id_shift) << sock_id_bit; } @@ -1608,7 +1641,10 @@ static void umc_dump_misc_regs(struct amd64_pvt *pvt) u32 i, tmp, umc_base; for_each_umc(i) { - umc_base = get_umc_base(i); + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); umc = &pvt->umc[i]; edac_dbg(1, "UMC%d DIMM cfg: 0x%x\n", i, umc->dimm_cfg); @@ -1717,11 +1753,17 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) u32 mask_reg, mask_reg_sec; u32 *base, *base_sec; u32 *mask, *mask_sec; + u32 umc_base; int cs, umc; for_each_umc(umc) { - umc_base_reg = get_umc_base(umc) + UMCCH_BASE_ADDR; - umc_base_reg_sec = get_umc_base(umc) + UMCCH_BASE_ADDR_SEC; + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, umc); + else + umc_base = get_umc_base(umc); + + umc_base_reg = umc_base + UMCCH_BASE_ADDR; + umc_base_reg_sec = umc_base + UMCCH_BASE_ADDR_SEC; for_each_chip_select(cs, umc, pvt) { base = &pvt->csels[umc].csbases[cs]; @@ -1739,8 +1781,8 @@ static void umc_read_base_mask(struct amd64_pvt *pvt) umc, cs, *base_sec, base_reg_sec); } - umc_mask_reg = get_umc_base(umc) + UMCCH_ADDR_MASK; - umc_mask_reg_sec = get_umc_base(umc) + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); + umc_mask_reg = umc_base + UMCCH_ADDR_MASK; + umc_mask_reg_sec = umc_base + get_umc_reg(pvt, UMCCH_ADDR_MASK_SEC); for_each_chip_select_mask(cs, umc, pvt) { mask = &pvt->csels[umc].csmasks[cs]; @@ -1823,7 +1865,8 @@ static void umc_determine_memory_type(struct amd64_pvt *pvt) * Check if the system supports the "DDR Type" field in UMC Config * and has DDR5 DIMMs in use. 
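 	 * When that field reads 0x1, DIMM_CFG bit 5 denotes LRDDR5 and bit 4
 	 * registered DDR5; otherwise unbuffered DDR5 is assumed.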
*/ - if (pvt->flags.zn_regs_v2 && ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { + if ((pvt->flags.zn_regs_v2 || hygon_f18h_m4h()) && + ((umc->umc_cfg & GENMASK(2, 0)) == 0x1)) { if (umc->dimm_cfg & BIT(5)) umc->dram_type = MEM_LRDDR5; else if (umc->dimm_cfg & BIT(4)) @@ -3057,7 +3100,11 @@ static inline void decode_bus_error(int node_id, struct mce *m) */ static void umc_get_err_info(struct mce *m, struct err_info *err) { - err->channel = (m->ipid & GENMASK(31, 0)) >> 20; + if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) + err->channel = (m->ipid & GENMASK(23, 0)) >> 20; + else + err->channel = (m->ipid & GENMASK(31, 0)) >> 20; err->csrow = m->synd & 0x7; } @@ -3068,6 +3115,7 @@ static void decode_umc_error(int node_id, struct mce *m) struct amd64_pvt *pvt; struct err_info err; u64 sys_addr; + u8 umc; node_id = fixup_node_id(node_id, m); @@ -3098,7 +3146,12 @@ static void decode_umc_error(int node_id, struct mce *m) pvt->ops->get_err_info(m, &err); - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) { + if (hygon_f18h_m4h() && boot_cpu_data.x86_model == 0x6) + umc = err.channel << 1; + else + umc = err.channel; + + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, umc, &sys_addr)) { err.err_code = ERR_NORM_ADDR; goto log_error; } @@ -3172,8 +3225,11 @@ static void umc_read_mc_regs(struct amd64_pvt *pvt) /* Read registers from each UMC */ for_each_umc(i) { + if (hygon_f18h_m4h()) + umc_base = get_umc_base_f18h_m4h(pvt->mc_node_id, i); + else + umc_base = get_umc_base(i); - umc_base = get_umc_base(i); umc = &pvt->umc[i]; amd_smn_read(nid, umc_base + get_umc_reg(pvt, UMCCH_DIMM_CFG), &umc->dimm_cfg); @@ -4104,6 +4160,18 @@ static int per_family_init(struct amd64_pvt *pvt) break; case 0x18: + if (pvt->model == 0x4) { + pvt->ctl_name = "F18h_M04h"; + pvt->max_mcs = 3; + break; + } else if (pvt->model == 0x5) { + pvt->ctl_name = "F18h_M05h"; + pvt->max_mcs = 1; + break; + } else if (pvt->model == 0x6) { + pvt->ctl_name = "F18h_M06h"; + break; + } pvt->ctl_name = "F18h"; break; @@ -4367,6 +4435,7 @@ static int __init amd64_edac_init(void) { const char *owner; int err = -ENODEV; + u16 instance_num; int i; if (ghes_get_devices()) @@ -4384,8 +4453,13 @@ static int __init amd64_edac_init(void) opstate_init(); + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + err = -ENOMEM; - ecc_stngs = kcalloc(amd_nb_num(), sizeof(ecc_stngs[0]), GFP_KERNEL); + ecc_stngs = kcalloc(instance_num, sizeof(ecc_stngs[0]), GFP_KERNEL); if (!ecc_stngs) goto err_free; @@ -4393,7 +4467,7 @@ static int __init amd64_edac_init(void) if (!msrs) goto err_free; - for (i = 0; i < amd_nb_num(); i++) { + for (i = 0; i < instance_num; i++) { err = probe_one_instance(i); if (err) { /* unwind properly */ @@ -4438,6 +4512,7 @@ static int __init amd64_edac_init(void) static void __exit amd64_edac_exit(void) { + u16 instance_num; int i; if (pci_ctl) @@ -4449,7 +4524,12 @@ static void __exit amd64_edac_exit(void) else amd_unregister_ecc_decoder(decode_bus_error); - for (i = 0; i < amd_nb_num(); i++) + if (hygon_f18h_m4h()) + instance_num = hygon_nb_num(); + else + instance_num = amd_nb_num(); + + for (i = 0; i < instance_num; i++) remove_one_instance(i); kfree(ecc_stngs); diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index 9215c06783df5f19dd64cb07b2f275f9ff8ee118..06e29d2b51d1ed32071d0371369bb1e9de2a3ba1 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -1187,8 +1187,13 @@ static void 
decode_smca_error(struct mce *m)
 	pr_cont(", %s.\n", smca_mce_descs[bank_type].descs[xec]);
 
 	if ((bank_type == SMCA_UMC || bank_type == SMCA_UMC_V2) &&
-	    xec == 0 && decode_dram_ecc)
-		decode_dram_ecc(topology_die_id(m->extcpu), m);
+	    xec == 0 && decode_dram_ecc) {
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON &&
+		    boot_cpu_data.x86 == 0x18)
+			decode_dram_ecc(topology_logical_die_id(m->extcpu), m);
+		else
+			decode_dram_ecc(topology_die_id(m->extcpu), m);
+	}
 }
 
 static inline void amd_decode_err_code(u16 ec)
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index b59e3041fd627585c4662f35c1fc34103df9730d..9cb284a6e7a16d7db1de98663489f6dec8edec62 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -155,7 +155,7 @@ config RASPBERRYPI_FIRMWARE
 
 config FW_CFG_SYSFS
 	tristate "QEMU fw_cfg device support in sysfs"
-	depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
+	depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86 || SW64)
 	depends on HAS_IOPORT_MAP
 	default n
 	help
diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
index 285fe7ad490d1ddd904e0dd73518350bc135ef8c..0f7ef69071c0156326171421b62ca23eaf6e2e0a 100644
--- a/drivers/firmware/arm_sdei.c
+++ b/drivers/firmware/arm_sdei.c
@@ -188,6 +188,28 @@ int sdei_api_event_context(u32 query, u64 *result)
 }
 NOKPROBE_SYMBOL(sdei_api_event_context);
 
+int sdei_api_event_interrupt_bind(int hwirq)
+{
+	u64 event_number;
+
+	invoke_sdei_fn(SDEI_1_0_FN_SDEI_INTERRUPT_BIND, hwirq, 0, 0, 0, 0,
+		       &event_number);
+
+	return (int)event_number;
+}
+
+int sdei_api_clear_eoi(int hwirq)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_CLEAR_EOI, hwirq, 0, 0, 0, 0,
+			      NULL);
+}
+
+int sdei_api_set_secure_timer_period(int sec)
+{
+	return invoke_sdei_fn(SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD, sec, 0, 0, 0,
+			      0, NULL);
+}
+
 static int sdei_api_event_get_info(u32 event, u32 info, u64 *result)
 {
 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_GET_INFO, event, info, 0,
@@ -379,7 +401,7 @@ static int sdei_platform_reset(void)
 	return err;
 }
 
-static int sdei_api_event_enable(u32 event_num)
+int sdei_api_event_enable(u32 event_num)
 {
 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_ENABLE, event_num, 0, 0, 0,
 			      0, NULL);
@@ -426,7 +448,7 @@ int sdei_event_enable(u32 event_num)
 	return err;
 }
 
-static int sdei_api_event_disable(u32 event_num)
+int sdei_api_event_disable(u32 event_num)
 {
 	return invoke_sdei_fn(SDEI_1_0_FN_SDEI_EVENT_DISABLE, event_num, 0, 0,
 			      0, 0, NULL);
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 231f1c70d1db548566b56a63bc52dcbe324a4a3d..b76f9df4885a464040d381159fc5d0746c6e9ba1 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -224,7 +224,7 @@ config EFI_DISABLE_PCI_DMA
 
 config EFI_EARLYCON
 	def_bool y
-	depends on SERIAL_EARLYCON && !ARM && !IA64
+	depends on SERIAL_EARLYCON && !ARM && !IA64 && !SW64
 	select FONT_SUPPORT
 	select ARCH_USE_MEMREMAP_PROT
 
@@ -301,3 +301,12 @@ config UEFI_CPER_X86
 	bool
 	depends on UEFI_CPER && X86
 	default y
+
+config YITIAN_CPER_RAWDATA
+	bool "Print Yitian custom raw data about platform error info"
+	depends on EFI && ACPI && ARM64
+	help
+	  Allow printing of Yitian custom raw data about platform error
+	  info, including CMN, GIC, SMMU, DDR, etc. This gathers more
+	  useful error information from the hardware, which helps to debug
+	  and test the RAS feature.
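The SDEI calls made available above (the new sdei_api_event_interrupt_bind() plus the un-static'ed sdei_api_event_enable()/sdei_api_event_disable()) are intended for platform code. As a minimal sketch only — the helper name and error handling are hypothetical, while the signatures come from the hunk above and failure is assumed to be reported as a negative value, per the SDEI convention::

	/* Hypothetical consumer: bind a hardware IRQ to an SDEI event. */
	static int example_bind_and_enable(int hwirq)
	{
		int event = sdei_api_event_interrupt_bind(hwirq);

		if (event < 0)
			return event;

		/* Enable delivery of the freshly bound event. */
		return sdei_api_event_enable(event);
	}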
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile index e489fefd23dae0bc21baaf48be6c1499ca5d63c7..7c1b924e8ea3fc24a0460b0011d5da7cb174e760 100644 --- a/drivers/firmware/efi/Makefile +++ b/drivers/firmware/efi/Makefile @@ -35,8 +35,11 @@ obj-$(CONFIG_SYSFB) += sysfb_efi.o arm-obj-$(CONFIG_EFI) := efi-init.o arm-runtime.o obj-$(CONFIG_ARM) += $(arm-obj-y) obj-$(CONFIG_ARM64) += $(arm-obj-y) +sw64-obj-$(CONFIG_EFI) := sunway-init.o sunway-runtime.o +obj-$(CONFIG_SW64) += $(sw64-obj-y) riscv-obj-$(CONFIG_EFI) := efi-init.o riscv-runtime.o obj-$(CONFIG_RISCV) += $(riscv-obj-y) +#obj-$(CONFIG_LOONGARCH) += efi-init.o obj-$(CONFIG_EFI_CAPSULE_LOADER) += capsule-loader.o obj-$(CONFIG_EFI_EARLYCON) += earlycon.o obj-$(CONFIG_UEFI_CPER_ARM) += cper-arm.o diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 35c37f667781c7071c714aef274e68dbddca026b..30a62a97ae98b7aa26de88c341907182cd922243 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c @@ -623,6 +623,142 @@ cper_estatus_print_section(const char *pfx, struct acpi_hest_generic_data *gdata pr_err(FW_WARN "error section length is too small\n"); } +#ifdef CONFIG_YITIAN_CPER_RAWDATA +static char *yitian_raw_err_type_str(u64 type) +{ + switch (type) { + case ERR_TYPE_GENERIC: return "GENERIC"; + case ERR_TYPE_CORE: return "CORE"; + case ERR_TYPE_GIC: return "GIC"; + case ERR_TYPE_CMN: return "CMN"; + case ERR_TYPE_SMMU: return "SMMU"; + case ERR_TYPE_DDR: return "DDR"; + case ERR_TYPE_PCI: return "PCI"; + default: return "Reserved"; + } +} + +void yitian_platform_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ras_common_reg *common_reg; + int sub_record_no = 0; + + yitian_estatus_for_each_raw_reg_common(header, common_reg, sub_record_no) { + pr_info("%s sub_type: 0x%x\n", pfx, + header->sub_type[sub_record_no]); + pr_info("%s fr: 0x%llx, ctrl: 0x%llx, status: 0x%llx, addr: 0x%llx\n", + pfx, common_reg->fr, common_reg->ctrl, + common_reg->status, common_reg->addr); + pr_info("%s misc0: 0x%llx, misc1: 0x%llx, misc2: 0x%llx, misc3: 0x%llx\n", + pfx, common_reg->misc0, common_reg->misc1, + common_reg->misc2, common_reg->misc3); + } +} + +static void yitian_ddr_raw_data_print(const char *pfx, + struct yitian_raw_data_header *header) +{ + struct yitian_ddr_raw_data *data; + + data = (struct yitian_ddr_raw_data *)(header + 1); + + switch (data->ex_type) { + case 0x1: + pr_info("%s Synchronous Exception taken in EL%d\n", pfx, data->el_nr); + break; + case 0x2: + pr_info("%s Interrupt: %d\n", pfx, data->intr); + break; + case 0x3: + pr_info("%s SError\n", pfx); + break; + default: + pr_info("%s Unknown interrupt type\n", pfx); + } + + /* System regs is valid only when it's a synchronous exception */ + if (data->ex_type == 1) { + struct yitian_ddr_sys_reg *sys_regs = &data->sys_regs; + + pr_info("%s ESR: 0x%llx, ELR: 0x%llx, FAR: 0x%llx, SCR: 0x%llx, SCTLR: 0x%llx, LR: 0x%llx\n", + pfx, sys_regs->esr, sys_regs->elr, sys_regs->far, + sys_regs->scr, sys_regs->sctlr, sys_regs->lr); + } + + /* ECC Data is valid only when it's a ECC error */ + if (data->err_type == 1) { + struct yitian_ddr_ecc_data *ecc_data = &data->ecc_data; + + pr_info("%s ECCERRCNT: 0x%x, ECCSTAT: 0x%x, ADVECCSTAT: 0x%x, ECCSYMBOL: 0x%x, ECCERRCNTSTAT: 0x%x, ECCERRCNT0: 0x%x, ECCERRCNT1: 0x%x, ECCCADDR0: 0x%x, ECCCADDR1: 0x%x, ECCCDATA0: 0x%x, ECCCDATA1: 0x%x, ECCUADDR0: 0x%x, ECCUADDR1: 0x%x, ECCUDATA0: 0x%x, ECCUDATA1: 0x%x\n", + pfx, ecc_data->eccerrcnt, ecc_data->eccstat, + 
ecc_data->adveccstat, ecc_data->eccsymbol,
+			ecc_data->eccerrcntstat, ecc_data->eccerrcnt0,
+			ecc_data->eccerrcnt1, ecc_data->ecccaddr0,
+			ecc_data->ecccaddr1, ecc_data->ecccdata0,
+			ecc_data->ecccdata1, ecc_data->eccuaddr0,
+			ecc_data->eccuaddr1, ecc_data->eccudata0,
+			ecc_data->eccudata1);
+	}
+}
+
+bool yitian_estatus_check_header(const struct acpi_hest_generic_status *estatus)
+{
+	struct yitian_raw_data_header *header;
+
+	if (estatus->raw_data_length < sizeof(*header))
+		return false;
+
+	header = (struct yitian_raw_data_header *)((void *)estatus +
+						   estatus->raw_data_offset);
+
+#define YITIAN_SIGNATURE_16(A, B)	((A) | ((B) << 8))
+#define YITIAN_SIGNATURE_32(A, B, C, D)	\
+	(YITIAN_SIGNATURE_16(A, B) | (YITIAN_SIGNATURE_16(C, D) << 16))
+
+	if (header->signature != YITIAN_SIGNATURE_32('r', 'a', 'w', 'd'))
+		return false;
+
+	/*
+	 * Only processor, CMN, GIC, and SMMU errors carry raw error data,
+	 * which follows the Generic Error Data Entries. The raw error data
+	 * format is vendor implementation defined.
+	 */
+	if (!header->common_reg_nr)
+		return false;
+
+	return true;
+}
+
+void yitian_raw_data_print(const char *pfx,
+			   const struct acpi_hest_generic_status *estatus)
+{
+	struct yitian_raw_data_header *header;
+
+	if (!yitian_estatus_check_header(estatus))
+		return;
+
+	header = (struct yitian_raw_data_header *)((void *)estatus +
+						   estatus->raw_data_offset);
+
+	pr_info("%s type: %s (0x%x), common_reg_nr:%d\n", pfx,
+		yitian_raw_err_type_str(header->type), header->type,
+		header->common_reg_nr);
+
+	switch (header->type) {
+	case ERR_TYPE_CORE:
+	case ERR_TYPE_GIC:
+	case ERR_TYPE_CMN:
+	case ERR_TYPE_SMMU:
+		yitian_platform_raw_data_print(pfx, header);
+		break;
+	case ERR_TYPE_DDR:
+		yitian_ddr_raw_data_print(pfx, header);
+		break;
+	}
+}
+#endif /* CONFIG_YITIAN_CPER_RAWDATA */
+
 void cper_estatus_print(const char *pfx,
 			const struct acpi_hest_generic_status *estatus)
 {
@@ -643,6 +779,11 @@ void cper_estatus_print(const char *pfx,
 		cper_estatus_print_section(newpfx, gdata, sec_no);
 		sec_no++;
 	}
+
+#ifdef CONFIG_YITIAN_CPER_RAWDATA
+	if (estatus->raw_data_length)
+		yitian_raw_data_print(pfx, estatus);
+#endif /* CONFIG_YITIAN_CPER_RAWDATA */
 }
 EXPORT_SYMBOL_GPL(cper_estatus_print);
 
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 1974f0ad32badbea55d1f4c1a43f4f95c065c3b5..804c1f06395e5cda782e223fbefae5ea09a89620 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -807,7 +807,7 @@ int __init efi_systab_check_header(const efi_table_hdr_t *systab_hdr)
 	return 0;
 }
 
-#ifndef CONFIG_IA64
+#if !defined(CONFIG_IA64) && !defined(CONFIG_SW64)
 static const efi_char16_t *__init map_fw_vendor(unsigned long fw_vendor,
 						size_t size)
 {
diff --git a/drivers/firmware/efi/sunway-init.c b/drivers/firmware/efi/sunway-init.c
new file mode 100644
index 0000000000000000000000000000000000000000..870abc2f5afea3ab7c4b9674fc0c4c7a4b93b1e9
--- /dev/null
+++ b/drivers/firmware/efi/sunway-init.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013 - 2015 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ * + */ + +#define pr_fmt(fmt) "efi: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +unsigned long entSuspend; +unsigned long bios_version; + +static int __init is_memory(efi_memory_desc_t *md) +{ + if (md->attribute & (EFI_MEMORY_WB|EFI_MEMORY_WT|EFI_MEMORY_WC)) + return 1; + return 0; +} +static efi_config_table_type_t arch_tables[] __initdata = { + {SMBIOS3_TABLE_GUID, NULL, ""}, + {SLEEP_ENTRY_GUID, &entSuspend, "SLEEP ENTRY"}, + {BIOS_VERSION_GUID, &bios_version, "BIOS VERSION"}, + {}, +}; + +static int __init uefi_init(u64 efi_system_table) +{ + efi_char16_t *c16; + efi_config_table_t *config_tables; + efi_system_table_t *systab; + size_t table_size; + char vendor[100] = "unknown"; + int i, retval; + + systab = early_memremap(efi_system_table, + sizeof(efi_system_table_t)); + if (systab == NULL) { + pr_warn("Unable to map EFI system table.\n"); + return -ENOMEM; + } + + set_bit(EFI_BOOT, &efi.flags); + if (IS_ENABLED(CONFIG_64BIT)) + set_bit(EFI_64BIT, &efi.flags); + + /* + * Verify the EFI Table + */ + if (systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { + pr_err("System table signature incorrect\n"); + retval = -EINVAL; + goto out; + } + if ((systab->hdr.revision >> 16) < 2) + pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff); + + efi.runtime = systab->runtime; + efi.runtime_version = systab->hdr.revision; + + /* Show what we know for posterity */ + c16 = early_memremap(systab->fw_vendor, + sizeof(vendor) * sizeof(efi_char16_t)); + if (c16) { + for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) + vendor[i] = c16[i]; + vendor[i] = '\0'; + early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t)); + } + + pr_info("EFI v%u.%.02u by %s\n", + systab->hdr.revision >> 16, + systab->hdr.revision & 0xffff, vendor); + + table_size = sizeof(efi_config_table_64_t) * systab->nr_tables; + config_tables = early_memremap(systab->tables, table_size); + if (config_tables == NULL) { + pr_warn("Unable to map EFI config table array.\n"); + retval = -ENOMEM; + goto out; + } + + retval = efi_config_parse_tables(config_tables, systab->nr_tables, + arch_tables); + + early_memunmap(config_tables, table_size); +out: + early_memunmap(systab, sizeof(efi_system_table_t)); + + if (!bios_version) + retval = -EINVAL; + + return retval; +} + +/* + * Return true for regions that can be used as System RAM. + */ +static __init int is_usable_memory(efi_memory_desc_t *md) +{ + switch (md->type) { + case EFI_LOADER_CODE: + case EFI_LOADER_DATA: + case EFI_ACPI_RECLAIM_MEMORY: + case EFI_BOOT_SERVICES_CODE: + case EFI_BOOT_SERVICES_DATA: + case EFI_CONVENTIONAL_MEMORY: + case EFI_PERSISTENT_MEMORY: + /* + * According to the spec, these regions are no longer reserved + * after calling ExitBootServices(). However, we can only use + * them as System RAM if they can be mapped writeback cacheable. 
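+	 * That is exactly what the EFI_MEMORY_WB attribute check below
+	 * enforces.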
+		 */
+		return (md->attribute & EFI_MEMORY_WB);
+	default:
+		break;
+	}
+	return false;
+}
+
+static __init void reserve_regions(void)
+{
+	efi_memory_desc_t *md;
+	u64 paddr, npages, size;
+
+	if (efi_enabled(EFI_DBG))
+		pr_info("Processing EFI memory map:\n");
+
+	for_each_efi_memory_desc(md) {
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+
+		if (efi_enabled(EFI_DBG)) {
+			char buf[64];
+
+			pr_info("  0x%012llx-0x%012llx %s\n",
+				paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
+				efi_md_typeattr_format(buf, sizeof(buf), md));
+		}
+
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
+
+		if (is_memory(md)) {
+			early_init_dt_add_memory_arch(paddr, size);
+
+			if (!is_usable_memory(md))
+				memblock_mark_nomap(paddr, size);
+
+			/* keep ACPI reclaim memory intact for kexec etc. */
+			if (md->type == EFI_ACPI_RECLAIM_MEMORY)
+				memblock_reserve(paddr, size);
+		}
+	}
+}
+
+void __init efi_init(void)
+{
+	struct efi_memory_map_data data;
+	u64 efi_system_table;
+
+	if (sunway_boot_params->efi_systab == 0) {
+		pr_info("System table does not exist, disabling EFI.\n");
+		return;
+	}
+
+	/* Grab UEFI information placed in struct boot_params by stub */
+	efi_system_table = sunway_boot_params->efi_systab;
+	if (!efi_system_table)
+		return;
+
+	data.desc_version = sunway_boot_params->efi_memdesc_version;
+	data.desc_size = sunway_boot_params->efi_memdesc_size;
+	data.size = sunway_boot_params->efi_memmap_size;
+	data.phys_map = sunway_boot_params->efi_memmap;
+
+	if (efi_memmap_init_early(&data) < 0) {
+		/*
+		 * If we are booting via UEFI, the UEFI memory map is the only
+		 * description of memory we have, so there is little point in
+		 * proceeding if we cannot access it.
+		 */
+		panic("Unable to map EFI memory map.\n");
+	}
+
+	WARN(efi.memmap.desc_version != 1,
+	     "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+	     efi.memmap.desc_version);
+
+	if (uefi_init(efi_system_table) < 0) {
+		efi_memmap_unmap();
+		return;
+	}
+
+	reserve_regions();
+
+	memblock_reserve(sunway_boot_params->efi_memmap & PAGE_MASK,
+			 PAGE_ALIGN(sunway_boot_params->efi_memmap_size +
+				    (sunway_boot_params->efi_memmap & ~PAGE_MASK)));
+}
diff --git a/drivers/firmware/efi/sunway-runtime.c b/drivers/firmware/efi/sunway-runtime.c
new file mode 100644
index 0000000000000000000000000000000000000000..6bd96cff7d5d8f82dbc96e7f62224b5d37b4df6c
--- /dev/null
+++ b/drivers/firmware/efi/sunway-runtime.c
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 2.4
+ *
+ * Copyright (C) 2013, 2014 Linaro Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+/*
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
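+ * If any prerequisite is missing, the kernel simply keeps running without
+ * EFI runtime services.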
+ */ +static int __init sunway_enable_runtime_services(void) +{ + u64 mapsize; + + if (!efi_enabled(EFI_BOOT)) { + pr_info("EFI services will not be available.\n"); + return 0; + } + + efi_memmap_unmap(); + + mapsize = efi.memmap.desc_size * efi.memmap.nr_map; + + if (efi_memmap_init_late(efi.memmap.phys_map, mapsize)) { + pr_err("Failed to remap EFI memory map\n"); + return 0; + } + + if (efi_runtime_disabled()) { + pr_info("EFI runtime services will be disabled.\n"); + return 0; + } + + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + pr_info("EFI runtime services access via paravirt.\n"); + return 0; + } + + /* Set up runtime services function pointers */ + efi_native_runtime_setup(); + set_bit(EFI_RUNTIME_SERVICES, &efi.flags); + + return 0; +} +early_initcall(sunway_enable_runtime_services); + + +static int __init sunway_dmi_init(void) +{ + /* + * On SW64, DMI depends on UEFI, and dmi_scan_machine() needs to + * be called early because dmi_id_init(), which is an arch_initcall + * itself, depends on dmi_scan_machine() having been called already. + */ + dmi_setup(); + return 0; +} +core_initcall(sunway_dmi_init); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index a69399a6b7c0052fc5f66948e7928bbc9969ef3f..f4fea1ec3201c057ce25be6acf540d0f38d28194 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void) /* arch-specific ctrl & data register offsets are not available in ACPI, DT */ #if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF)) -# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64)) +# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_SW64)) # define FW_CFG_CTRL_OFF 0x08 # define FW_CFG_DATA_OFF 0x00 # define FW_CFG_DMA_OFF 0x10 diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 673bafb8be5887611f10c09a067a6c90f2c03661..3d186a39dd714765d018e04d885bd28a6c88a07f 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -246,6 +246,15 @@ config GPIO_DWAPB Say Y or M here to build support for the Synopsys DesignWare APB GPIO block. +config GPIO_SUNWAY + tristate "Sunway gpio driver" + depends on SW64 + select GPIO_GENERIC + select GENERIC_IRQ_CHIP + help + Say Y or M here to build support for the Sunway + GPIO block. + config GPIO_EIC_SPRD tristate "Spreadtrum EIC support" depends on ARCH_SPRD || COMPILE_TEST diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index eb73b5d633ebad2dc397d7894901421e845ee390..e44a700ec7d3dfa6e59fb59ae54672e54c74ca18 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile @@ -195,3 +195,4 @@ obj-$(CONFIG_GPIO_XTENSA) += gpio-xtensa.o obj-$(CONFIG_GPIO_ZEVIO) += gpio-zevio.o obj-$(CONFIG_GPIO_ZYNQ) += gpio-zynq.o obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN) += gpio-zynqmp-modepin.o +obj-$(CONFIG_GPIO_SUNWAY) += gpio-sunway.o diff --git a/drivers/gpio/gpio-sunway.c b/drivers/gpio/gpio-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..b9c6848317dbf48ef74aa066dcfe574e4c717fce --- /dev/null +++ b/drivers/gpio/gpio-sunway.c @@ -0,0 +1,861 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2011 Jamie Iles + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * All enquiries to support@picochip.com + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "gpiolib.h" +#include "gpiolib-acpi.h" + + +#define GPIO_SWPORTA_DR (0x00UL<<7) +#define GPIO_SWPORTA_DDR (0X04UL<<7) +#define GPIO_SWPORTB_DR (0X0CUL<<7) +#define GPIO_SWPORTB_DDR (0X10UL<<7) +#define GPIO_SWPORTC_DR (0x18UL<<7) +#define GPIO_SWPORTC_DDR (0x1cUL<<7) +#define GPIO_SWPORTD_DR (0x24UL<<7) +#define GPIO_SWPORTD_DDR (0x28UL<<7) +#define GPIO_INTEN (0x30UL<<7) +#define GPIO_INTMASK (0x34UL<<7) +#define GPIO_INTTYPE_LEVEL (0x38UL<<7) +#define GPIO_INT_POLARITY (0x3cUL<<7) +#define GPIO_INTSTATUS (0x40UL<<7) +#define GPIO_PORTA_DEBOUNCE (0x48UL<<7) +#define GPIO_PORTA_EOI (0x4cUL<<7) +#define GPIO_EXT_PORTA (0x50UL<<7) +#define GPIO_EXT_PORTB (0x54UL<<7) +#define GPIO_EXT_PORTC (0x58UL<<7) +#define GPIO_EXT_PORTD (0x5cUL<<7) + +#define DWAPB_MAX_PORTS 4 +#define GPIO_EXT_PORT_STRIDE 0x04 /* register stride 32 bits */ +#define GPIO_SWPORT_DR_STRIDE 0x0c /* register stride 3*32 bits */ +#define GPIO_SWPORT_DDR_STRIDE 0x0c /* register stride 3*32 bits */ + +#define GPIO_REG_OFFSET_V2 1 + +#define GPIO_INTMASK_V2 0x44 +#define GPIO_INTTYPE_LEVEL_V2 0x34 +#define GPIO_INT_POLARITY_V2 0x38 +#define GPIO_INTSTATUS_V2 0x3c +#define GPIO_PORTA_EOI_V2 0x40 + +struct sunway_gpio; + +#ifdef CONFIG_PM_SLEEP +/* Store GPIO context across system-wide suspend/resume transitions */ +struct sunway_context { + u32 data; + u32 dir; + u32 ext; + u32 int_en; + u32 int_mask; + u32 int_type; + u32 int_pol; + u32 int_deb; + u32 wake_en; +}; +#endif + +struct sunway_gpio_port { + struct gpio_chip gc; + bool is_registered; + struct sunway_gpio *gpio; +#ifdef CONFIG_PM_SLEEP + struct sunway_context *ctx; +#endif + unsigned int idx; +}; + +struct sunway_gpio { + struct device *dev; + void __iomem *regs; + struct sunway_gpio_port *ports; + unsigned int nr_ports; + struct irq_domain *domain; + unsigned int flags; + struct reset_control *rst; + struct clk *clk; +}; + +static inline u32 gpio_reg_v2_convert(unsigned int offset) +{ + switch (offset) { + case GPIO_INTMASK: + return GPIO_INTMASK_V2; + case GPIO_INTTYPE_LEVEL: + return GPIO_INTTYPE_LEVEL_V2; + case GPIO_INT_POLARITY: + return GPIO_INT_POLARITY_V2; + case GPIO_INTSTATUS: + return GPIO_INTSTATUS_V2; + case GPIO_PORTA_EOI: + return GPIO_PORTA_EOI_V2; + } + + return offset; +} + +static inline u32 gpio_reg_convert(struct sunway_gpio *gpio, unsigned int offset) +{ + if (gpio->flags & GPIO_REG_OFFSET_V2) + return gpio_reg_v2_convert(offset); + + return offset; +} + +static inline u32 sunway_read(struct sunway_gpio *gpio, unsigned int offset) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + return gc->read_reg(reg_base + gpio_reg_convert(gpio, offset)); +} + +static inline void sunway_write(struct sunway_gpio *gpio, unsigned int offset, + u32 val) +{ + struct gpio_chip *gc = &gpio->ports[0].gc; + void __iomem *reg_base = gpio->regs; + + gc->write_reg(reg_base + gpio_reg_convert(gpio, offset), val); +} + +static int sunway_gpio_to_irq(struct gpio_chip *gc, unsigned int offset) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + + return irq_find_mapping(gpio->domain, offset); +} + +static struct sunway_gpio_port *sunway_offs_to_port(struct sunway_gpio *gpio, unsigned int offs) +{ + struct 
sunway_gpio_port *port; + int i; + + for (i = 0; i < gpio->nr_ports; i++) { + port = &gpio->ports[i]; + if (port->idx == offs / 32) + return port; + } + + return NULL; +} + +static void sunway_toggle_trigger(struct sunway_gpio *gpio, unsigned int offs) +{ + struct sunway_gpio_port *port = sunway_offs_to_port(gpio, offs); + struct gpio_chip *gc; + u32 pol; + int val; + + if (!port) + return; + gc = &port->gc; + + pol = sunway_read(gpio, GPIO_INT_POLARITY); + /* Just read the current value right out of the data register */ + val = gc->get(gc, offs % 32); + if (val) + pol &= ~BIT(offs); + else + pol |= BIT(offs); + + sunway_write(gpio, GPIO_INT_POLARITY, pol); +} + +static u32 sunway_do_irq(struct sunway_gpio *gpio) +{ + u32 irq_status = sunway_read(gpio, GPIO_INTSTATUS); + u32 ret = irq_status; + + while (irq_status) { + int hwirq = fls(irq_status) - 1; + int gpio_irq = irq_find_mapping(gpio->domain, hwirq); + + generic_handle_irq(gpio_irq); + irq_status &= ~BIT(hwirq); + + if ((irq_get_trigger_type(gpio_irq) & IRQ_TYPE_SENSE_MASK) + == IRQ_TYPE_EDGE_BOTH) + sunway_toggle_trigger(gpio, hwirq); + } + + return ret; +} + +static void sunway_irq_handler(struct irq_desc *desc) +{ + struct sunway_gpio *gpio = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + + sunway_do_irq(gpio); + + if (chip->irq_eoi) + chip->irq_eoi(irq_desc_get_irq_data(desc)); +} + +static void sunway_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val |= BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static void sunway_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + unsigned long flags; + u32 val; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + val = sunway_read(gpio, GPIO_INTEN); + val &= ~BIT(d->hwirq); + sunway_write(gpio, GPIO_INTEN, val); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); +} + +static int sunway_irq_reqres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int ret; + + ret = gpiochip_lock_as_irq(gc, irqd_to_hwirq(d)); + if (ret) { + dev_err(gpio->dev, "unable to lock HW IRQ %lu for IRQ\n", + irqd_to_hwirq(d)); + return ret; + } + return 0; +} + +static void sunway_irq_relres(struct irq_data *d) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + + gpiochip_unlock_as_irq(gc, irqd_to_hwirq(d)); +} + +static int sunway_irq_set_type(struct irq_data *d, u32 type) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct gpio_chip *gc = &gpio->ports[0].gc; + int bit = d->hwirq; + unsigned long level, polarity, flags; + + if (type & ~(IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | + IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + return -EINVAL; + + spin_lock_irqsave(&gc->bgpio_lock, flags); + level = sunway_read(gpio, GPIO_INTTYPE_LEVEL); + polarity = sunway_read(gpio, GPIO_INT_POLARITY); + + switch (type) { + case IRQ_TYPE_EDGE_BOTH: 
+ level |= BIT(bit); + sunway_toggle_trigger(gpio, bit); + break; + case IRQ_TYPE_EDGE_RISING: + level |= BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_EDGE_FALLING: + level |= BIT(bit); + polarity &= ~BIT(bit); + break; + case IRQ_TYPE_LEVEL_HIGH: + level &= ~BIT(bit); + polarity |= BIT(bit); + break; + case IRQ_TYPE_LEVEL_LOW: + level &= ~BIT(bit); + polarity &= ~BIT(bit); + break; + } + + irq_setup_alt_chip(d, type); + + sunway_write(gpio, GPIO_INTTYPE_LEVEL, level); + if (type != IRQ_TYPE_EDGE_BOTH) + sunway_write(gpio, GPIO_INT_POLARITY, polarity); + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway_irq_set_wake(struct irq_data *d, unsigned int enable) +{ + struct irq_chip_generic *igc = irq_data_get_irq_chip_data(d); + struct sunway_gpio *gpio = igc->private; + struct sunway_context *ctx = gpio->ports[0].ctx; + + if (enable) + ctx->wake_en |= BIT(d->hwirq); + else + ctx->wake_en &= ~BIT(d->hwirq); + + return 0; +} +#endif + +static int sunway_gpio_set_debounce(struct gpio_chip *gc, + unsigned int offset, unsigned int debounce) +{ + struct sunway_gpio_port *port = gpiochip_get_data(gc); + struct sunway_gpio *gpio = port->gpio; + unsigned long flags, val_deb; + unsigned long mask = BIT(offset); + + spin_lock_irqsave(&gc->bgpio_lock, flags); + + val_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE); + if (debounce) + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb | mask); + else + sunway_write(gpio, GPIO_PORTA_DEBOUNCE, val_deb & ~mask); + + spin_unlock_irqrestore(&gc->bgpio_lock, flags); + + return 0; +} + +static int sunway_gpio_set_config(struct gpio_chip *gc, unsigned int offset, + unsigned long config) +{ + u32 debounce; + + if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE) + return -ENOTSUPP; + + debounce = pinconf_to_config_argument(config); + return sunway_gpio_set_debounce(gc, offset, debounce); +} + +static irqreturn_t sunway_irq_handler_mfd(int irq, void *dev_id) +{ + u32 worked; + struct sunway_gpio *gpio = dev_id; + + worked = sunway_do_irq(gpio); + + return worked ? 
IRQ_HANDLED : IRQ_NONE; +} + +static void sunway_configure_irqs(struct sunway_gpio *gpio, + struct sunway_gpio_port *port, + struct sunway_port_property *pp) +{ + struct gpio_chip *gc = &port->gc; + struct fwnode_handle *fwnode = pp->fwnode; + struct irq_chip_generic *irq_gc = NULL; + unsigned int hwirq, ngpio = gc->ngpio; + struct irq_chip_type *ct; + int err, i; + + gpio->domain = irq_domain_create_linear(fwnode, ngpio, + &irq_generic_chip_ops, gpio); + if (!gpio->domain) + return; + + err = irq_alloc_domain_generic_chips(gpio->domain, ngpio, 2, + "gpio-dwapb", handle_level_irq, + IRQ_NOREQUEST, 0, + IRQ_GC_INIT_NESTED_LOCK); + if (err) { + dev_info(gpio->dev, "irq_alloc_domain_generic_chips failed\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc = irq_get_domain_generic_chip(gpio->domain, 0); + if (!irq_gc) { + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + + irq_gc->reg_base = gpio->regs; + irq_gc->private = gpio; + + for (i = 0; i < 2; i++) { + ct = &irq_gc->chip_types[i]; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_set_type = sunway_irq_set_type; + ct->chip.irq_enable = sunway_irq_enable; + ct->chip.irq_disable = sunway_irq_disable; + ct->chip.irq_request_resources = sunway_irq_reqres; + ct->chip.irq_release_resources = sunway_irq_relres; +#ifdef CONFIG_PM_SLEEP + ct->chip.irq_set_wake = sunway_irq_set_wake; +#endif + ct->regs.ack = gpio_reg_convert(gpio, GPIO_PORTA_EOI); + ct->regs.mask = gpio_reg_convert(gpio, GPIO_INTMASK); + ct->type = IRQ_TYPE_LEVEL_MASK; + } + + irq_gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK; + irq_gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH; + irq_gc->chip_types[1].handler = handle_edge_irq; + + if (!pp->irq_shared) { + int i; + + for (i = 0; i < pp->ngpio; i++) { + if (pp->irq[i] >= 0) + irq_set_chained_handler_and_data(pp->irq[i], + sunway_irq_handler, gpio); + } + } else { + /* + * Request a shared IRQ since where MFD would have devices + * using the same irq pin + */ + err = devm_request_irq(gpio->dev, pp->irq[0], + sunway_irq_handler_mfd, + IRQF_SHARED, "gpio-dwapb-mfd", gpio); + if (err) { + dev_err(gpio->dev, "error requesting IRQ\n"); + irq_domain_remove(gpio->domain); + gpio->domain = NULL; + return; + } + } + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_create_mapping(gpio->domain, hwirq); + + port->gc.to_irq = sunway_gpio_to_irq; +} + +static void sunway_irq_teardown(struct sunway_gpio *gpio) +{ + struct sunway_gpio_port *port = &gpio->ports[0]; + struct gpio_chip *gc = &port->gc; + unsigned int ngpio = gc->ngpio; + irq_hw_number_t hwirq; + + if (!gpio->domain) + return; + + for (hwirq = 0 ; hwirq < ngpio ; hwirq++) + irq_dispose_mapping(irq_find_mapping(gpio->domain, hwirq)); + + irq_domain_remove(gpio->domain); + gpio->domain = NULL; +} + +static int sunway_gpio_add_port(struct sunway_gpio *gpio, + struct sunway_port_property *pp, + unsigned int offs) +{ + struct sunway_gpio_port *port; + void __iomem *dat, *set, *dirout; + int err; + + port = &gpio->ports[offs]; + port->gpio = gpio; + port->idx = pp->idx; + +#ifdef CONFIG_PM_SLEEP + port->ctx = devm_kzalloc(gpio->dev, sizeof(*port->ctx), GFP_KERNEL); + if (!port->ctx) + return -ENOMEM; +#endif + + dat = gpio->regs + GPIO_EXT_PORTA + (pp->idx * GPIO_EXT_PORT_STRIDE); + set = gpio->regs + GPIO_SWPORTA_DR + (pp->idx * GPIO_SWPORT_DR_STRIDE); + dirout = gpio->regs + GPIO_SWPORTA_DDR + + (pp->idx * GPIO_SWPORT_DDR_STRIDE); + + /* 
+	/* This registers 32 GPIO lines per port */
+	err = bgpio_init(&port->gc, gpio->dev, 4, dat, set, NULL, dirout,
+			 NULL, 0);
+	if (err) {
+		dev_err(gpio->dev, "failed to init gpio chip for port%d\n",
+			port->idx);
+		return err;
+	}
+
+#ifdef CONFIG_OF_GPIO
+	port->gc.of_node = to_of_node(pp->fwnode);
+#endif
+	port->gc.ngpio = pp->ngpio;
+	port->gc.base = pp->gpio_base;
+
+	/* Only port A supports debounce */
+	if (pp->idx == 0)
+		port->gc.set_config = sunway_gpio_set_config;
+
+	if (pp->has_irq)
+		sunway_configure_irqs(gpio, port, pp);
+
+	err = gpiochip_add_data(&port->gc, port);
+	if (err)
+		dev_err(gpio->dev, "failed to register gpiochip for port%d\n",
+			port->idx);
+	else
+		port->is_registered = true;
+
+	/* Add GPIO-signaled ACPI event support */
+	if (pp->has_irq)
+		acpi_gpiochip_request_interrupts(&port->gc);
+
+	return err;
+}
+
+static void sunway_gpio_unregister(struct sunway_gpio *gpio)
+{
+	unsigned int m;
+
+	for (m = 0; m < gpio->nr_ports; ++m)
+		if (gpio->ports[m].is_registered)
+			gpiochip_remove(&gpio->ports[m].gc);
+}
+
+static struct sunway_platform_data *
+sunway_gpio_get_pdata(struct device *dev)
+{
+	struct fwnode_handle *fwnode;
+	struct sunway_platform_data *pdata;
+	struct sunway_port_property *pp;
+	int nports;
+	int i, j;
+
+	nports = device_get_child_node_count(dev);
+	if (nports == 0)
+		return ERR_PTR(-ENODEV);
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->properties = devm_kcalloc(dev, nports, sizeof(*pp), GFP_KERNEL);
+	if (!pdata->properties)
+		return ERR_PTR(-ENOMEM);
+
+	pdata->nports = nports;
+
+	i = 0;
+	device_for_each_child_node(dev, fwnode) {
+		struct device_node *np = NULL;
+
+		pp = &pdata->properties[i++];
+		pp->fwnode = fwnode;
+
+		if (fwnode_property_read_u32(fwnode, "reg", &pp->idx) ||
+		    pp->idx >= DWAPB_MAX_PORTS) {
+			dev_err(dev,
+				"missing/invalid port index for port%d\n", i);
+			fwnode_handle_put(fwnode);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (fwnode_property_read_u32(fwnode, "snps,nr-gpios",
+					     &pp->ngpio)) {
+			dev_info(dev,
+				 "failed to get number of gpios for port%d\n",
+				 i);
+			pp->ngpio = 32;
+		}
+
+		pp->irq_shared = false;
+		pp->gpio_base = -1;
+
+		/*
+		 * Only port A can provide interrupts in all configurations of
+		 * the IP.
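+		 * The other ports are therefore skipped below and keep
+		 * pp->has_irq set to false.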
+		 */
+		if (pp->idx != 0)
+			continue;
+
+		if (dev->of_node && fwnode_property_read_bool(fwnode,
+						"interrupt-controller")) {
+			np = to_of_node(fwnode);
+		}
+
+		for (j = 0; j < pp->ngpio; j++) {
+			pp->irq[j] = -ENXIO;
+
+			if (np)
+				pp->irq[j] = of_irq_get(np, j);
+			else if (has_acpi_companion(dev))
+				pp->irq[j] = platform_get_irq(to_platform_device(dev), j);
+
+			if (pp->irq[j] >= 0)
+				pp->has_irq = true;
+		}
+
+		if (!pp->has_irq)
+			dev_warn(dev, "no irq for port%d\n", pp->idx);
+	}
+
+	return pdata;
+}
+
+static const struct of_device_id sunway_of_match[] = {
+	{ .compatible = "snps,sw-gpio", .data = (void *)0},
+	{ .compatible = "apm,xgene-gpio-v2", .data = (void *)GPIO_REG_OFFSET_V2},
+	{ /* Sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, sunway_of_match);
+
+static const struct acpi_device_id sunway_acpi_match[] = {
+	{"HISI0181", 0},
+	{"APMC0D07", 0},
+	{"APMC0D81", GPIO_REG_OFFSET_V2},
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, sunway_acpi_match);
+
+static int sunway_gpio_probe(struct platform_device *pdev)
+{
+	unsigned int i;
+	struct resource *res;
+	struct sunway_gpio *gpio;
+	int err;
+	struct device *dev = &pdev->dev;
+	struct sunway_platform_data *pdata = dev_get_platdata(dev);
+
+	if (!pdata) {
+		pdata = sunway_gpio_get_pdata(dev);
+		if (IS_ERR(pdata))
+			return PTR_ERR(pdata);
+	}
+
+	if (!pdata->nports)
+		return -ENODEV;
+
+	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
+	if (!gpio)
+		return -ENOMEM;
+
+	gpio->dev = &pdev->dev;
+	gpio->nr_ports = pdata->nports;
+
+	gpio->rst = devm_reset_control_get_optional_shared(dev, NULL);
+	if (IS_ERR(gpio->rst))
+		return PTR_ERR(gpio->rst);
+
+	reset_control_deassert(gpio->rst);
+
+	gpio->ports = devm_kcalloc(&pdev->dev, gpio->nr_ports,
+				   sizeof(*gpio->ports), GFP_KERNEL);
+	if (!gpio->ports)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	gpio->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(gpio->regs))
+		return PTR_ERR(gpio->regs);
+
+	/* Optional bus clock */
+	gpio->clk = devm_clk_get(&pdev->dev, "bus");
+	if (!IS_ERR(gpio->clk)) {
+		err = clk_prepare_enable(gpio->clk);
+		if (err) {
+			dev_err(&pdev->dev, "Cannot enable clock\n");
+			return err;
+		}
+	}
+
+	gpio->flags = 0;
+	if (dev->of_node) {
+		gpio->flags = (uintptr_t)of_device_get_match_data(dev);
+	} else if (has_acpi_companion(dev)) {
+		const struct acpi_device_id *acpi_id;
+
+		acpi_id = acpi_match_device(sunway_acpi_match, dev);
+		if (acpi_id) {
+			if (acpi_id->driver_data)
+				gpio->flags = acpi_id->driver_data;
+		}
+	}
+
+	for (i = 0; i < gpio->nr_ports; i++) {
+		err = sunway_gpio_add_port(gpio, &pdata->properties[i], i);
+		if (err)
+			goto out_unregister;
+	}
+	platform_set_drvdata(pdev, gpio);
+
+	return 0;
+
+out_unregister:
+	sunway_gpio_unregister(gpio);
+	sunway_irq_teardown(gpio);
+	clk_disable_unprepare(gpio->clk);
+
+	return err;
+}
+
+static int sunway_gpio_remove(struct platform_device *pdev)
+{
+	struct sunway_gpio *gpio = platform_get_drvdata(pdev);
+
+	sunway_gpio_unregister(gpio);
+	sunway_irq_teardown(gpio);
+	reset_control_assert(gpio->rst);
+	clk_disable_unprepare(gpio->clk);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sunway_gpio_suspend(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sunway_gpio *gpio = platform_get_drvdata(pdev);
+	struct gpio_chip *gc = &gpio->ports[0].gc;
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+	for (i = 0; i < gpio->nr_ports; i++) {
+		unsigned int offset;
+		unsigned int idx = gpio->ports[i].idx;
+		struct sunway_context *ctx = gpio->ports[i].ctx;
+
+		BUG_ON(!ctx);
+
+		offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+		ctx->dir = sunway_read(gpio, offset);
+
+		offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+		ctx->data = sunway_read(gpio, offset);
+
+		offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+		ctx->ext = sunway_read(gpio, offset);
+
+		/* Only port A can provide interrupts */
+		if (idx == 0) {
+			ctx->int_mask = sunway_read(gpio, GPIO_INTMASK);
+			ctx->int_en = sunway_read(gpio, GPIO_INTEN);
+			ctx->int_pol = sunway_read(gpio, GPIO_INT_POLARITY);
+			ctx->int_type = sunway_read(gpio, GPIO_INTTYPE_LEVEL);
+			ctx->int_deb = sunway_read(gpio, GPIO_PORTA_DEBOUNCE);
+
+			/* Mask out interrupts */
+			sunway_write(gpio, GPIO_INTMASK,
+				     0xffffffff & ~ctx->wake_en);
+		}
+	}
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	clk_disable_unprepare(gpio->clk);
+
+	return 0;
+}
+
+static int sunway_gpio_resume(struct device *dev)
+{
+	struct platform_device *pdev = to_platform_device(dev);
+	struct sunway_gpio *gpio = platform_get_drvdata(pdev);
+	struct gpio_chip *gc = &gpio->ports[0].gc;
+	unsigned long flags;
+	int i;
+
+	if (!IS_ERR(gpio->clk))
+		clk_prepare_enable(gpio->clk);
+
+	spin_lock_irqsave(&gc->bgpio_lock, flags);
+	for (i = 0; i < gpio->nr_ports; i++) {
+		unsigned int offset;
+		unsigned int idx = gpio->ports[i].idx;
+		struct sunway_context *ctx = gpio->ports[i].ctx;
+
+		BUG_ON(!ctx);
+
+		offset = GPIO_SWPORTA_DR + idx * GPIO_SWPORT_DR_STRIDE;
+		sunway_write(gpio, offset, ctx->data);
+
+		offset = GPIO_SWPORTA_DDR + idx * GPIO_SWPORT_DDR_STRIDE;
+		sunway_write(gpio, offset, ctx->dir);
+
+		offset = GPIO_EXT_PORTA + idx * GPIO_EXT_PORT_STRIDE;
+		sunway_write(gpio, offset, ctx->ext);
+
+		/* Only port A can provide interrupts */
+		if (idx == 0) {
+			sunway_write(gpio, GPIO_INTTYPE_LEVEL, ctx->int_type);
+			sunway_write(gpio, GPIO_INT_POLARITY, ctx->int_pol);
+			sunway_write(gpio, GPIO_PORTA_DEBOUNCE, ctx->int_deb);
+			sunway_write(gpio, GPIO_INTEN, ctx->int_en);
+			sunway_write(gpio, GPIO_INTMASK, ctx->int_mask);
+
+			/* Clear out spurious interrupts */
+			sunway_write(gpio, GPIO_PORTA_EOI, 0xffffffff);
+		}
+	}
+	spin_unlock_irqrestore(&gc->bgpio_lock, flags);
+
+	return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(sunway_gpio_pm_ops, sunway_gpio_suspend,
+			 sunway_gpio_resume);
+
+static struct platform_driver sunway_gpio_driver = {
+	.driver = {
+		.name = "gpio-sunway",
+		.pm = &sunway_gpio_pm_ops,
+		.of_match_table = of_match_ptr(sunway_of_match),
+		.acpi_match_table = ACPI_PTR(sunway_acpi_match),
+	},
+	.probe = sunway_gpio_probe,
+	.remove = sunway_gpio_remove,
+};
+
+module_platform_driver(sunway_gpio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jamie Iles");
+MODULE_DESCRIPTION("Sunway GPIO driver");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3caa020391c75283c9145d18fb3be151496620af..5c1971f17cd94cecc7fac9d064c61d8dd5ff56f9 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -387,6 +387,8 @@ source "drivers/gpu/drm/solomon/Kconfig"
 
 source "drivers/gpu/drm/sprd/Kconfig"
 
+source "drivers/gpu/drm/phytium/Kconfig"
+
 config DRM_HYPERV
 	tristate "DRM Support for Hyper-V synthetic video device"
 	depends on DRM && PCI && MMU && HYPERV
@@ -435,3 +437,9 @@ config DRM_LIB_RANDOM
 config DRM_PRIVACY_SCREEN
 	bool
 	default n
+
+config HYDCU_FIXUP_HEADER
+	bool "Enable fixup header support for HYDCU"
+	help
+	  Choose this option if you want to use pci passthrough with HYDCU.
+	  HYDCU cannot support pci reset, so enable this module to disable
pci reset diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 215e78e791250b8ad0d0a273e627b84296ecc4d9..017ff5a6ebe2dee6fac3f1227bc1ae03405c2187 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -198,3 +198,5 @@ obj-$(CONFIG_DRM_HYPERV) += hyperv/ obj-y += solomon/ obj-$(CONFIG_DRM_SPRD) += sprd/ obj-$(CONFIG_DRM_LOONGSON) += loongson/ +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hygon/hydcu-fixup-header/ +obj-$(CONFIG_DRM_PHYTIUM) += phytium/ diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..2dc816df42398c8988a3599f76c323055ab1839a --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_HYDCU_FIXUP_HEADER) += hydcu_pci_fixup_header.o \ No newline at end of file diff --git a/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c new file mode 100644 index 0000000000000000000000000000000000000000..962f0d74f703d2530b27c3d2ae94d29bafbc4435 --- /dev/null +++ b/drivers/gpu/drm/hygon/hydcu-fixup-header/hydcu_pci_fixup_header.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * HYGON DCU fixup driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. + * + * Author: Baoshun Fang + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +#define PCI_VENDOR_ID_HYGON 0x1d94 + +#define DEVICE_Z100SM 0x51b7 +#define DEVICE_C878182 0x52b7 +#define DEVICE_C878186 0x53b7 +#define DEVICE_Z100 0x54b7 +#define DEVICE_Z100L 0x55b7 +#define DEVICE_C878181 0x56b7 +#define DEVICE_C878185 0x57b7 +#define DEVICE_C878188 0x58b7 +#define DEVICE_C878174 0x59b7 +#define DEVICE_KONGMING 0x61b7 +#define DEVICE_KONGMING_E 0x6210 + +#define DRIVER_VERSION "0.2" +#define DRIVER_AUTHOR "huangjun " +#define DRIVER_DESC "fix dcu header" + +static int hydcu_pci_fixup_header_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + dev_info(&pdev->dev, "add flags NO_BUS_RESET\n"); + pdev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET; + pdev->pm_cap = 0; + dev_info(&pdev->dev, "will abort probe\n"); + + return -EINVAL; +} + +static void hydcu_pci_fixup_header_remove(struct pci_dev *pdev) +{ +} + +static const struct pci_device_id hydcu_pci_fixup_header_ids[] = { + { PCI_VDEVICE(HYGON, DEVICE_Z100SM), }, + { PCI_VDEVICE(HYGON, DEVICE_C878182), }, + { PCI_VDEVICE(HYGON, DEVICE_C878186), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100), }, + { PCI_VDEVICE(HYGON, DEVICE_Z100L), }, + { PCI_VDEVICE(HYGON, DEVICE_C878181), }, + { PCI_VDEVICE(HYGON, DEVICE_C878185), }, + { PCI_VDEVICE(HYGON, DEVICE_C878188), }, + { PCI_VDEVICE(HYGON, DEVICE_C878174), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING), }, + { PCI_VDEVICE(HYGON, DEVICE_KONGMING_E), }, + {}, +}; + +static struct pci_driver hydcu_pci_fixup_header_driver = { + .name = "hydcu-fixup-header", + .id_table = hydcu_pci_fixup_header_ids, + .probe = hydcu_pci_fixup_header_probe, + .remove = hydcu_pci_fixup_header_remove, +}; + +static int __init hydcu_pci_fixup_header_init(void) +{ + /* Register and scan for devices */ + return pci_register_driver(&hydcu_pci_fixup_header_driver); +} + +static void __exit 
hydcu_pci_fixup_header_cleanup(void) +{ + pci_unregister_driver(&hydcu_pci_fixup_header_driver); +} + +module_init(hydcu_pci_fixup_header_init); +module_exit(hydcu_pci_fixup_header_cleanup); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); diff --git a/drivers/gpu/drm/loongson/loongson_module.c b/drivers/gpu/drm/loongson/loongson_module.c index d2a51bd395f6c9153b155515802bee7af025a885..37b7d97c4e70153bce54d2ce639a65ba0113fc4f 100644 --- a/drivers/gpu/drm/loongson/loongson_module.c +++ b/drivers/gpu/drm/loongson/loongson_module.c @@ -19,6 +19,21 @@ module_param_named(vblank, loongson_vblank, int, 0400); static int __init loongson_module_init(void) { + struct pci_dev *pdev = NULL; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev))) { + /* + * Multiple video card workaround + * + * This integrated video card will always be selected as + * default boot device by vgaarb subsystem. + */ + if (pdev->vendor != PCI_VENDOR_ID_LOONGSON || pdev->device == 0x1a05) { + pr_info("Discrete graphic card detected, abort\n"); + return 0; + } + } + if (!loongson_modeset || video_firmware_drivers_only()) return -ENODEV; diff --git a/drivers/gpu/drm/phytium/Kconfig b/drivers/gpu/drm/phytium/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..5f540962129a5a140ed576c617f406468f46339b --- /dev/null +++ b/drivers/gpu/drm/phytium/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only + +config DRM_PHYTIUM + tristate "DRM Support for Phytium Graphics Card" + depends on DRM && ARCH_PHYTIUM + select DRM_KMS_HELPER + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + select DRM_DISPLAY_HDCP_HELPER + help + Choose this option if you have a phytium graphics card. + This driver provides kernel mode setting and buffer management to userspace. diff --git a/drivers/gpu/drm/phytium/Makefile b/drivers/gpu/drm/phytium/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..1f68cdcd80dac4071c5d3f1578ea7e0d85fbccfd --- /dev/null +++ b/drivers/gpu/drm/phytium/Makefile @@ -0,0 +1,20 @@ +# SPDX-License-Identifier: GPL-2.0-only + +phytium-dc-drm-y := phytium_display_drv.o \ + phytium_plane.o \ + phytium_crtc.o \ + phytium_dp.o \ + phytium_fb.o \ + phytium_gem.o \ + phytium_fbdev.o \ + phytium_debugfs.o \ + px210_dp.o \ + phytium_panel.o \ + px210_dc.o \ + phytium_pci.o \ + pe220x_dp.o \ + pe220x_dc.o \ + phytium_platform.o + +obj-$(CONFIG_DRM_PHYTIUM) += phytium-dc-drm.o +CFLAGS_REMOVE_phytium_crtc.o += -mgeneral-regs-only diff --git a/drivers/gpu/drm/phytium/pe220x_dc.c b/drivers/gpu/drm/phytium/pe220x_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..8f74199f9a477422246896a885c20b4da5f3c6ec --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +void pe220x_dc_hw_disable(struct drm_crtc *crtc); + +static const unsigned int pe220x_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + DRM_FORMAT_NV16, + DRM_FORMAT_NV12, + DRM_FORMAT_NV21, +}; + +static uint64_t pe220x_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t pe220x_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int pe220x_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PE220X_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + 0, PE220X_DC_CMD_REGISTER(phys_pipe)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(phys_pipe), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void pe220x_dc_hw_reset(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* disable pixel clock for bmc mode */ + if (phys_pipe == 0) + pe220x_dc_hw_disable(crtc); + + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + config &= (~(DC0_CORE_RESET | DC1_CORE_RESET | AXI_RESET | AHB_RESET)); + + if (phys_pipe == 0) { + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + 
udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC0_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } else { + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET | AHB_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET | AXI_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config | DC1_CORE_RESET, + 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + } +} + +void pe220x_dc_hw_disable(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int config = 0; + int phys_pipe = phytium_crtc->phys_pipe; + + /* clear framebuffer */ + phytium_writel_reg(priv, CLEAR_VALUE_BLACK, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + /* disable cursor */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + config = ((config & (~CURSOR_FORMAT_MASK)) | CURSOR_FORMAT_DISABLED); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + mdelay(20); + + /* reset pix clock */ + pe220x_dc_hw_config_pix_clock(crtc, 0); + + if (phys_pipe == 0) { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC0_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC0_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } else { + config = phytium_readl_reg(priv, 0, PE220X_DC_CLOCK_CONTROL); + phytium_writel_reg(priv, config | DC1_CORE_RESET, 0, PE220X_DC_CLOCK_CONTROL); + udelay(20); + phytium_writel_reg(priv, config & (~DC1_CORE_RESET), 0, PE220X_DC_CLOCK_CONTROL); + } + udelay(20); +} + +int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count) +{ + int ret = 0; + + if (mode_cmd->modifier[count] != DRM_FORMAT_MOD_LINEAR) { + DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]); + ret = -EINVAL; + } + + return ret; +} + +void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_primary_formats_modifiers; + *formats = pe220x_primary_formats; + *format_count = ARRAY_SIZE(pe220x_primary_formats); +} + +void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = pe220x_cursor_formats_modifiers; + *formats = pe220x_cursor_formats; + *format_count = ARRAY_SIZE(pe220x_cursor_formats); +} + +void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = 
to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[1] >> U_PREFIX_SHIFT) & U_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS); + + phytium_writel_reg(priv, (phytium_plane->iova[2] >> V_PREFIX_SHIFT) & V_PREFIX_MASK, + priv->dc_reg_base[phys_pipe], PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS); +} + +void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + + config = ((iova >> CURSOR_PREFIX_SHIFT) & CURSOR_PREFIX_MASK); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PE220X_DC_CURSOR_HI_ADDRESS); +} diff --git a/drivers/gpu/drm/phytium/pe220x_dc.h b/drivers/gpu/drm/phytium/pe220x_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..f88a054cf0d0e82343132e1cc0f60f45c4347097 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dc.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display controller DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DC_H__ +#define __PE220X_DC_H__ + +#define PE220X_DC_PIX_CLOCK_MAX (594000) +#define PE220X_DC_HDISPLAY_MAX 3840 +#define PE220X_DC_VDISPLAY_MAX 2160 +#define PE220X_DC_ADDRESS_MASK 0x7f + +extern void pe220x_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void pe220x_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void pe220x_dc_hw_disable(struct drm_crtc *crtc); +extern int pe220x_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void pe220x_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void pe220x_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +extern void pe220x_dc_hw_update_cursor_hi_addr(struct drm_plane *plane, uint64_t iova); +void pe220x_dc_hw_reset(struct drm_crtc *crtc); +#endif /* __PE220X_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_dp.c b/drivers/gpu/drm/phytium/pe220x_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..54a6e8ac454b22e0cad76699a4ae59a6202f6de2 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.c @@ -0,0 +1,514 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include "phytium_display_drv.h" +#include "pe220x_reg.h" +#include "phytium_dp.h" +#include "pe220x_dp.h" + +static uint8_t pe220x_dp_source_lane_count[2] = {1, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, /* CP_PADJ */ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, /* CP_IADJ */ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, /* FILT_PADJ */ + {0x0061, 0x006C, 0x006C, 0x0051}, /* INTDIV */ + {0x3333, 0x0000, 0x0000, 0x0000}, /* FRACDIVL */ + {0x0000, 0x0000, 0x0000, 0x0000}, /* FRACDIVH */ + {0x0042, 0x0048, 0x0048, 0x0036}, /* HIGH_THR */ + {0x0002, 0x0002, 0x0002, 0x0002}, /* PDIAG_CTRL */ + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, /* VCOCAL_PLLCNT_START */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PEFCNT */ + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, /* LOCK_PLLCNT_START */ + {0x0005, 0x0005, 0x0005, 0x0005}, /* LOCK_PLLCNT_THR */ +}; + +/* [link_rate][swing][emphasis] */ +static int mgnfs_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x001f, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +/* [link_rate][swing][emphasis] */ +static int cpost_val[4][4][4] = { + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 5.4Gbps */ + { + {0x0005, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int pe220x_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, uint32_t link_rate) +{ + int port = phytium_dp->port%2; + int i = 0, data, tmp, tmp1, index = 0, mask = 0; + int timeout = 500, ret = 0; + + /* set pma powerdown */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A3_POWERDOWN3 << (i * A3_POWERDOWN3_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* lane pll disable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + mask |= (((1<source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* set pma power active */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if 
(timeout == 0) { + DRM_ERROR("dp(%d) phy pll lock failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_set_phy_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set) +{ + int port = phytium_dp->port % 3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + default: + voltage_swing = 0; + break; + } + + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + default: + pre_emphasis = 0; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), LOCK); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_TXCC_CTRL(port), TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DRV(port), TX_DRV); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_MGNFS(port), + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_CPOST(port), + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL0_TX_DIAG_ACYA(port), UNLOCK); +} + +static int pe220x_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + phytium_phy_writel(phytium_dp, PE220X_PHY_APB_RESET(port), APB_RESET); + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET); + + /* config lane to dp mode */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (LANE_BIT << (i * LANE_BIT_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_MODE(port), data); + + /* pll clock enable */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (PLL_EN << (i * PLL_EN_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_EN(port), data); + + /* config input 20 bit */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (BIT_20 << (i * BIT_20_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA_WIDTH(port), data); + + /* config lane active power state */ + data = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) + data |= (A0_ACTIVE << (i * A0_ACTIVE_SHIFT)); + phytium_phy_writel(phytium_dp, PE220X_PHY_PMA0_POWER(port), data); + + /* link reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_LINK_RESET(port), LINK_RESET); + + phytium_phy_writel(phytium_dp, PE220X_PHY_SGMII_DPSEL_INIT(port), DP_SEL); + + /* config single link */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PLL_CFG(port), SINGLE_LINK); + + /* pipe reset */ + phytium_phy_writel(phytium_dp, PE220X_PHY_PIPE_RESET(port), RESET_DEASSERT); + + mask = PLL0_LOCK_DONE; + do { + mdelay(1); + timeout--; + tmp = phytium_phy_readl(phytium_dp, PE220X_PHY_PMA_CONTROL2(port)); + } while ((!(tmp & mask)) && timeout); + + if 
(timeout == 0) { + DRM_ERROR("reset dp(%d) phy failed\n", port); + ret = -1; + } + udelay(1); + + return ret; +} + +static void pe220x_dp_hw_poweron_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void pe220x_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void pe220x_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void pe220x_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t pe220x_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int pe220x_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0; + int ret = 0; + + if (level > PE220X_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, 0, PE220X_DC_CMD_REGISTER(port)); + ret = phytium_wait_cmd_done(priv, PE220X_DC_CMD_REGISTER(port), + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); +out: + return ret; +} + +bool pe220x_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + return false; +} + +int pe220x_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private 
*priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, DP_RESET, group_offset, PE220X_DP_CONTROLLER_RESET); + udelay(500); + phytium_writel_reg(priv, AUX_CLK_DIVIDER_100, group_offset, PHYTIUM_DP_AUX_CLK_DIVIDER); + phytium_writel_reg(priv, SUPPORT_EDP_1_4, group_offset, PHYTIUM_EDP_CRC_ENABLE); + + return 0; +} + +uint8_t pe220x_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return pe220x_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func pe220x_dp_funcs = { + .dp_hw_get_source_lane_count = pe220x_dp_hw_get_source_lane_count, + .dp_hw_reset = pe220x_dp_hw_reset, + .dp_hw_spread_is_enable = pe220x_dp_hw_spread_is_enable, + .dp_hw_set_backlight = pe220x_dp_hw_set_backlight, + .dp_hw_get_backlight = pe220x_dp_hw_get_backlight, + .dp_hw_disable_backlight = pe220x_dp_hw_disable_backlight, + .dp_hw_enable_backlight = pe220x_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = pe220x_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = pe220x_dp_hw_poweron_panel, + .dp_hw_init_phy = pe220x_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = pe220x_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = pe220x_dp_hw_set_phy_lane_and_rate, +}; + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = &pe220x_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/pe220x_dp.h b/drivers/gpu/drm/phytium/pe220x_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..6b763d9966310115dd8fdeed8895c0b9b64e9bd7 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_dp.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium display port DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PE220X_DP_H__ +#define __PE220X_DP_H__ + +#define PE220X_DP_BACKLIGHT_MAX 100 + +void pe220x_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PE220X_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/pe220x_reg.h b/drivers/gpu/drm/phytium/pe220x_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..88fc9c7383a58da2d059187bc7b0067a3f4ef378 --- /dev/null +++ b/drivers/gpu/drm/phytium/pe220x_reg.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Phytium Pe220x display engine register + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PE220X_REG_H__ +#define __PE220X_REG_H__ + +#include "phytium_reg.h" + +/* dc register */ +#define PE220X_DC_CLOCK_CONTROL 0x0000 +#define DC1_CORE_RESET (1<<18) +#define DC0_CORE_RESET (1<<17) +#define AXI_RESET (1<<16) +#define AHB_RESET (1<<12) + +#define PE220X_DC_CMD_REGISTER(pipe) (PE220X_DC_BASE(0) + 0x00F0 + 0x4*(pipe)) +#define FLAG_REPLY (1<<31) +#define FLAG_REQUEST (1<<30) +#define CMD_PIXEL_CLOCK (0x0 << 28) +#define CMD_BACKLIGHT (0x1 << 28) +#define CMD_DC_DP_RESET (0x3 << 28) +#define BACKLIGHT_SHIFT 21 +#define BACKLIGHT_MASK 0x7f +#define BACKLIGHT_MAX 100 +#define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) +#define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) +#define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) +#define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) +#define PIXEL_CLOCK_MASK (0x1fffff) + +#define PE220X_DC_FRAMEBUFFER_Y_HI_ADDRESS 0x1404 +#define PREFIX_MASK 0xff +#define PREFIX_SHIFT 32 + +#define PE220X_DC_CURSOR_HI_ADDRESS 0x1490 +#define CURSOR_PREFIX_MASK 0xff +#define CURSOR_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_U_HI_ADDRESS 0x1534 +#define U_PREFIX_MASK 0xff +#define U_PREFIX_SHIFT 32 + +#define PE220X_DC_FRAMEBUFFER_V_HI_ADDRESS 0x153c +#define V_PREFIX_MASK 0xff +#define V_PREFIX_SHIFT 32 + +/* dp register */ +#define PE220X_DP_CONTROLLER_RESET 0x0850 +#define DP_RESET 0x1 + +/* address transform register */ +#define PE220X_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define SRC_ADDR_OFFSET 22 +#define SRC_ADDR_MASK 0xffffffffff + +#define PE220X_DC_ADDRESS_TRANSFORM_SIZE 0x4 +#define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) +#define SIZE_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DST_ADDR 0x8 +#define DST_ADDR_OFFSET 22 + +#define PE220X_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 +#define DC_DP_RESET_STATUS(pipe) (1 << pipe) +#define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) + +#define PE220X_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c +#define BACKLIGHT_VALUE_MASK (0x7f) +#define BACKLIGHT_VALUE_SHIFT 16 + +/* phy register start */ +#define PE220X_PHY_BASE(pipe) (0x100000*pipe) + +#define PE220X_PHY_PIPE_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40254) +#define RESET 0x0 +#define RESET_DEASSERT 0x1 + +#define PE220X_PHY_MODE(pipe) (PE220X_PHY_BASE(pipe) + 0x40034) +#define LANE_BIT (0x3) +#define LANE_BIT_SHIFT 0x2 + +#define PE220X_PHY_LINK_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x40044) +#define LANE_MASTER 0x1 +#define LANE_MASTER_SHIFT 1 + +#define PE220X_PHY_PLL_EN(pipe) (PE220X_PHY_BASE(pipe) + 0x40214) +#define PLL_EN 0x1 +#define PLL_EN_SHIFT 1 + +#define PE220X_PHY_PMA_WIDTH(pipe) (PE220X_PHY_BASE(pipe) + 0x4021c) +#define BIT_20 0x5 +#define BIT_20_SHIFT 4 + +#define PE220X_PHY_PLL_SOURCE_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x4004C) + +#define PE220X_PHY_PMA0_POWER(pipe) (PE220X_PHY_BASE(pipe) + 0x402bc) +#define A0_ACTIVE 0x1 +#define A0_ACTIVE_SHIFT 8 +#define A3_POWERDOWN3 0x8 +#define A3_POWERDOWN3_SHIFT 8 + +#define PE220X_PHY_LINK_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40258) +#define LINK_RESET 0x1 +#define LINK_RESET_MASK 0x1 +#define LINTK_RESET_SHIFT 0x1 + +#define PE220X_PHY_SGMII_DPSEL_INIT(pipe) (PE220X_PHY_BASE(pipe) + 0x40260) +#define DP_SEL 0x1 + +#define PE220X_PHY_APB_RESET(pipe) (PE220X_PHY_BASE(pipe) + 0x40250) +#define APB_RESET 0x1 + +/* phy origin register */ +#define PE220X_PHY_PLL_CFG(pipe) (PE220X_PHY_BASE(pipe) + 0x30038) +#define SINGLE_LINK 0x0 + +#define PE220X_PHY_PMA_CONTROL(pipe) (PE220X_PHY_BASE(pipe) + 0x3800c) +#define CONTROL_ENABLE 0x1 +#define CONTROL_ENABLE_MASK 0x1 +#define 
CONTROL_ENABLE_SHIFT 0x1 + +#define PE220X_PHY_PMA_CONTROL2(pipe) (PE220X_PHY_BASE(pipe) + 0x38004) +#define PLL0_LOCK_DONE (0x1 << 6) + +#define PE220X_PHY_PLL0_CLK_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0X684) +#define PLL_LINK_RATE_162000 0xf01 +#define PLL_LINK_RATE_270000 0x701 +#define PLL_LINK_RATE_540000 0x301 +#define PLL_LINK_RATE_810000 0x200 + +#define PE220X_PHY_HSCLK0_SEL(pipe) (PE220X_PHY_BASE(pipe) + 0x18398) +#define HSCLK_LINK_0 0x0 +#define HSCLK_LINK_1 0x1 + +#define PE220X_PHY_HSCLK0_DIV(pipe) (PE220X_PHY_BASE(pipe) + 0x1839c) +#define HSCLK_LINK_RATE_162000 0x2 +#define HSCLK_LINK_RATE_270000 0x1 +#define HSCLK_LINK_RATE_540000 0x0 +#define HSCLK_LINK_RATE_810000 0x0 + +#define PE220X_PHY_PLLDRC0_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18394) +#define PLLDRC_LINK0 0x1 +#define PLLDRC_LINK1 0x9 + +#define PE220X_PHY_PLL0_DSM_M0(pipe) (PE220X_PHY_BASE(pipe) + 0x250) +#define PLL0_DSM_M0 0x4 +#define PE220X_PHY_PLL0_VCOCAL_START(pipe) (PE220X_PHY_BASE(pipe) + 0x218) +#define PLL0_VCOCAL_START 0xc5e +#define PE220X_PHY_PLL0_VCOCAL_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x208) +#define PLL0_VCOCAL_CTRL 0x3 + +#define PE220X_PHY_PLL0_CP_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x690) +#define PE220X_PHY_PLL0_CP_IADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x694) +#define PE220X_PHY_PLL0_CP_FILT_PADJ(pipe) (PE220X_PHY_BASE(pipe) + 0x698) +#define PE220X_PHY_PLL0_INTDIV(pipe) (PE220X_PHY_BASE(pipe) + 0x240) +#define PE220X_PHY_PLL0_FRACDIVL(pipe) (PE220X_PHY_BASE(pipe) + 0x244) +#define PE220X_PHY_PLL0_FRACDIVH(pipe) (PE220X_PHY_BASE(pipe) + 0x248) +#define PE220X_PHY_PLL0_HIGH_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x24c) +#define PE220X_PHY_PLL0_PDIAG_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x680) +#define PE220X_PHY_PLL0_VCOCAL_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x220) +#define PE220X_PHY_PLL0_LOCK_PEFCNT(pipe) (PE220X_PHY_BASE(pipe) + 0x270) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_START(pipe) (PE220X_PHY_BASE(pipe) + 0x278) +#define PE220X_PHY_PLL0_LOCK_PLLCNT_THR(pipe) (PE220X_PHY_BASE(pipe) + 0x27c) + +#define PE220X_PHY_PLL0_TX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x18400) +#define PLL0_TX_PSC_A0 0xfb +#define PE220X_PHY_PLL0_TX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x18408) +#define PLL0_TX_PSC_A2 0x4aa +#define PE220X_PHY_PLL0_TX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x1840c) +#define PLL0_TX_PSC_A3 0x4aa +#define PE220X_PHY_PLL0_RX_PSC_A0(pipe) (PE220X_PHY_BASE(pipe) + 0x28000) +#define PLL0_RX_PSC_A0 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A2(pipe) (PE220X_PHY_BASE(pipe) + 0x28008) +#define PLL0_RX_PSC_A2 0x0 +#define PE220X_PHY_PLL0_RX_PSC_A3(pipe) (PE220X_PHY_BASE(pipe) + 0x2800C) +#define PLL0_RX_PSC_A3 0x0 +#define PE220X_PHY_PLL0_RX_PSC_CAL(pipe) (PE220X_PHY_BASE(pipe) + 0x28018) +#define PLL0_RX_PSC_CAL 0x0 + +#define PE220X_PHY_PLL0_XCVR_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x183a8) +#define PLL0_XCVR_CTRL 0xf + +#define PE220X_PHY_PLL0_RX_GCSM1_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28420) +#define PLL0_RX_GCSM1_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_GCSM2_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28440) +#define PLL0_RX_GCSM2_CTRL 0x0 +#define PE220X_PHY_PLL0_RX_PERGCSM_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x28460) +#define PLL0_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PE220X_PHY_PLL0_TX_DIAG_ACYA(pipe) (PE220X_PHY_BASE(pipe) + 0x1879c) +#define LOCK 1 +#define UNLOCK 0 + +#define PE220X_PHY_PLL0_TX_TXCC_CTRL(pipe) (PE220X_PHY_BASE(pipe) + 0x18100) +#define TX_TXCC_CTRL 0x8a4 + +#define PE220X_PHY_PLL0_TX_DRV(pipe) (PE220X_PHY_BASE(pipe) + 0x18318) +#define TX_DRV 0x3 + 
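+/*
+ * Drive-strength tables indexed by [link_rate][swing][emphasis]
+ * (mgnfs_val and cpost_val in pe220x_dp.c) are programmed into the
+ * two registers below by pe220x_dp_hw_set_phy_lane_setting().
+ */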
+#define PE220X_PHY_PLL0_TX_MGNFS(pipe)	(PE220X_PHY_BASE(pipe) + 0x18140)
+
+#define PE220X_PHY_PLL0_TX_CPOST(pipe)	(PE220X_PHY_BASE(pipe) + 0x18130)
+
+#endif /* __PE220X_REG_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_crtc.c b/drivers/gpu/drm/phytium/phytium_crtc.c
new file mode 100644
index 0000000000000000000000000000000000000000..628357837da6e34ca839e859eb85b75a7f705397
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_crtc.c
@@ -0,0 +1,484 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_crtc.h"
+#include "phytium_plane.h"
+#include "phytium_dp.h"
+#include "px210_dc.h"
+#include "pe220x_dc.h"
+#include "phytium_reg.h"
+
+#define MAXKERNELSIZE		9
+#define SUBPIXELINDEXBITS	5
+#define SUBPIXELCOUNT		(1 << SUBPIXELINDEXBITS)
+#define SUBPIXELLOADCOUNT	(SUBPIXELCOUNT / 2 + 1)
+#define WEIGHTSTATECOUNT	(((SUBPIXELLOADCOUNT * MAXKERNELSIZE + 1) & ~1) / 2)
+#define KERNELTABLESIZE		(SUBPIXELLOADCOUNT * MAXKERNELSIZE * sizeof(uint16_t))
+#define PHYALIGN(n, align)	(((n) + ((align) - 1)) & ~((align) - 1))
+#define KERNELSTATES		(PHYALIGN(KERNELTABLESIZE + 4, 8))
+#define PHYPI			3.14159265358979323846f
+
+#define MATH_Add(X, Y)			((float)((X) + (Y)))
+#define MATH_Multiply(X, Y)		((float)((X) * (Y)))
+#define MATH_Divide(X, Y)		((float)((X) / (Y)))
+#define MATH_DivideFromUInteger(X, Y)	((float)(X) / (float)(Y))
+#define MATH_I2Float(X)			((float)(X))
+
+struct filter_blit_array {
+	uint8_t kernelSize;
+	uint32_t scaleFactor;
+	uint32_t *kernelStates;
+};
+
+static void phytium_crtc_gamma_set(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+	uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+	uint32_t config = 0;
+	struct drm_crtc_state *state = crtc->state;
+	struct drm_color_lut *lut;
+	int i;
+
+	if (state->gamma_lut) {
+		if (WARN((state->gamma_lut->length/sizeof(struct drm_color_lut) != GAMMA_INDEX_MAX),
+			 "gamma LUT size does not match\n"))
+			return;
+		lut = (struct drm_color_lut *)state->gamma_lut->data;
+		for (i = 0; i < GAMMA_INDEX_MAX; i++) {
+			phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX);
+			config = ((lut[i].red >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT;
+			config |= (((lut[i].green >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT);
+			config |= (((lut[i].blue >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT);
+			phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA);
+		}
+	}
+}
+
+static void phytium_crtc_gamma_init(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+	uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+	uint32_t config = 0;
+	uint16_t *red, *green, *blue;
+	int i;
+
+	if (WARN((crtc->gamma_size != GAMMA_INDEX_MAX), "gamma LUT size does not match\n"))
+		return;
+
+	red = crtc->gamma_store;
+	green = red + crtc->gamma_size;
+	blue = green + crtc->gamma_size;
+
+	for (i = 0; i < GAMMA_INDEX_MAX; i++) {
+		phytium_writel_reg(priv, i, group_offset, PHYTIUM_DC_GAMMA_INDEX);
+		config = ((*red++ >> 6) & GAMMA_RED_MASK) << GAMMA_RED_SHIFT;
+		config |= (((*green++ >> 6) & GAMMA_GREEN_MASK) << GAMMA_GREEN_SHIFT);
+		config |= (((*blue++ >> 6) & GAMMA_BLUE_MASK) << GAMMA_BLUE_SHIFT);
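+		/* each entry packs the 10 MSBs of R, G and B into one word */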
+		phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_GAMMA_DATA);
+	}
+}
+
+static void phytium_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+
+	drm_crtc_cleanup(crtc);
+	kfree(phytium_crtc);
+}
+
+struct drm_crtc_state *
+phytium_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
+{
+	struct phytium_crtc_state *phytium_crtc_state = NULL;
+
+	phytium_crtc_state = kmemdup(crtc->state, sizeof(*phytium_crtc_state),
+				     GFP_KERNEL);
+	if (!phytium_crtc_state)
+		return NULL;
+	__drm_atomic_helper_crtc_duplicate_state(crtc,
+						 &phytium_crtc_state->base);
+
+	return &phytium_crtc_state->base;
+}
+
+void
+phytium_crtc_atomic_destroy_state(struct drm_crtc *crtc,
+				  struct drm_crtc_state *state)
+{
+	struct phytium_crtc_state *phytium_crtc_state =
+		to_phytium_crtc_state(state);
+
+	__drm_atomic_helper_crtc_destroy_state(state);
+	kfree(phytium_crtc_state);
+}
+
+static int phytium_enable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+
+	phytium_writel_reg(priv, INT_ENABLE, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_INT_ENABLE);
+
+	return 0;
+}
+
+static void phytium_disable_vblank(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+
+	phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[phys_pipe],
+			   PHYTIUM_DC_INT_ENABLE);
+}
+
+static const struct drm_crtc_funcs phytium_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = phytium_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.reset = drm_atomic_helper_crtc_reset,
+	.atomic_duplicate_state = phytium_crtc_atomic_duplicate_state,
+	.atomic_destroy_state = phytium_crtc_atomic_destroy_state,
+	.enable_vblank = phytium_enable_vblank,
+	.disable_vblank = phytium_disable_vblank,
+};
+
+static void
+phytium_crtc_atomic_enable(struct drm_crtc *crtc,
+			   struct drm_atomic_state *state)
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+	struct drm_connector_state *new_conn_state;
+	struct drm_connector *conn;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int phys_pipe = phytium_crtc->phys_pipe;
+	uint32_t group_offset = priv->dc_reg_base[phys_pipe];
+	int config = 0, i = 0;
+
+	for_each_new_connector_in_state(state, conn, new_conn_state, i) {
+		if (new_conn_state->crtc != crtc)
+			continue;
+
+		switch (conn->display_info.bpc) {
+		case 10:
+			phytium_crtc->bpc = DP_RGB101010;
+			break;
+		case 6:
+			phytium_crtc->bpc = DP_RGB666;
+			break;
+		default:
+			phytium_crtc->bpc = DP_RGB888;
+			break;
+		}
+	}
+
+	/* config pix clock */
+	phytium_crtc->dc_hw_config_pix_clock(crtc, mode->clock);
+
+	config = ((mode->crtc_hdisplay & HDISPLAY_END_MASK) << HDISPLAY_END_SHIFT)
+		| ((mode->crtc_htotal & HDISPLAY_TOTAL_MASK) << HDISPLAY_TOTAL_SHIFT);
+	phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HDISPLAY);
+	config = ((mode->crtc_hsync_start & HSYNC_START_MASK) << HSYNC_START_SHIFT)
+		| ((mode->crtc_hsync_end & HSYNC_END_MASK) << HSYNC_END_SHIFT)
+		| HSYNC_PULSE_ENABLED;
+	config |=
(mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : HSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_HSYNC); + config = ((mode->crtc_vdisplay & VDISPLAY_END_MASK) << VDISPLAY_END_SHIFT) + | ((mode->crtc_vtotal & VDISPLAY_TOTAL_MASK) << VDISPLAY_TOTAL_SHIFT); + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VDISPLAY); + config = ((mode->crtc_vsync_start & VSYNC_START_MASK) << VSYNC_START_SHIFT) + | ((mode->crtc_vsync_end & VSYNC_END_MASK) << VSYNC_END_SHIFT) + | VSYNC_PULSE_ENABLED; + config |= (mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : VSYNC_NEGATIVE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_VSYNC); + config = PANEL_DATAENABLE_ENABLE | PANEL_DATA_ENABLE | PANEL_CLOCK_ENABLE; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_PANEL_CONFIG); + config = phytium_crtc->bpc | OUTPUT_DP; + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_DP_CONFIG); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->active) + config |= FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET; + else + config &= (~(FRAMEBUFFER_OUTPUT | FRAMEBUFFER_RESET)); + + if (phytium_crtc->scale_enable) + config |= FRAMEBUFFER_SCALE_ENABLE; + else + config &= (~FRAMEBUFFER_SCALE_ENABLE); + + if (crtc->state->gamma_lut) + phytium_crtc_gamma_set(crtc); + else + phytium_crtc_gamma_init(crtc); + + phytium_writel_reg(priv, config, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + drm_crtc_vblank_on(crtc); +} + +static void +phytium_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + drm_crtc_vblank_off(crtc); + phytium_crtc->dc_hw_disable(crtc); +} + +static void phytium_crtc_update_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode) +{ + if (native_mode->clock == drm_mode->clock && + native_mode->htotal == drm_mode->htotal && + native_mode->vtotal == drm_mode->vtotal) { + drm_mode->crtc_hdisplay = native_mode->crtc_hdisplay; + drm_mode->crtc_vdisplay = native_mode->crtc_vdisplay; + drm_mode->crtc_clock = native_mode->crtc_clock; + drm_mode->crtc_hblank_start = native_mode->crtc_hblank_start; + drm_mode->crtc_hblank_end = native_mode->crtc_hblank_end; + drm_mode->crtc_hsync_start = native_mode->crtc_hsync_start; + drm_mode->crtc_hsync_end = native_mode->crtc_hsync_end; + drm_mode->crtc_htotal = native_mode->crtc_htotal; + drm_mode->crtc_hskew = native_mode->crtc_hskew; + drm_mode->crtc_vblank_start = native_mode->crtc_vblank_start; + drm_mode->crtc_vblank_end = native_mode->crtc_vblank_end; + drm_mode->crtc_vsync_start = native_mode->crtc_vsync_start; + drm_mode->crtc_vsync_end = native_mode->crtc_vsync_end; + drm_mode->crtc_vtotal = native_mode->crtc_vtotal; + } +} + +static int +phytium_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + struct drm_plane_state *new_plane_state = NULL; + int ret = 0; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + uint32_t i; + struct phytium_dp_device *phytium_dp = NULL; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (new_con_state->crtc == crtc) { + phytium_dp = connector_to_dp_device(connector); + break; + } + } + if (phytium_dp) + phytium_crtc_update_timing_for_drm_display_mode(&crtc_state->adjusted_mode, + &phytium_dp->native_mode); + + new_plane_state = 
drm_atomic_get_new_plane_state(crtc_state->state, + crtc->primary); + if (crtc_state->enable && new_plane_state && !new_plane_state->crtc) { + ret = -EINVAL; + goto fail; + } + + return 0; +fail: + return ret; +} + +static void +phytium_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + if (config & FRAMEBUFFER_RESET) { + phytium_writel_reg(priv, config | FRAMEBUFFER_VALID_PENDING, + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } +} + +static void phytium_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + struct phytium_crtc_state *phytium_crtc_state = NULL; + int phys_pipe = phytium_crtc->phys_pipe, config; + uint32_t group_offset = priv->dc_reg_base[phys_pipe]; + + DRM_DEBUG_KMS("crtc->state active:%d enable:%d\n", + crtc->state->active, crtc->state->enable); + phytium_crtc_state = to_phytium_crtc_state(crtc->state); + + if (crtc->state->color_mgmt_changed) + phytium_crtc_gamma_set(crtc); + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + phytium_writel_reg(priv, config&(~FRAMEBUFFER_VALID_PENDING), + group_offset, PHYTIUM_DC_FRAMEBUFFER_CONFIG); + + if (crtc->state->event) { + DRM_DEBUG_KMS("vblank->refcount:%d\n", + atomic_read(&dev->vblank[0].refcount)); + spin_lock_irq(&dev->event_lock); + if (drm_crtc_vblank_get(crtc) == 0) + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + else + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + spin_unlock_irq(&dev->event_lock); + } +} + +static enum drm_mode_status +phytium_crtc_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (mode->crtc_clock > priv->info.crtc_clock_max) + return MODE_CLOCK_HIGH; + + if (mode->hdisplay > priv->info.hdisplay_max) + return MODE_BAD_HVALUE; + + if (mode->vdisplay > priv->info.vdisplay_max) + return MODE_BAD_VVALUE; + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + return MODE_OK; +} + +static const struct drm_crtc_helper_funcs phytium_crtc_helper_funcs = { + .mode_valid = phytium_crtc_mode_valid, + .atomic_check = phytium_crtc_atomic_check, + .atomic_begin = phytium_crtc_atomic_begin, + .atomic_flush = phytium_crtc_atomic_flush, + .atomic_enable = phytium_crtc_atomic_enable, + .atomic_disable = phytium_crtc_atomic_disable, +}; + +void phytium_crtc_resume(struct drm_device *drm_dev) +{ + struct drm_crtc *crtc; + struct phytium_crtc *phytium_crtc = NULL; + + drm_for_each_crtc(crtc, drm_dev) { + phytium_crtc = to_phytium_crtc(crtc); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(crtc); + phytium_crtc_gamma_init(crtc); + } +} + +int phytium_crtc_init(struct drm_device *dev, int phys_pipe) +{ + struct phytium_crtc *phytium_crtc; + struct phytium_crtc_state *phytium_crtc_state; + struct phytium_plane *phytium_primary_plane = NULL; + struct phytium_plane *phytium_cursor_plane = NULL; + struct phytium_display_private *priv 
= dev->dev_private; + int ret; + + phytium_crtc = kzalloc(sizeof(*phytium_crtc), GFP_KERNEL); + if (!phytium_crtc) { + ret = -ENOMEM; + goto failed_malloc_crtc; + } + + phytium_crtc_state = kzalloc(sizeof(*phytium_crtc_state), GFP_KERNEL); + if (!phytium_crtc_state) { + ret = -ENOMEM; + goto failed_malloc_crtc_state; + } + + phytium_crtc_state->base.crtc = &phytium_crtc->base; + phytium_crtc->base.state = &phytium_crtc_state->base; + phytium_crtc->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_crtc->dc_hw_config_pix_clock = px210_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = px210_dc_hw_disable; + phytium_crtc->dc_hw_reset = NULL; + priv->dc_reg_base[phys_pipe] = PX210_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = PX210_DCREQ_BASE(phys_pipe); + priv->address_transform_base = PX210_ADDRESS_TRANSFORM_BASE; + } else if (IS_PE220X(priv)) { + phytium_crtc->dc_hw_config_pix_clock = pe220x_dc_hw_config_pix_clock; + phytium_crtc->dc_hw_disable = pe220x_dc_hw_disable; + phytium_crtc->dc_hw_reset = pe220x_dc_hw_reset; + priv->dc_reg_base[phys_pipe] = PE220X_DC_BASE(phys_pipe); + priv->dcreq_reg_base[phys_pipe] = 0x0; + priv->address_transform_base = PE220X_ADDRESS_TRANSFORM_BASE; + } + + phytium_primary_plane = phytium_primary_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_primary_plane)) { + ret = PTR_ERR(phytium_primary_plane); + DRM_ERROR("create primary plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_primary; + } + + phytium_cursor_plane = phytium_cursor_plane_create(dev, phys_pipe); + if (IS_ERR(phytium_cursor_plane)) { + ret = PTR_ERR(phytium_cursor_plane); + DRM_ERROR("create cursor plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_create_cursor; + } + + ret = drm_crtc_init_with_planes(dev, &phytium_crtc->base, + &phytium_primary_plane->base, + &phytium_cursor_plane->base, + &phytium_crtc_funcs, + "phys_pipe %d", phys_pipe); + + if (ret) { + DRM_ERROR("init crtc with plane failed, phys_pipe(%d)\n", phys_pipe); + goto failed_crtc_init; + } + drm_crtc_helper_add(&phytium_crtc->base, &phytium_crtc_helper_funcs); + drm_crtc_vblank_reset(&phytium_crtc->base); + drm_mode_crtc_set_gamma_size(&phytium_crtc->base, GAMMA_INDEX_MAX); + drm_crtc_enable_color_mgmt(&phytium_crtc->base, 0, false, GAMMA_INDEX_MAX); + if (phytium_crtc->dc_hw_reset) + phytium_crtc->dc_hw_reset(&phytium_crtc->base); + phytium_crtc_gamma_init(&phytium_crtc->base); + + return 0; + +failed_crtc_init: +failed_create_cursor: + /* drm_mode_config_cleanup() will free any crtcs/planes already initialized */ +failed_create_primary: + kfree(phytium_crtc_state); +failed_malloc_crtc_state: + kfree(phytium_crtc); +failed_malloc_crtc: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_crtc.h b/drivers/gpu/drm/phytium/phytium_crtc.h new file mode 100644 index 0000000000000000000000000000000000000000..78a841c1c68412b939ba4f1dc7cc40b5a9e6bedd --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_crtc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#ifndef __PHYTIUM_CRTC_H__
+#define __PHYTIUM_CRTC_H__
+
+struct phytium_crtc {
+    struct drm_crtc base;
+    int phys_pipe;
+    unsigned int bpc;
+
+    /* scale */
+    uint32_t src_width;
+    uint32_t src_height;
+    uint32_t dst_width;
+    uint32_t dst_height;
+    uint32_t dst_x;
+    uint32_t dst_y;
+    bool scale_enable;
+    bool reserve[3];
+
+    void (*dc_hw_config_pix_clock)(struct drm_crtc *crtc, int clock);
+    void (*dc_hw_disable)(struct drm_crtc *crtc);
+    void (*dc_hw_reset)(struct drm_crtc *crtc);
+};
+
+struct phytium_crtc_state {
+    struct drm_crtc_state base;
+};
+
+#define to_phytium_crtc(x) container_of(x, struct phytium_crtc, base)
+#define to_phytium_crtc_state(x) container_of(x, struct phytium_crtc_state, base)
+
+void phytium_crtc_resume(struct drm_device *drm_dev);
+int phytium_crtc_init(struct drm_device *dev, int pipe);
+#endif /* __PHYTIUM_CRTC_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.c b/drivers/gpu/drm/phytium/phytium_debugfs.c
new file mode 100644
index 0000000000000000000000000000000000000000..eedad22c153653c1dde2052db8b49c3cb6388f34
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_debugfs.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_reg.h"
+
+const char *const mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT] = {
+    "Memory_Vram_Total",
+    "Memory_Vram_Alloc",
+    "Memory_System_Carveout_Total",
+    "Memory_System_Carveout_Alloc",
+    "Memory_System_Alloc",
+};
+
+static ssize_t
+phytium_dp_register_write(struct file *filp,
+                          const char __user *ubuf,
+                          size_t len,
+                          loff_t *ppos)
+{
+    char tmp[16];
+
+    if (len >= sizeof(tmp))
+        return -EINVAL;
+
+    memset(tmp, 0, sizeof(tmp));
+    if (copy_from_user(tmp, ubuf, len))
+        return -EFAULT;
+    tmp[len] = '\0';
+
+    /* The input is validated and then discarded: writes are a no-op. */
+    return len;
+}
+
+static int phytium_dp_register_show(struct seq_file *m, void *data)
+{
+    struct drm_connector *connector = m->private;
+    struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+    struct drm_device *dev = phytium_dp->dev;
+    struct phytium_display_private *priv = dev->dev_private;
+    int port = phytium_dp->port;
+    uint32_t group_offset = priv->dp_reg_base[port];
+
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_M_VID,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_M_VID));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_N_VID,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_N_VID));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_TRANSFER_UNIT_SIZE,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_DATA_COUNT,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_DATA_COUNT));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HTOTAL,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HRES,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HRES));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSWIDTH,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_HSTART,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_HSTART));
+    seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VTOTAL,
+               phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL));
+
seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VRES, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VRES)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSWIDTH, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_VSTART, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_VSTART)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC0, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_MAIN_LINK_MISC1, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_USER_SYNC_POLARITY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_VIDEO_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SECONDARY_STREAM_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE)); + seq_puts(m, "audio:\n"); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_INPUT_SELECT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DIRECT_CLKDIV, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_COUNT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CHANNEL_MAP, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_DATA_WINDOW, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_CATEGORY_CODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_MAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_MAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_NAUD, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_NAUD)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CLOCK_MODE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_SOURCE_FORMAT, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY)); + seq_printf(m, "addr:h'0x%08x h'0x%08x\n", PHYTIUM_DP_SEC_AUDIO_ENABLE, + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE)); + + return 0; +} + +static int phytium_dp_register_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_register_show, inode->i_private); +} + +static const struct file_operations phytium_dp_register_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_register_open, + .read = seq_read, + 
.llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_register_write, +}; + +static ssize_t +phytium_dp_trigger_train_fail_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + if (kstrtouint(tmp, 10, &phytium_dp->trigger_train_fail) != 0) + return -EINVAL; + + return len; +} + +static int phytium_dp_trigger_train_fail_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "trigger_train_fail: %d\n", phytium_dp->trigger_train_fail); + seq_printf(m, "train_retry_count: %d\n", phytium_dp->train_retry_count); + + return 0; +} + +static int phytium_dp_trigger_train_fail_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_trigger_train_fail_show, inode->i_private); +} + +static const struct file_operations phytium_dp_trigger_train_fail_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_trigger_train_fail_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .write = phytium_dp_trigger_train_fail_write, +}; + +static int phytium_edp_backlight_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "backlight: %s\n", phytium_dp->panel.backlight_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_backlight_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_backlight_show, inode->i_private); +} + +static const struct file_operations phytium_edp_backlight_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_backlight_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int phytium_edp_power_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (!phytium_dp->is_edp) + return -ENODEV; + + mutex_lock(&phytium_dp->panel.panel_lock); + seq_printf(m, "power: %s\n", phytium_dp->panel.power_enabled?"enabled":"disabled"); + mutex_unlock(&phytium_dp->panel.panel_lock); + + return 0; +} + +static int phytium_edp_power_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_edp_power_show, inode->i_private); +} + +static const struct file_operations phytium_edp_power_fops = { + .owner = THIS_MODULE, + .open = phytium_edp_power_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +struct dpcd_block { + /* DPCD dump start address. */ + unsigned int offset; + /* DPCD dump end address, inclusive. If unset, .size will be used. */ + unsigned int end; + /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */ + size_t size; + /* Only valid for eDP. 
*/ + bool edp; +}; + +static const struct dpcd_block phytium_dpcd_debug[] = { + { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE }, + { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS }, + { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 }, + { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET }, + { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 }, + { .offset = DP_SET_POWER }, + { .offset = DP_EDP_DPCD_REV }, + { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 }, + { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB }, + { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET }, + { .offset = DP_DEVICE_SERVICE_IRQ_VECTOR, .size = 1 }, + { .offset = DP_TEST_REQUEST, .end = DP_TEST_PATTERN }, +}; + +static int phytium_dpcd_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + uint8_t buf[16], i; + ssize_t err; + + if (connector->status != connector_status_connected) + return -ENODEV; + + for (i = 0; i < ARRAY_SIZE(phytium_dpcd_debug); i++) { + const struct dpcd_block *b = &phytium_dpcd_debug[i]; + size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1); + + if (WARN_ON(size > sizeof(buf))) + continue; + + err = drm_dp_dpcd_read(&phytium_dp->aux, b->offset, buf, size); + if (err <= 0) { + DRM_ERROR("dpcd read (%zu bytes at %u) failed (%zd)\n", + size, b->offset, err); + continue; + } + + seq_printf(m, "%04x: %*ph\n", b->offset, (int) size, buf); + } + + return 0; +} + +static int phytium_dpcd_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dpcd_show, inode->i_private); +} + +static const struct file_operations phytium_dpcd_fops = { + .owner = THIS_MODULE, + .open = phytium_dpcd_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +phytium_dp_state_write(struct file *filp, + const char __user *ubuf, + size_t len, + loff_t *ppos) +{ + char tmp[16]; + + if (len >= sizeof(tmp)) + return -EINVAL; + + memset(tmp, 0, sizeof(tmp)); + if (copy_from_user(tmp, ubuf, len)) + return -EFAULT; + tmp[len] = '\0'; + + return len; +} + +static int phytium_dp_state_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + seq_printf(m, "port number: %d\n", phytium_dp->port); + seq_printf(m, "source_max_lane_count: %d\n", phytium_dp->source_max_lane_count); + seq_printf(m, "max_source_rates: %d\n", + phytium_dp->source_rates[phytium_dp->num_source_rates-1]); + if (connector->status == connector_status_connected) { + seq_printf(m, "sink_max_lane_count: %d\n", phytium_dp->sink_max_lane_count); + seq_printf(m, "max_sink_rates: %d\n", + phytium_dp->sink_rates[phytium_dp->num_sink_rates-1]); + seq_printf(m, "link_rate: %d\n", phytium_dp->link_rate); + seq_printf(m, "link_lane_count: %d\n", phytium_dp->link_lane_count); + seq_printf(m, "train_set[0]: %d\n", phytium_dp->train_set[0]); + seq_printf(m, "has_audio: %s\n", phytium_dp->has_audio?"yes":"no"); + } + + return 0; +} + +static int phytium_dp_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, phytium_dp_state_show, inode->i_private); +} + +static const struct file_operations phytium_dp_state_fops = { + .owner = THIS_MODULE, + .open = phytium_dp_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + 
.write = phytium_dp_state_write,
+};
+
+static const struct phytium_debugfs_files {
+    const char *name;
+    const struct file_operations *fops;
+} phytium_debugfs_connector_files[] = {
+    {"dp_state", &phytium_dp_state_fops},
+    {"dpcd", &phytium_dpcd_fops},
+    {"dp_register", &phytium_dp_register_fops},
+    {"dp_trigger_train_fail", &phytium_dp_trigger_train_fail_fops},
+};
+
+static const struct phytium_debugfs_files phytium_edp_debugfs_connector_files[] = {
+    {"edp_power", &phytium_edp_power_fops},
+    {"edp_backlight", &phytium_edp_backlight_fops},
+};
+
+int phytium_debugfs_connector_add(struct drm_connector *connector)
+{
+    struct dentry *root = connector->debugfs_entry;
+    struct dentry *ent;
+    int i;
+    struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+
+    if (!root)
+        return -ENODEV;
+
+    for (i = 0; i < ARRAY_SIZE(phytium_debugfs_connector_files); i++) {
+        ent = debugfs_create_file(phytium_debugfs_connector_files[i].name,
+                                  0644,
+                                  root,
+                                  connector,
+                                  phytium_debugfs_connector_files[i].fops);
+        /* debugfs_create_file() returns an ERR_PTR on failure, never NULL */
+        if (IS_ERR(ent))
+            return PTR_ERR(ent);
+    }
+
+    if (phytium_dp->is_edp)
+        for (i = 0; i < ARRAY_SIZE(phytium_edp_debugfs_connector_files); i++) {
+            ent = debugfs_create_file(phytium_edp_debugfs_connector_files[i].name,
+                                      0644,
+                                      root,
+                                      connector,
+                                      phytium_edp_debugfs_connector_files[i].fops);
+            if (IS_ERR(ent))
+                return PTR_ERR(ent);
+        }
+
+    return 0;
+}
+
+static int phytium_mem_state_show(struct seq_file *m, void *data)
+{
+    struct phytium_display_private *priv = m->private;
+    uint8_t i;
+
+    for (i = 0; i < ARRAY_SIZE(mem_state); i++)
+        seq_printf(m, "%-34s %10llu\n", mem_state[i], priv->mem_state[i]);
+
+    return 0;
+}
+
+static int phytium_mem_state_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, phytium_mem_state_show, inode->i_private);
+}
+
+static const struct file_operations phytium_mem_state_fops = {
+    .owner = THIS_MODULE,
+    .open = phytium_mem_state_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+};
+
+static const struct phytium_debugfs_files phytium_debugfs_display_files[] = {
+    {"mem_state", &phytium_mem_state_fops},
+};
+
+int phytium_debugfs_display_register(struct phytium_display_private *priv)
+{
+    struct drm_minor *minor = priv->dev->primary;
+    struct dentry *root = minor->debugfs_root;
+    struct dentry *ent;
+
+    if (!root)
+        return -ENODEV;
+
+    ent = debugfs_create_file(phytium_debugfs_display_files[0].name,
+                              0644,
+                              root,
+                              priv,
+                              phytium_debugfs_display_files[0].fops);
+    if (IS_ERR(ent))
+        return PTR_ERR(ent);
+
+    return 0;
+}
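+
+/*
+ * Usage sketch (illustrative only): once phytium_debugfs_connector_add()
+ * has run, the per-connector files appear under the connector's debugfs
+ * directory, e.g.
+ *
+ *   cat /sys/kernel/debug/dri/<minor>/<connector>/dp_state
+ *   echo 1 > /sys/kernel/debug/dri/<minor>/<connector>/dp_trigger_train_fail
+ *
+ * The <minor> and <connector> path components depend on the DRM minor
+ * number and the connector name assigned at runtime.
+ */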
diff --git a/drivers/gpu/drm/phytium/phytium_debugfs.h b/drivers/gpu/drm/phytium/phytium_debugfs.h
new file mode 100644
index 0000000000000000000000000000000000000000..dc784bc557a7355db3f311e6b864b2e54d7185cc
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_DEBUGFS_H__
+#define __PHYTIUM_DEBUGFS_H__
+
+int phytium_debugfs_connector_add(struct drm_connector *connector);
+int phytium_debugfs_display_register(struct phytium_display_private *priv);
+
+#endif /* __PHYTIUM_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.c b/drivers/gpu/drm/phytium/phytium_display_drv.c
new file mode 100644
index 0000000000000000000000000000000000000000..60c7a20e7ca2a52d070ff2134b2802ee0eacb993
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_display_drv.c
@@ -0,0 +1,434 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_crtc.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "phytium_fb.h"
+#include "phytium_fbdev.h"
+#include "phytium_reg.h"
+#include "phytium_pci.h"
+#include "phytium_platform.h"
+#include "phytium_debugfs.h"
+
+int dc_fake_mode_enable;
+module_param(dc_fake_mode_enable, int, 0644);
+MODULE_PARM_DESC(dc_fake_mode_enable, "Enable DC fake mode (0-disabled; 1-enabled; default-0)");
+
+int dc_fast_training_check = 1;
+module_param(dc_fast_training_check, int, 0644);
+MODULE_PARM_DESC(dc_fast_training_check, "Check dp fast training (0-disabled; 1-enabled; default-1)");
+
+int num_source_rates = 4;
+module_param(num_source_rates, int, 0644);
+MODULE_PARM_DESC(num_source_rates, "set the source max rates (1-1.62Gbps; 2-2.7Gbps; 3-5.4Gbps; 4-8.1Gbps; default-4)");
+
+int source_max_lane_count = 4;
+module_param(source_max_lane_count, int, 0644);
+MODULE_PARM_DESC(source_max_lane_count, "set the source lane count (1-1lane; 2-2lane; 4-4lane; default-4)");
+
+int link_dynamic_adjust;
+module_param(link_dynamic_adjust, int, 0644);
+MODULE_PARM_DESC(link_dynamic_adjust, "dynamically select the training parameters according to the display mode (0-disabled; 1-enabled; default-0)");
+
+int phytium_wait_cmd_done(struct phytium_display_private *priv,
+                          uint32_t register_offset,
+                          uint32_t request_bit,
+                          uint32_t reply_bit)
+{
+    int timeout = 500, config = 0, ret = 0;
+
+    do {
+        mdelay(1);
+        timeout--;
+        config = phytium_readl_reg(priv, 0, register_offset);
+    } while ((!(config & reply_bit)) && timeout);
+
+    phytium_writel_reg(priv, config & (~request_bit), 0, register_offset);
+
+    if (timeout == 0) {
+        DRM_ERROR("wait cmd reply timeout\n");
+        ret = -EBUSY;
+    } else {
+        timeout = 500;
+        do {
+            mdelay(1);
+            timeout--;
+            config = phytium_readl_reg(priv, 0, register_offset);
+        } while ((config & reply_bit) && timeout);
+        if (timeout == 0) {
+            DRM_ERROR("clear cmd timeout\n");
+            ret = -EBUSY;
+        }
+    }
+    mdelay(5);
+
+    return ret;
+}
+
+static void phytium_irq_preinstall(struct drm_device *dev)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+    int i, status;
+
+    for_each_pipe_masked(priv, i) {
+        /* read to clear any latched status, then mask the interrupt */
+        status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+        phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+    }
+}
+
+static void phytium_irq_uninstall(struct drm_device *dev)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+    int i, status;
+
+    for_each_pipe_masked(priv, i) {
+        status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+        phytium_writel_reg(priv, INT_DISABLE, priv->dc_reg_base[i], PHYTIUM_DC_INT_ENABLE);
+    }
+}
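+
+/*
+ * Interrupt flow sketch: a vblank on any enabled physical pipe latches
+ * PHYTIUM_DC_INT_STATUS. The handler below maps the physical pipe to the
+ * virtual pipe index that DRM sees and forwards the event through
+ * drm_handle_vblank(); hot-plug events are handled separately by
+ * phytium_dp_hpd_irq_handler().
+ */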
+static irqreturn_t phytium_display_irq_handler(int irq, void *data)
+{
+    struct drm_device *dev = data;
+    struct phytium_display_private *priv = dev->dev_private;
+    uint32_t status = 0;    /* full register value; a bool would truncate it */
+    int i = 0, virt_pipe = 0;
+    irqreturn_t ret = IRQ_NONE, ret1 = IRQ_NONE;
+
+    for_each_pipe_masked(priv, i) {
+        status = phytium_readl_reg(priv, priv->dc_reg_base[i], PHYTIUM_DC_INT_STATUS);
+        if (status & INT_STATUS) {
+            virt_pipe = phytium_get_virt_pipe(priv, i);
+            if (virt_pipe < 0)
+                return IRQ_NONE;
+            drm_handle_vblank(dev, virt_pipe);
+            ret = IRQ_HANDLED;
+            if (priv->dc_hw_clear_msi_irq)
+                priv->dc_hw_clear_msi_irq(priv, i);
+        }
+    }
+
+    ret1 = phytium_dp_hpd_irq_handler(priv);
+    if (ret == IRQ_HANDLED || ret1 == IRQ_HANDLED)
+        return IRQ_HANDLED;
+
+    return IRQ_NONE;
+}
+
+static const struct drm_mode_config_funcs phytium_mode_funcs = {
+    .fb_create = phytium_fb_create,
+    .output_poll_changed = drm_fb_helper_output_poll_changed,
+    .atomic_check = drm_atomic_helper_check,
+    .atomic_commit = drm_atomic_helper_commit,
+};
+
+static void phytium_atomic_commit_tail(struct drm_atomic_state *state)
+{
+    struct drm_device *dev = state->dev;
+
+    drm_atomic_helper_commit_modeset_disables(dev, state);
+    drm_atomic_helper_commit_planes(dev, state, false);
+    drm_atomic_helper_commit_modeset_enables(dev, state);
+    drm_atomic_helper_commit_hw_done(state);
+    drm_atomic_helper_wait_for_flip_done(dev, state);
+    drm_atomic_helper_cleanup_planes(dev, state);
+}
+
+static struct drm_mode_config_helper_funcs phytium_mode_config_helpers = {
+    .atomic_commit_tail = phytium_atomic_commit_tail,
+};
+
+static int phytium_modeset_init(struct drm_device *dev)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+    int i = 0, ret;
+
+    drm_mode_config_init(dev);
+    dev->mode_config.min_width = 0;
+    dev->mode_config.min_height = 0;
+    dev->mode_config.max_width = 16384;
+    dev->mode_config.max_height = 16384;
+    dev->mode_config.cursor_width = 32;
+    dev->mode_config.cursor_height = 32;
+
+    dev->mode_config.preferred_depth = 24;
+    dev->mode_config.prefer_shadow = 1;
+    dev->mode_config.fb_modifiers_not_supported = false;
+
+    dev->mode_config.funcs = &phytium_mode_funcs;
+    dev->mode_config.helper_private = &phytium_mode_config_helpers;
+
+    for_each_pipe_masked(priv, i) {
+        ret = phytium_crtc_init(dev, i);
+        if (ret) {
+            DRM_ERROR("phytium_crtc_init(pipe %d) failed\n", i);
+            goto failed_crtc_init;
+        }
+    }
+
+    for_each_pipe_masked(priv, i) {
+        ret = phytium_dp_init(dev, i);
+        if (ret) {
+            DRM_ERROR("phytium_dp_init(pipe %d) failed\n", i);
+            goto failed_dp_init;
+        }
+    }
+
+    drm_mode_config_reset(dev);
+
+    return 0;
+failed_dp_init:
+failed_crtc_init:
+    drm_mode_config_cleanup(dev);
+    return ret;
+}
+
+int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe)
+{
+    int i = 0;
+    int virt_pipe = 0;
+
+    for_each_pipe_masked(priv, i) {
+        if (i != phys_pipe)
+            virt_pipe++;
+        else
+            return virt_pipe;
+    }
+
+    DRM_ERROR("%s %d failed\n", __func__, phys_pipe);
+    return -EINVAL;
+}
+
+int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe)
+{
+    int i = 0;
+    int tmp = 0;
+
+    for_each_pipe_masked(priv, i) {
+        if (tmp != virt_pipe)
+            tmp++;
+        else
+            return i;
+    }
+
+    DRM_ERROR("%s %d failed\n", __func__, virt_pipe);
+    return -EINVAL;
+}
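+
+/*
+ * Worked example of the mapping above: with info.pipe_mask = 0b101, phys
+ * pipe 0 <-> virt pipe 0 and phys pipe 2 <-> virt pipe 1, while phys pipe 1
+ * is skipped because its mask bit is clear. Over the pipes present in the
+ * mask, phytium_get_virt_pipe() and phytium_get_phys_pipe() are inverses.
+ */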
+static int phytium_display_load(struct drm_device *dev, unsigned long flags)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+    int ret = 0;
+
+    ret = drm_vblank_init(dev, priv->info.num_pipes);
+    if (ret) {
+        DRM_ERROR("vblank init failed\n");
+        goto failed_vblank_init;
+    }
+
+    ret = phytium_modeset_init(dev);
+    if (ret) {
+        DRM_ERROR("phytium_modeset_init failed\n");
+        goto failed_modeset_init;
+    }
+
+    if (priv->support_memory_type & MEMORY_TYPE_VRAM)
+        priv->vram_hw_init(priv);
+
+    phytium_irq_preinstall(dev);
+    ret = request_irq(priv->irq, phytium_display_irq_handler,
+                      IRQF_SHARED, dev->driver->name, dev);
+    if (ret) {
+        DRM_ERROR("install irq failed\n");
+        goto failed_irq_install;
+    }
+
+    ret = phytium_drm_fbdev_init(dev);
+    if (ret)
+        DRM_ERROR("failed to init fbdev\n");
+
+    phytium_debugfs_display_register(priv);
+
+    return ret;
+
+failed_irq_install:
+    drm_mode_config_cleanup(dev);
+failed_modeset_init:
+failed_vblank_init:
+    return ret;
+}
+
+static void phytium_display_unload(struct drm_device *dev)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+
+    phytium_drm_fbdev_fini(dev);
+    phytium_irq_uninstall(dev);
+    free_irq(priv->irq, dev);
+    drm_mode_config_cleanup(dev);
+}
+
+static const struct drm_ioctl_desc phytium_ioctls[] = {
+    /* for test, none so far */
+};
+
+static const struct file_operations phytium_drm_driver_fops = {
+    .owner = THIS_MODULE,
+    .open = drm_open,
+    .release = drm_release,
+    .unlocked_ioctl = drm_ioctl,
+    .compat_ioctl = drm_compat_ioctl,
+    .poll = drm_poll,
+    .read = drm_read,
+    .llseek = no_llseek,
+    .mmap = phytium_gem_mmap,
+};
+
+struct drm_driver phytium_display_drm_driver = {
+    .driver_features = DRIVER_HAVE_IRQ |
+                       DRIVER_MODESET |
+                       DRIVER_ATOMIC |
+                       DRIVER_GEM,
+    .load = phytium_display_load,
+    .unload = phytium_display_unload,
+    .lastclose = drm_fb_helper_lastclose,
+    .gem_prime_import = drm_gem_prime_import,
+    .gem_prime_import_sg_table = phytium_gem_prime_import_sg_table,
+    .dumb_create = phytium_gem_dumb_create,
+    .ioctls = phytium_ioctls,
+    .num_ioctls = ARRAY_SIZE(phytium_ioctls),
+    .fops = &phytium_drm_driver_fops,
+    .name = DRV_NAME,
+    .desc = DRV_DESC,
+    .date = DRV_DATE,
+    .major = DRV_MAJOR,
+    .minor = DRV_MINOR,
+};
+
+static void phytium_display_shutdown(struct drm_device *dev)
+{
+    drm_atomic_helper_shutdown(dev);
+}
+
+static int phytium_display_pm_suspend(struct drm_device *dev)
+{
+    struct drm_atomic_state *state;
+    struct phytium_display_private *priv = dev->dev_private;
+    int ret, ret1;
+
+    phytium_dp_hpd_irq_setup(dev, false);
+    cancel_work_sync(&priv->hotplug_work);
+    drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
+    state = drm_atomic_helper_suspend(dev);
+    if (IS_ERR(state)) {
+        DRM_ERROR("drm_atomic_helper_suspend failed: %ld\n", PTR_ERR(state));
+        ret = PTR_ERR(state);
+        goto suspend_failed;
+    }
+    dev->mode_config.suspend_state = state;
+    ret = phytium_gem_suspend(dev);
+    if (ret) {
+        DRM_ERROR("phytium_gem_suspend failed: %d\n", ret);
+        goto gem_suspend_failed;
+    }
+
+    return 0;
+
+gem_suspend_failed:
+    ret1 = drm_atomic_helper_resume(dev, dev->mode_config.suspend_state);
+    if (ret1)
+        DRM_ERROR("Failed to resume (%d)\n", ret1);
+    dev->mode_config.suspend_state = NULL;
+suspend_failed:
+    drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
+    phytium_dp_hpd_irq_setup(dev, true);
+
+    return ret;
+}
+
+static int phytium_display_pm_resume(struct drm_device *dev)
+{
+    struct phytium_display_private *priv = dev->dev_private;
+    int ret = 0;
+
+    if (WARN_ON(!dev->mode_config.suspend_state))
+        return -EINVAL;
+
+    ret = phytium_dp_resume(dev);
+    if (ret)
+        return -EIO;
+
+    phytium_crtc_resume(dev);
+    phytium_gem_resume(dev);
+
+    if (priv->support_memory_type & MEMORY_TYPE_VRAM)
+        priv->vram_hw_init(priv);
+
+    ret = drm_atomic_helper_resume(dev,
dev->mode_config.suspend_state); + if (ret) { + DRM_ERROR("Failed to resume (%d)\n", ret); + return ret; + } + + dev->mode_config.suspend_state = NULL; + drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0); + phytium_dp_hpd_irq_setup(dev, true); + + return 0; +} + +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev) +{ + INIT_LIST_HEAD(&priv->gem_list_head); + spin_lock_init(&priv->hotplug_irq_lock); + INIT_WORK(&priv->hotplug_work, phytium_dp_hpd_work_func); + memset(priv->mem_state, 0, sizeof(priv->mem_state)); + priv->dev = dev; + priv->display_shutdown = phytium_display_shutdown; + priv->display_pm_suspend = phytium_display_pm_suspend; + priv->display_pm_resume = phytium_display_pm_resume; +} + +static int __init phytium_display_init(void) +{ + int ret = 0; + + ret = platform_driver_register(&phytium_platform_driver); + if (ret) + return ret; + + ret = pci_register_driver(&phytium_pci_driver); + + return ret; +} + +static void __exit phytium_display_exit(void) +{ + pci_unregister_driver(&phytium_pci_driver); + + platform_driver_unregister(&phytium_platform_driver); +} + +module_init(phytium_display_init); +module_exit(phytium_display_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Yang Xun "); +MODULE_DESCRIPTION("Phytium Display Controller"); diff --git a/drivers/gpu/drm/phytium/phytium_display_drv.h b/drivers/gpu/drm/phytium/phytium_display_drv.h new file mode 100644 index 0000000000000000000000000000000000000000..70080dad86219a05329a9abc0f43f24677a0384f --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_display_drv.h @@ -0,0 +1,174 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_DISPLAY_DRV_H__ +#define __PHYTIUM_DISPLAY_DRV_H__ + +#include +#include + +#define DEBUG_LOG 0 + +#define PHYTIUM_FORMAT_MAX_PLANE 3 +#define DP_MAX_DOWNSTREAM_PORTS 0x10 + +#define DRV_NAME "dc" +#define DRV_DESC "phytium dc" +#define DRV_DATE "20201220" +#define DRV_MAJOR 1 +#define DRV_MINOR 1 + +/* come from GPU */ +#define DRM_FORMAT_MOD_VENDOR_PHYTIUM 0x92 + +/* dc:mode0 8x8 16bpp gpu: FBCDC_8X8_V10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC fourcc_mod_code(PHYTIUM, 21) +/* dc:mode3 8x4 32bpp gpu: FBCDC_16X4_v10 */ +#define DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC fourcc_mod_code(PHYTIUM, 22) + +#define PIPE_MASK_SHIFT 0x0 +#define PIPE_MASK_MASK 0x7 +#define EDP_MASK_SHIFT 0x3 +#define EDP_MASK_MASK 0x7 + +enum phytium_platform { + PHYTIUM_PLATFORM_UNINITIALIZED = 0, + PHYTIUM_PLATFORM_PX210, + PHYTIUM_PLATFORM_PE220X, +}; + +enum phytium_mem_state_type { + PHYTIUM_MEM_VRAM_TOTAL = 0, + PHYTIUM_MEM_VRAM_ALLOC, + PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL, + PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC, + PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC, + PHYTIUM_MEM_STATE_TYPE_COUNT, +}; + +#define MEMORY_TYPE_VRAM 0x1 +#define MEMORY_TYPE_SYSTEM_CARVEOUT 0x2 +#define MEMORY_TYPE_SYSTEM_UNIFIED 0x4 + +#define IS_PLATFORM(priv, p) ((priv)->info.platform_mask & BIT(p)) + +#define IS_PX210(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PX210) +#define IS_PE220X(priv) IS_PLATFORM(priv, PHYTIUM_PLATFORM_PE220X) + +struct phytium_device_info { + unsigned char platform_mask; + unsigned char pipe_mask; + unsigned char num_pipes; + unsigned char total_pipes; + unsigned char edp_mask; + unsigned int crtc_clock_max; + unsigned int hdisplay_max; + unsigned int vdisplay_max; + unsigned int backlight_max; + unsigned long address_mask; +}; + +struct phytium_display_private { + /* hw */ + 
void __iomem *regs; + void __iomem *vram_addr; + struct phytium_device_info info; + char support_memory_type; + char reserve[3]; + uint32_t dc_reg_base[3]; + uint32_t dcreq_reg_base[3]; + uint32_t dp_reg_base[3]; + uint32_t address_transform_base; + uint32_t phy_access_base[3]; + + /* drm */ + struct drm_device *dev; + int irq; + + /* fb_dev */ + struct drm_fb_helper fbdev_helper; + struct phytium_gem_object *fbdev_phytium_gem; + + int save_reg[3]; + struct list_head gem_list_head; + + struct work_struct hotplug_work; + spinlock_t hotplug_irq_lock; + + void (*vram_hw_init)(struct phytium_display_private *priv); + void (*display_shutdown)(struct drm_device *dev); + int (*display_pm_suspend)(struct drm_device *dev); + int (*display_pm_resume)(struct drm_device *dev); + void (*dc_hw_clear_msi_irq)(struct phytium_display_private *priv, uint32_t phys_pipe); + int (*dc_hw_fb_format_check)(const struct drm_mode_fb_cmd2 *mode_cmd, int count); + + struct gen_pool *memory_pool; + resource_size_t pool_phys_addr; + resource_size_t pool_size; + void *pool_virt_addr; + uint64_t mem_state[PHYTIUM_MEM_STATE_TYPE_COUNT]; + + /* DMA info */ + int dma_inited; + struct dma_chan *dma_chan; +}; + +static inline unsigned int +phytium_readl_reg(struct phytium_display_private *priv, uint32_t group_offset, uint32_t reg_offset) +{ + unsigned int data; + + data = readl(priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Read 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif + return data; +} + +static inline void +phytium_writel_reg(struct phytium_display_private *priv, uint32_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + + writel(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 32'h%08x\n", group_offset + reg_offset, data); +#endif +} + +static inline void +phytium_writeb_reg(struct phytium_display_private *priv, uint8_t data, + uint32_t group_offset, uint32_t reg_offset) +{ + writeb(data, priv->regs + group_offset + reg_offset); +#if DEBUG_LOG + pr_info("Write 32'h%08x 8'h%08x\n", group_offset + reg_offset, data); +#endif +} + +#define for_each_pipe(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) + +#define for_each_pipe_masked(__dev_priv, __p) \ + for ((__p) = 0; (__p) < __dev_priv->info.total_pipes; (__p)++) \ + for_each_if((__dev_priv->info.pipe_mask) & BIT(__p)) + +int phytium_get_virt_pipe(struct phytium_display_private *priv, int phys_pipe); +int phytium_get_phys_pipe(struct phytium_display_private *priv, int virt_pipe); +int phytium_wait_cmd_done(struct phytium_display_private *priv, + uint32_t register_offset, + uint32_t request_bit, + uint32_t reply_bit); +void phytium_display_private_init(struct phytium_display_private *priv, struct drm_device *dev); + +extern struct drm_driver phytium_display_drm_driver; +extern int dc_fake_mode_enable; +extern int dc_fast_training_check; +extern int num_source_rates; +extern int source_max_lane_count; +extern int link_dynamic_adjust; + +#endif /* __PHYTIUM_DISPLAY_DRV_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_dp.c b/drivers/gpu/drm/phytium/phytium_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..98a06ccbc48d0ca5e49cc1f999ff91f28e5e2628 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.c @@ -0,0 +1,2639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "phytium_display_drv.h"
+#include "phytium_dp.h"
+#include "phytium_debugfs.h"
+#include "px210_dp.h"
+#include "pe220x_dp.h"
+#include "phytium_panel.h"
+#include "phytium_reg.h"
+
+static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp);
+static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged);
+static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp);
+static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp);
+static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp);
+static void phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp);
+
+static int phytium_rate[] = {162000, 270000, 540000, 810000};
+static int codec_id = PHYTIUM_DP_AUDIO_ID;
+
+void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data)
+{
+    struct drm_device *dev = phytium_dp->dev;
+    struct phytium_display_private *priv = dev->dev_private;
+    int port = phytium_dp->port;
+    uint32_t group_offset = priv->phy_access_base[port];
+
+#if DEBUG_LOG
+    pr_info("phy address write: 0x%x data:0x%x\n", address, data);
+#endif
+    phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS);
+    phytium_writel_reg(priv, data, group_offset, PHYTIUM_PHY_WRITE_DATA);
+    phytium_writel_reg(priv, ACCESS_WRITE, group_offset, PHYTIUM_PHY_ACCESS_CTRL);
+    udelay(10);
+}
+
+uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address)
+{
+    struct drm_device *dev = phytium_dp->dev;
+    struct phytium_display_private *priv = dev->dev_private;
+    int port = phytium_dp->port;
+    uint32_t group_offset = priv->phy_access_base[port];
+    uint32_t data;
+
+    phytium_writel_reg(priv, address, group_offset, PHYTIUM_PHY_ACCESS_ADDRESS);
+    phytium_writel_reg(priv, ACCESS_READ, group_offset, PHYTIUM_PHY_ACCESS_CTRL);
+    udelay(10);
+    data = phytium_readl_reg(priv, group_offset, PHYTIUM_PHY_READ_DATA);
+#if DEBUG_LOG
+    pr_info("phy address read: 0x%x data:0x%x\n", address, data);
+#endif
+
+    return data;
+}
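+
+/*
+ * Indirect PHY access sketch (illustrative): the two helpers above program
+ * the address window, then the data/strobe registers, with a short settle
+ * delay. A read-modify-write would look like the following, where PHY_REG
+ * is a placeholder offset rather than a macro defined by this driver:
+ *
+ *   u32 val = phytium_phy_readl(phytium_dp, PHY_REG);
+ *   phytium_phy_writel(phytium_dp, PHY_REG, val | BIT(0));
+ */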
exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux write reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux write reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux write reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) write reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) write reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) write reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = msg->size; +out: + return ret; +} + +static int +phytium_dp_hw_aux_transfer_read(struct phytium_dp_device *phytium_dp, struct drm_dp_aux_msg *msg) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned int i = 0; + unsigned int cmd = 0; + unsigned int aux_status = 0, interrupt_status = 0; + unsigned char *data = msg->buffer; + int count_timeout = 0; + long ret = 0; + + for (i = 0; i < 3; i++) { + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + phytium_writel_reg(priv, msg->address, group_offset, PHYTIUM_DP_AUX_ADDRESS); + cmd = ((msg->request & COMMAND_MASK) << COMMAND_SHIFT); + if (msg->size == 0) + cmd |= ADDRESS_ONLY; + else + cmd |= ((msg->size-1) & BYTE_COUNT_MASK); + phytium_writel_reg(priv, cmd, group_offset, PHYTIUM_DP_AUX_COMMAND); + + count_timeout = 0; + do { + mdelay(5); + interrupt_status = phytium_readl_reg(priv, group_offset, + PHYTIUM_DP_INTERRUPT_RAW_STATUS); + aux_status = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_STATUS); + if ((aux_status & REPLY_RECEIVED) || (aux_status & REPLY_ERROR) + || (interrupt_status & REPLY_TIMEOUT)) { + DRM_DEBUG_KMS("aux wait exit\n"); + break; + } + count_timeout++; + } while (count_timeout < 6); + + phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS); + if (interrupt_status & REPLY_TIMEOUT) { + DRM_DEBUG_KMS("aux read reply timeout\n"); + continue; + } else if (aux_status & REPLY_ERROR) { + DRM_DEBUG_KMS("aux read reply error\n"); + continue; + } else if (aux_status & REPLY_RECEIVED) { + DRM_DEBUG_KMS("aux read reply received succussful\n"); + break; + } + } + + if (interrupt_status & REPLY_TIMEOUT) { + DRM_NOTE("aux(%d) read reply timeout\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if (aux_status & REPLY_ERROR) { + DRM_ERROR("aux(%d) read reply error\n", phytium_dp->port); + ret = -EIO; + goto out; + } else if ((aux_status & REPLY_RECEIVED) != REPLY_RECEIVED) { + DRM_ERROR("aux(%d) read reply no response\n", phytium_dp->port); + ret = -EIO; + goto out; + } + + msg->reply = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_CODE); + ret = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_AUX_REPLY_DATA_COUNT); + + if (ret > msg->size) { + ret = msg->size; + } else if (ret != msg->size) { + DRM_DEBUG_KMS("aux read count error(ret:0x%lx != 0x%lx)\n", ret, msg->size); + ret = -EBUSY; + goto out; + } + + for (i = 0; i < ret; i++) + data[i] = phytium_readl_reg(priv, group_offset, 
PHYTIUM_DP_AUX_REPLY_DATA); + +out: + return ret; +} + +static void phytium_get_native_mode(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_mode *t, *mode; + struct drm_connector *connector = &phytium_dp->connector; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + + list_for_each_entry_safe(mode, t, &connector->probed_modes, head) { + if (mode->type & DRM_MODE_TYPE_PREFERRED) { + if (mode->hdisplay != native_mode->hdisplay || + mode->vdisplay != native_mode->vdisplay) { + memcpy(native_mode, mode, sizeof(*mode)); + drm_mode_set_crtcinfo(native_mode, 0); + } + break; + } + } + + if (&mode->head == &connector->probed_modes) + native_mode->clock = 0; +} + +static int phytium_connector_add_common_modes(struct phytium_dp_device *phytium_dp) +{ + int i = 0, ret = 0; + struct drm_device *dev = phytium_dp->dev; + struct drm_display_mode *mode = NULL, *current_mode = NULL; + struct drm_display_mode *native_mode = &phytium_dp->native_mode; + bool mode_existed = false; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_mode[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + if (native_mode->clock == 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(common_mode); i++) { + mode_existed = false; + + if (common_mode[i].w > native_mode->hdisplay || + common_mode[i].h > native_mode->vdisplay || + (common_mode[i].w == native_mode->hdisplay && + common_mode[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(current_mode, &phytium_dp->connector.probed_modes, head) { + if (common_mode[i].w == current_mode->hdisplay && + common_mode[i].h == current_mode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = drm_mode_duplicate(dev, native_mode); + if (mode == NULL) + continue; + + mode->hdisplay = common_mode[i].w; + mode->vdisplay = common_mode[i].h; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strscpy(mode->name, common_mode[i].name, DRM_DISPLAY_MODE_LEN); + drm_mode_probed_add(&phytium_dp->connector, mode); + ret++; + } + + return ret; +} + +static int phytium_connector_get_modes(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + struct edid *edid; + int ret = 0; + + if (phytium_dp->is_edp) + edid = phytium_dp->edp_edid; + else + edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + + if (edid && drm_edid_is_valid(edid)) { + drm_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + phytium_dp->has_audio = drm_detect_monitor_audio(edid); + phytium_get_native_mode(phytium_dp); + if (dc_fake_mode_enable) + ret += phytium_connector_add_common_modes(phytium_dp); + } else { + drm_connector_update_edid_property(connector, NULL); + phytium_dp->has_audio = false; + } + + if (!phytium_dp->is_edp) + kfree(edid); + + return ret; +} + +static struct drm_encoder *phytium_dp_best_encoder(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + return &phytium_dp->encoder; +} + +static const +struct drm_connector_helper_funcs phytium_connector_helper_funcs = { + .get_modes = phytium_connector_get_modes, + .best_encoder = phytium_dp_best_encoder, +}; + +static void 
phytium_dp_set_sink_rates(struct phytium_dp_device *phytium_dp)
+{
+    static const int dp_rates[] = {162000, 270000, 540000, 810000};
+    int i, max_rate;
+
+    max_rate = drm_dp_bw_code_to_link_rate(phytium_dp->dpcd[DP_MAX_LINK_RATE]);
+    for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
+        if (dp_rates[i] > max_rate)
+            break;
+        phytium_dp->sink_rates[i] = dp_rates[i];
+    }
+    phytium_dp->num_sink_rates = i;
+}
+
+static int get_common_rates(const int *source_rates, int source_len, const int *sink_rates,
+                            int sink_len, int *common_rates)
+{
+    int i = 0, j = 0, k = 0;
+
+    while (i < source_len && j < sink_len) {
+        if (source_rates[i] == sink_rates[j]) {
+            if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
+                return k;
+            common_rates[k] = source_rates[i];
+            ++k;
+            ++i;
+            ++j;
+        } else if (source_rates[i] < sink_rates[j]) {
+            ++i;
+        } else {
+            ++j;
+        }
+    }
+    return k;
+}
+
+static void phytium_dp_set_common_rates(struct phytium_dp_device *phytium_dp)
+{
+    WARN_ON(!phytium_dp->num_source_rates || !phytium_dp->num_sink_rates);
+
+    phytium_dp->num_common_rates = get_common_rates(phytium_dp->source_rates,
+                                                    phytium_dp->num_source_rates,
+                                                    phytium_dp->sink_rates,
+                                                    phytium_dp->num_sink_rates,
+                                                    phytium_dp->common_rates);
+
+    if (WARN_ON(phytium_dp->num_common_rates == 0)) {
+        phytium_dp->common_rates[0] = 162000;
+        phytium_dp->num_common_rates = 1;
+    }
+}
+
+static bool phytium_dp_get_dpcd(struct phytium_dp_device *phytium_dp)
+{
+    int ret;
+    unsigned char sink_count = 0;
+
+    /*
+     * Read the DPCD capability block; transfer errors are not flagged
+     * here, so sanity-check the revision field instead.
+     */
+    ret = drm_dp_dpcd_read(&phytium_dp->aux, 0x00, phytium_dp->dpcd,
+                           sizeof(phytium_dp->dpcd));
+    if (ret < 0) {
+        DRM_ERROR("port %d get DPCD capability failed\n", phytium_dp->port);
+        return false;
+    }
+
+    if (phytium_dp->dpcd[DP_DPCD_REV] == 0) {
+        DRM_ERROR("DPCD data error: 0x%x\n", phytium_dp->dpcd[DP_DPCD_REV]);
+        return false;
+    }
+
+    /* parse the sink's supported link rates and lane count */
+    phytium_dp_set_sink_rates(phytium_dp);
+    phytium_dp_set_common_rates(phytium_dp);
+    phytium_dp->sink_max_lane_count = drm_dp_max_lane_count(phytium_dp->dpcd);
+    phytium_dp->common_max_lane_count = min(phytium_dp->source_max_lane_count,
+                                            phytium_dp->sink_max_lane_count);
+
+    /* get dpcd sink count */
+    if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_SINK_COUNT, &sink_count) <= 0) {
+        DRM_ERROR("get DPCD sink_count failed\n");
+        return false;
+    }
+
+    phytium_dp->sink_count = DP_GET_SINK_COUNT(sink_count);
+    if (!phytium_dp->sink_count) {
+        DRM_ERROR("DPCD sink_count should not be zero\n");
+        return false;
+    }
+
+    if (!drm_dp_is_branch(phytium_dp->dpcd))
+        return true;
+
+    if (phytium_dp->dpcd[DP_DPCD_REV] == 0x10)
+        return true;
+
+    /* get downstream port for branch device */
+    ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_DOWNSTREAM_PORT_0,
+                           phytium_dp->downstream_ports, DP_MAX_DOWNSTREAM_PORTS);
+    if (ret < 0) {
+        DRM_ERROR("get DPCD DFP failed\n");
+        return false;
+    }
+
+    return true;
+}
+
+static enum drm_connector_status
+phytium_dp_detect_dpcd(struct phytium_dp_device *phytium_dp)
+{
+    if (!phytium_dp_get_dpcd(phytium_dp))
+        return connector_status_disconnected;
+
+    if (!drm_dp_is_branch(phytium_dp->dpcd))
+        return connector_status_connected;
+
+    if (phytium_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
+        return phytium_dp->sink_count ?
connector_status_connected + : connector_status_disconnected; + } + return connector_status_connected; +} + +static void phytium_get_adjust_train(struct phytium_dp_device *phytium_dp, + const uint8_t link_status[DP_LINK_STATUS_SIZE], uint8_t lane_count) +{ + unsigned char v = 0; + unsigned char p = 0; + int lane; + unsigned char voltage_max; + unsigned char preemph_max; + + /* find max value */ + for (lane = 0; lane < lane_count; lane++) { + uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane); + uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane); + + if (this_v > v) + v = this_v; + if (this_p > p) + p = this_p; + } + voltage_max = DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + if (v >= voltage_max) + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; + + preemph_max = DP_TRAIN_PRE_EMPH_LEVEL_3; + if (p >= preemph_max) + p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + + for (lane = 0; lane < 4; lane++) + phytium_dp->train_set[lane] = v | p; +} + +bool phytium_dp_coding_8b10b_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +bool phytium_dp_scrambled_need_enable(unsigned char test_pattern) +{ + switch (test_pattern) { + case PHYTIUM_PHY_TP_SYMBOL_ERROR: + case PHYTIUM_PHY_TP_CP2520_1: + case PHYTIUM_PHY_TP_CP2520_2: + case PHYTIUM_PHY_TP_CP2520_3: + return true; + case PHYTIUM_PHY_TP_D10_2: + case PHYTIUM_PHY_TP_PRBS7: + case PHYTIUM_PHY_TP_80BIT_CUSTOM: + return false; + default: + return false; + } +} + +static void phytium_dp_hw_set_lane_setting(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, + uint8_t train_set) +{ + phytium_dp->funcs->dp_hw_set_phy_lane_setting(phytium_dp, link_rate, train_set); +} + +static void phytium_dp_hw_set_link(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint32_t link_rate) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0, retry = 3; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, lane_count, + group_offset, PHYTIUM_DP_LANE_COUNT_SET); + phytium_writel_reg(priv, + drm_dp_link_rate_to_bw_code(link_rate), + group_offset, PHYTIUM_DP_LINK_BW_SET); + + if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd)) + phytium_writel_reg(priv, ENHANCED_FRAME_ENABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + else + phytium_writel_reg(priv, ENHANCED_FRAME_DISABLE, + group_offset, PHYTIUM_DP_ENHANCED_FRAME_EN); + +try_again: + ret = phytium_dp->funcs->dp_hw_set_phy_lane_and_rate(phytium_dp, lane_count, link_rate); + if ((ret < 0) && retry) { + retry--; + goto try_again; + } +} + +static void phytium_dp_hw_set_test_pattern(struct phytium_dp_device *phytium_dp, + uint8_t lane_count, + uint8_t test_pattern, + uint8_t *custom_pattern, + uint32_t custom_pattern_size) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, val = 0, tmp = 0, i; + uint32_t group_offset = priv->dp_reg_base[port]; + + if ((test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + && custom_pattern && (custom_pattern_size > 0)) { + val = *(int *)custom_pattern; + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0); + 
val = *(int *)(custom_pattern + 4); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1); + val = *(short int *)(custom_pattern + 8); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2); + } + + if (test_pattern == PHYTIUM_PHY_TP_D10_2 || test_pattern == PHYTIUM_PHY_TP_PRBS7 + || test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + else + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + + tmp = test_pattern - PHYTIUM_PHY_TP_NONE + TEST_PATTERN_NONE; + val = 0; + for (i = 0; i < lane_count; i++) + val |= (tmp << (TEST_PATTERN_LANE_SHIFT * i)); + phytium_writel_reg(priv, val, group_offset, PHYTIUM_DP_LINK_QUAL_PATTERN_SET); +} + +static void phytium_dp_hw_set_train_pattern(struct phytium_dp_device *phytium_dp, + uint8_t train_pattern) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, tmp = 0; + uint32_t group_offset = priv->dp_reg_base[port]; + + /* Scrambling is disabled for TPS1/TPS2/3 and enabled for TPS4 */ + if (train_pattern == DP_TRAINING_PATTERN_4 + || train_pattern == DP_TRAINING_PATTERN_DISABLE) { + phytium_writel_reg(priv, SCRAMBLING_ENABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + phytium_writel_reg(priv, SCRAMBLER_RESET, group_offset, + PHYTIUM_DP_FORCE_SCRAMBLER_RESET); + } else { + phytium_writel_reg(priv, SCRAMBLING_DISABLE, group_offset, + PHYTIUM_DP_SCRAMBLING_DISABLE); + } + switch (train_pattern) { + case DP_TRAINING_PATTERN_DISABLE: + tmp = TRAINING_OFF; + break; + case DP_TRAINING_PATTERN_1: + tmp = TRAINING_PATTERN_1; + break; + case DP_TRAINING_PATTERN_2: + tmp = TRAINING_PATTERN_2; + break; + case DP_TRAINING_PATTERN_3: + tmp = TRAINING_PATTERN_3; + break; + case DP_TRAINING_PATTERN_4: + tmp = TRAINING_PATTERN_4; + break; + default: + tmp = TRAINING_OFF; + break; + } + + phytium_writel_reg(priv, tmp, group_offset, PHYTIUM_DP_TRAINING_PATTERN_SET); +} + +void phytium_dp_hw_enable_audio(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int config = 0, config1, data_window = 0; + const struct dp_audio_n_m *n_m = NULL; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE); + + data_window = 90*(phytium_dp->link_rate)/100 + *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay) + /phytium_dp->mode.clock/4; + + phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW); + + n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + if (n_m == NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, phytium_dp->audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + + config1 = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, 
PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_writel_reg(priv, config1, group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+	phytium_writel_reg(priv, config, group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static void phytium_dp_hw_audio_shutdown(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE,
+			   group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE);
+}
+
+static void phytium_dp_hw_audio_digital_mute(struct phytium_dp_device *phytium_dp, bool enable)
+{
+	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	if (enable)
+		phytium_writel_reg(priv, CHANNEL_MUTE_ENABLE,
+				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+	else
+		phytium_writel_reg(priv, SEC_AUDIO_ENABLE,
+				   group_offset, PHYTIUM_DP_SEC_AUDIO_ENABLE);
+}
+
+static int
+phytium_dp_hw_audio_hw_params(struct phytium_dp_device *phytium_dp, struct audio_info audio_info)
+{
+	struct phytium_display_private *priv = phytium_dp->dev->dev_private;
+	int port = phytium_dp->port;
+	int ret = 0, data_window = 0;
+	const struct dp_audio_n_m *n_m = NULL;
+	uint32_t fs, ws, fs_accurac;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	DRM_DEBUG_KMS("%s:set port%d sample_rate(%d) channels(%d) sample_width(%d)\n",
+		      __func__, phytium_dp->port, audio_info.sample_rate,
+		      audio_info.channels, audio_info.sample_width);
+
+	phytium_writel_reg(priv, INPUT_SELECT_I2S, group_offset, PHYTIUM_DP_SEC_INPUT_SELECT);
+	phytium_writel_reg(priv, APB_CLOCK/audio_info.sample_rate,
+			   group_offset, PHYTIUM_DP_SEC_DIRECT_CLKDIV);
+	phytium_writel_reg(priv, audio_info.channels & CHANNEL_MASK,
+			   group_offset, PHYTIUM_DP_SEC_CHANNEL_COUNT);
+	phytium_writel_reg(priv, CHANNEL_MAP_DEFAULT, group_offset, PHYTIUM_DP_SEC_CHANNEL_MAP);
+	data_window = 90*(phytium_dp->link_rate)/100
+		      *(phytium_dp->mode.htotal - phytium_dp->mode.hdisplay)
+		      /phytium_dp->mode.clock/4;
+	phytium_writel_reg(priv, data_window, group_offset, PHYTIUM_DP_SEC_DATA_WINDOW);
+	phytium_writel_reg(priv, 0xb5, group_offset, PHYTIUM_DP_SEC_CS_CATEGORY_CODE);
+
+	phytium_writel_reg(priv, CLOCK_MODE_SYNC, group_offset, PHYTIUM_DP_SEC_CLOCK_MODE);
+	phytium_writel_reg(priv, CS_SOURCE_FORMAT_DEFAULT,
+			   group_offset, PHYTIUM_DP_SEC_CS_SOURCE_FORMAT);
+
+	switch (audio_info.sample_rate) {
+	case 32000:
+		fs = ORIG_FREQ_32000;
+		fs_accurac = SAMPLING_FREQ_32000;
+		break;
+	case 44100:
+		fs = ORIG_FREQ_44100;
+		fs_accurac = SAMPLING_FREQ_44100;
+		break;
+	case 48000:
+		fs = ORIG_FREQ_48000;
+		fs_accurac = SAMPLING_FREQ_48000;
+		break;
+	case 96000:
+		fs = ORIG_FREQ_96000;
+		fs_accurac = SAMPLING_FREQ_96000;
+		break;
+	case 176400:
+		fs = ORIG_FREQ_176400;
+		fs_accurac = SAMPLING_FREQ_176400;
+		break;
+	case 192000:
+		fs = ORIG_FREQ_192000;
+		fs_accurac = SAMPLING_FREQ_192000;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_rate %d\n", audio_info.sample_rate);
+		goto out;
+	}
+
+	switch (audio_info.sample_width) {
+	case 16:
+		ws = WORD_LENGTH_16;
+		break;
+	case 18:
+		ws = WORD_LENGTH_18;
+		break;
+	case 20:
+		ws = WORD_LENGTH_20;
+		break;
+	case 24:
+		ws = WORD_LENGTH_24;
+		break;
+	default:
+		DRM_ERROR("dp does not support sample_width %d\n", audio_info.sample_width);
+		goto out;
+	}
+
+	phytium_writel_reg(priv, ((fs & ORIG_FREQ_MASK) << ORIG_FREQ_SHIFT)
+			   | ((ws & WORD_LENGTH_MASK) << WORD_LENGTH_SHIFT),
+			   group_offset, PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ);
+	phytium_writel_reg(priv, (fs_accurac & SAMPLING_FREQ_MASK) << SAMPLING_FREQ_SHIFT,
+			   group_offset, PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY);
+
+	n_m = phytium_dp_audio_get_n_m(phytium_dp->link_rate, audio_info.sample_rate);
+	if (n_m ==
NULL) { + DRM_NOTE("can not get n_m for link_rate(%d) and sample_rate(%d)\n", + phytium_dp->link_rate, audio_info.sample_rate); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_SEC_NAUD); + + } else { + phytium_writel_reg(priv, n_m->m, group_offset, PHYTIUM_DP_SEC_MAUD); + phytium_writel_reg(priv, n_m->n, group_offset, PHYTIUM_DP_SEC_NAUD); + } + phytium_writel_reg(priv, SECONDARY_STREAM_ENABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + phytium_dp->audio_info = audio_info; + + return 0; + +out: + phytium_writel_reg(priv, SECONDARY_STREAM_DISABLE, + group_offset, PHYTIUM_DP_SECONDARY_STREAM_ENABLE); + + return ret; +} + +void phytium_dp_hw_disable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_DISABLE, + group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); +} + +bool phytium_dp_hw_video_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + return config ? true : false; +} + +void phytium_dp_hw_enable_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, SST_MST_SOURCE_0_ENABLE, + group_offset, PHYTIUM_DP_VIDEO_STREAM_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_config_video(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + unsigned long link_bw, date_rate = 0; + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned char tu_size = 64; + unsigned long data_per_tu = 0; + int symbols_per_tu, frac_symbols_per_tu, symbol_count, udc, value; + + /* cal M/N and tu_size */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_clock/10, group_offset, PHYTIUM_DP_M_VID); + phytium_writel_reg(priv, phytium_dp->link_rate/10, group_offset, PHYTIUM_DP_N_VID); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + /* mul 10 for register setting */ + data_per_tu = 10*tu_size * date_rate/link_bw; + symbols_per_tu = (data_per_tu/10)&0xff; + frac_symbols_per_tu = (data_per_tu%10*16/10) & 0xf; + phytium_writel_reg(priv, frac_symbols_per_tu<<24 | symbols_per_tu<<16 | tu_size, + group_offset, PHYTIUM_DP_TRANSFER_UNIT_SIZE); + + symbol_count = (phytium_dp->mode.crtc_hdisplay*display_info->bpc*3 + 7)/8; + udc = (symbol_count + phytium_dp->link_lane_count - 1)/phytium_dp->link_lane_count; + phytium_writel_reg(priv, udc, group_offset, PHYTIUM_DP_DATA_COUNT); + + /* config main stream attributes */ + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal, + group_offset, PHYTIUM_DP_MAIN_LINK_HTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_hdisplay, + group_offset, 
PHYTIUM_DP_MAIN_LINK_HRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_hsync_end - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_htotal - phytium_dp->mode.crtc_hsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_HSTART); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal, + group_offset, PHYTIUM_DP_MAIN_LINK_VTOTAL); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vdisplay, + group_offset, PHYTIUM_DP_MAIN_LINK_VRES); + phytium_writel_reg(priv, + phytium_dp->mode.crtc_vsync_end - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSWIDTH); + phytium_writel_reg(priv, phytium_dp->mode.crtc_vtotal - phytium_dp->mode.crtc_vsync_start, + group_offset, PHYTIUM_DP_MAIN_LINK_VSTART); + + value = 0; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value & (~HSYNC_POLARITY_LOW); + else + value = value | HSYNC_POLARITY_LOW; + + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value & (~VSYNC_POLARITY_LOW); + else + value = value | VSYNC_POLARITY_LOW; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_POLARITY); + + switch (display_info->bpc) { + case 10: + value = (MISC0_BIT_DEPTH_10BIT << MISC0_BIT_DEPTH_OFFSET); + break; + case 6: + value = (MISC0_BIT_DEPTH_6BIT << MISC0_BIT_DEPTH_OFFSET); + break; + default: + value = (MISC0_BIT_DEPTH_8BIT << MISC0_BIT_DEPTH_OFFSET); + break; + } + value |= (MISC0_COMPONENT_FORMAT_RGB << MISC0_COMPONENT_FORMAT_SHIFT) + | MISC0_SYNCHRONOUS_CLOCK; + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_MAIN_LINK_MISC0); + phytium_writel_reg(priv, 0, group_offset, PHYTIUM_DP_MAIN_LINK_MISC1); + + value = USER_ODDEVEN_POLARITY_HIGH | USER_DATA_ENABLE_POLARITY_HIGH; + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PHSYNC) + value = value | USER_HSYNC_POLARITY_HIGH; + else + value = value & (~USER_HSYNC_POLARITY_HIGH); + if (phytium_dp->mode.flags & DRM_MODE_FLAG_PVSYNC) + value = value | USER_VSYNC_POLARITY_HIGH; + else + value = value & (~USER_VSYNC_POLARITY_HIGH); + phytium_writel_reg(priv, value, group_offset, PHYTIUM_DP_USER_SYNC_POLARITY); +} + +void phytium_dp_hw_disable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_DISABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); +} + +void phytium_dp_hw_enable_output(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, LINK_SOFT_RESET, group_offset, PHYTIUM_DP_SOFT_RESET); + phytium_writel_reg(priv, TRANSMITTER_OUTPUT_ENABLE, + group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE); +} + +void phytium_dp_hw_enable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dp_reg_base[port]; + + phytium_writel_reg(priv, VIRTUAL_SOURCE_0_ENABLE, + group_offset, PHYTIUM_INPUT_SOURCE_ENABLE); +} + +void phytium_dp_hw_disable_input_source(struct phytium_dp_device *phytium_dp) +{ + struct 
drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+
+	phytium_writel_reg(priv, (~VIRTUAL_SOURCE_0_ENABLE) & VIRTUAL_SOURCE_0_ENABLE_MASK,
+			   priv->dp_reg_base[port], PHYTIUM_INPUT_SOURCE_ENABLE);
+}
+
+bool phytium_dp_hw_output_is_enable(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+	int config = 0;
+
+	config = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE);
+	return config ? true : false;
+}
+
+static void phytium_dp_hw_get_hpd_state(struct phytium_dp_device *phytium_dp)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t val = 0, raw_state = 0;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	val = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_RAW_STATUS);
+
+	/* an hpd might otherwise be missed, so RAW_STATUS was read above;
+	 * this read of PHYTIUM_DP_INTERRUPT_STATUS serves only to clear
+	 * PHYTIUM_DP_INTERRUPT_RAW_STATUS
+	 */
+	phytium_readl_reg(priv, group_offset, PHYTIUM_DP_INTERRUPT_STATUS);
+	raw_state = phytium_readl_reg(priv, group_offset, PHYTIUM_DP_SINK_HPD_STATE);
+	if (val & HPD_EVENT)
+		phytium_dp->dp_hpd_state.hpd_event_state = true;
+
+	if (val & HPD_IRQ)
+		phytium_dp->dp_hpd_state.hpd_irq_state = true;
+
+	if (raw_state & HPD_CONNECT)
+		phytium_dp->dp_hpd_state.hpd_raw_state = true;
+	else
+		phytium_dp->dp_hpd_state.hpd_raw_state = false;
+}
+
+void phytium_dp_hw_hpd_irq_setup(struct phytium_dp_device *phytium_dp, bool enable)
+{
+	struct drm_device *dev = phytium_dp->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	int port = phytium_dp->port;
+	uint32_t group_offset = priv->dp_reg_base[port];
+
+	phytium_dp->dp_hpd_state.hpd_irq_enable = enable;
+	if (enable)
+		phytium_writel_reg(priv, HPD_OTHER_MASK, group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+	else
+		phytium_writel_reg(priv, HPD_IRQ_MASK|HPD_EVENT_MASK|HPD_OTHER_MASK,
+				   group_offset, PHYTIUM_DP_INTERRUPT_MASK);
+}
+
+int phytium_dp_hw_init(struct phytium_dp_device *phytium_dp)
+{
+	int ret = 0;
+	uint8_t count = 0;
+
+	phytium_dp->source_rates = phytium_rate;
+	phytium_dp->num_source_rates = num_source_rates;
+	count = phytium_dp->funcs->dp_hw_get_source_lane_count(phytium_dp);
+	phytium_dp->source_max_lane_count = count;
+
+	ret = phytium_dp->funcs->dp_hw_reset(phytium_dp);
+	if (ret)
+		goto out;
+	ret = phytium_dp->funcs->dp_hw_init_phy(phytium_dp);
+	if (ret)
+		goto out;
+
+	phytium_dp->fast_train_support = false;
+	phytium_dp->hw_spread_enable = phytium_dp->funcs->dp_hw_spread_is_enable(phytium_dp);
+
+out:
+	return ret;
+}
+
+static int phytium_dp_dpcd_get_tp_link(struct phytium_dp_device *phytium_dp,
+				       uint8_t *test_lane_count,
+				       uint32_t *test_link_rate)
+{
+	uint8_t test_link_bw;
+	int ret;
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LANE_COUNT,
+				test_lane_count);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern lane count read failed(%d)\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_LINK_RATE,
+				&test_link_bw);
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("test pattern link rate read failed(%d)\n", ret);
+		goto failed;
+	}
+	*test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_link(struct phytium_dp_device *phytium_dp,
+				    uint8_t lane_count, uint32_t link_rate)
+{
+	uint8_t
link_config[2];
+	int ret = 0;
+
+	link_config[0] = drm_dp_link_rate_to_bw_code(link_rate);
+	link_config[1] = lane_count;
+	if (drm_dp_enhanced_frame_cap(phytium_dp->dpcd))
+		link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_LINK_BW_SET, link_config, 2);
+	if (ret < 0) {
+		DRM_NOTE("write dpcd DP_LINK_BW_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp->hw_spread_enable)
+		link_config[0] = DP_SPREAD_AMP_0_5;
+	else
+		link_config[0] = 0;
+	link_config[1] = DP_SET_ANSI_8B10B;
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
+	if (ret < 0) {
+		DRM_ERROR("write DP_DOWNSPREAD_CTRL fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_test_pattern(struct phytium_dp_device *phytium_dp,
+					    uint8_t test_pattern)
+{
+	unsigned char value;
+	int ret;
+
+	if (phytium_dp_coding_8b10b_need_enable(test_pattern))
+		value = DP_SET_ANSI_8B10B;
+	else
+		value = 0;
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_MAIN_LINK_CHANNEL_CODING_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	if (phytium_dp_scrambled_need_enable(test_pattern))
+		value = DP_TRAINING_PATTERN_DISABLE;
+	else
+		value = (DP_TRAINING_PATTERN_DISABLE | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_LINK_QUAL_LANE0_SET, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("write DP_LINK_QUAL_LANE0_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int phytium_dp_dpcd_set_train_pattern(struct phytium_dp_device *phytium_dp,
+					     uint8_t train_pattern)
+{
+	uint8_t value;
+	int ret;
+
+	/* Scrambling is disabled for TPS1/2/3 and enabled for TPS4 */
+	if (train_pattern == DP_TRAINING_PATTERN_4 || train_pattern == DP_TRAINING_PATTERN_DISABLE)
+		value = train_pattern;
+	else
+		value = (train_pattern | DP_LINK_SCRAMBLING_DISABLE);
+
+	ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TRAINING_PATTERN_SET, value);
+	if (ret < 0) {
+		DRM_NOTE("write DP_TRAINING_PATTERN_SET fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	return 0;
+failed:
+	return ret;
+}
+
+static int
+phytium_dp_dpcd_set_lane_setting(struct phytium_dp_device *phytium_dp, uint8_t *train_set)
+{
+	int ret = 0;
+
+	ret = drm_dp_dpcd_write(&phytium_dp->aux, DP_TRAINING_LANE0_SET,
+				phytium_dp->train_set, 4);
+	if (ret < 0) {
+		DRM_ERROR("write DP_TRAINING_LANE0_SET fail: ret:%d\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int
+phytium_dp_dpcd_get_adjust_request(struct phytium_dp_device *phytium_dp, uint8_t lane_count)
+{
+	int ret = 0;
+	uint8_t link_status[DP_LINK_STATUS_SIZE];
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		goto failed;
+	}
+	phytium_get_adjust_train(phytium_dp, link_status, lane_count);
+
+	return 0;
+failed:
+	return ret;
+}
+
+void phytium_dp_dpcd_sink_dpms(struct phytium_dp_device *phytium_dp, int mode)
+{
+	int ret, i;
+
+	if (phytium_dp->dpcd[DP_DPCD_REV] < 0x11)
+		return;
+	if (mode != DRM_MODE_DPMS_ON) {
+		ret = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D3);
+	} else {
+		for (i = 0; i < 3; i++) {
+			ret =
drm_dp_dpcd_writeb(&phytium_dp->aux, DP_SET_POWER, DP_SET_POWER_D0);
+			if (ret == 1)
+				break;
+			msleep(20);
+		}
+	}
+
+	if (ret != 1)
+		DRM_DEBUG_KMS("failed to %s sink power state\n",
+			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
+}
+
+static bool phytium_dp_link_training_clock_recovery(struct phytium_dp_device *phytium_dp)
+{
+	int ret;
+	unsigned char voltage, max_vswing_tries;
+	int voltage_tries;
+
+	/* clear the test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count,
+				       PHYTIUM_PHY_TP_NONE, NULL, 0);
+
+	/* config source and sink's link rate and lane count */
+	phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->link_lane_count,
+				       phytium_dp->link_rate);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_dpcd_set_link failed(ret=%d)\n", ret);
+		return false;
+	}
+
+	/* config source's voltage swing and pre-emphasis(103-106) */
+	memset(phytium_dp->train_set, 0, sizeof(phytium_dp->train_set));
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+				       phytium_dp->train_set[0]);
+
+	/* config train pattern */
+	phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret);
+		return false;
+	}
+
+	/* config sink's voltage swing and pre-emphasis(103-106) */
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		return false;
+	}
+
+	voltage_tries = 1;
+	max_vswing_tries = 0;
+	for (;;) {
+		unsigned char link_status[DP_LINK_STATUS_SIZE];
+
+		drm_dp_link_train_clock_recovery_delay(&phytium_dp->aux, phytium_dp->dpcd);
+		/* get link status 0x202-0x207 */
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+				       link_status, DP_LINK_STATUS_SIZE);
+		if (ret < 0) {
+			DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+			return false;
+		}
+
+		if (drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+			DRM_DEBUG_KMS("clock recovery ok\n");
+			return true;
+		}
+
+		if (voltage_tries == 5) {
+			DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+			return false;
+		}
+
+		if (max_vswing_tries == 1) {
+			DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+			return false;
+		}
+
+		voltage = phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* config source and sink's voltage swing and pre-emphasis(103-106) */
+		phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count);
+		phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate,
+					       phytium_dp->train_set[0]);
+		ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+		if (ret < 0) {
+			DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+			return false;
+		}
+
+		if ((phytium_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage)
+			++voltage_tries;
+		else
+			voltage_tries = 1;
+
+		if (phytium_dp->train_set[0] & DP_TRAIN_MAX_SWING_REACHED)
+			++max_vswing_tries;
+
+		DRM_DEBUG_KMS("try train_set:0x%x voltage_tries:%d max_vswing_tries:%d\n",
+			      phytium_dp->train_set[0], voltage_tries, max_vswing_tries);
+	}
+}
+
+static unsigned int phytium_dp_get_training_pattern(struct phytium_dp_device *phytium_dp)
+{
+	bool sink_tps3, sink_tps4;
+
+	sink_tps4 = drm_dp_tps4_supported(phytium_dp->dpcd);
+	if (sink_tps4)
+		return DP_TRAINING_PATTERN_4;
+	else if (phytium_dp->link_rate
== 810000) + DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n"); + + sink_tps3 = drm_dp_tps3_supported(phytium_dp->dpcd); + if (sink_tps3) + return DP_TRAINING_PATTERN_3; + else if (phytium_dp->link_rate >= 540000) + DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + + return DP_TRAINING_PATTERN_2; +} + +static bool phytium_dp_link_training_channel_equalization(struct phytium_dp_device *phytium_dp) +{ + unsigned int training_pattern; + int tries, ret; + unsigned char link_status[DP_LINK_STATUS_SIZE]; + bool channel_eq = false; + + /* config source and sink's voltage swing and pre-emphasis(103-106), from clock recovery */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + return channel_eq; + } + + /* config source and sink's train_pattern x */ + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, training_pattern); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return channel_eq; + } + + for (tries = 0; tries < 5; tries++) { + drm_dp_link_train_channel_eq_delay(&phytium_dp->aux, phytium_dp->dpcd); + + /* get link status 0x202-0x207 */ + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + break; + } + + /* Make sure clock is still ok */ + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("CR check failed, cannot continue channel equalization\n"); + break; + } + + if (drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + channel_eq = true; + DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); + break; + } + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_get_adjust_train(phytium_dp, link_status, phytium_dp->link_lane_count); + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set); + if (ret < 0) { + DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret); + break; + } + } + + /* Try 5 times, else fail and try at lower BW */ + if (tries == 5) + DRM_DEBUG_KMS("Channel equalization failed 5 times\n"); + + return channel_eq; +} + +static void phytium_dp_train_retry_work_fn(struct work_struct *work) +{ + struct phytium_dp_device *phytium_dp = train_retry_to_dp_device(work); + struct drm_connector *connector; + + connector = &phytium_dp->connector; + DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name); + mutex_lock(&connector->dev->mode_config.mutex); + drm_connector_set_link_status_property(connector, DRM_MODE_LINK_STATUS_BAD); + mutex_unlock(&connector->dev->mode_config.mutex); + drm_kms_helper_hotplug_event(connector->dev); +} + +/* return index of rate in rates array, or -1 if not found */ +static int phytium_dp_rate_index(const int *rates, int len, int rate) +{ + int i; + + for (i = 0; i < len; i++) + if (rate == rates[i]) + return i; + + return -1; +} + +int phytium_dp_get_link_train_fallback_values(struct phytium_dp_device *phytium_dp) +{ + int index, ret = 0; + + if (phytium_dp->is_edp) { + phytium_dp->train_retry_count++; + DRM_INFO("Retrying Link training for eDP(%d) with same parameters\n", + phytium_dp->port); + goto out; + } else { + index = phytium_dp_rate_index(phytium_dp->common_rates, + phytium_dp->num_common_rates, + phytium_dp->link_rate); + if (index > 0) { + phytium_dp->link_rate = phytium_dp->common_rates[index - 1]; + } else if (phytium_dp->link_lane_count > 1) { + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->link_lane_count >> 1; + } else { + phytium_dp->train_retry_count++; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_INFO("Retrying Link training for DP(%d) with maximal parameters\n", + phytium_dp->port); + ret = -1; + } + } + +out: + return ret; +} + +static int +phytium_dp_stop_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret; + + /* config source and sink's train_pattern x: DP_TRAINING_PATTERN_DISABLE */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + ret = phytium_dp_dpcd_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + if (ret < 0) { + DRM_NOTE("phytium_dp_dpcd_set_train_pattern fail: ret:%d\n", ret); + return ret; + } + + return 0; +} + +int phytium_dp_start_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + + phytium_dp_hw_disable_output(phytium_dp); + phytium_dp_hw_disable_input_source(phytium_dp); + phytium_dp_hw_disable_video(phytium_dp); + phytium_dp_hw_enable_input_source(phytium_dp); + phytium_dp_hw_enable_output(phytium_dp); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_OFF); + phytium_dp_dpcd_sink_dpms(phytium_dp, DRM_MODE_DPMS_ON); + + if (!phytium_dp_link_training_clock_recovery(phytium_dp)) + goto failure_handling; + + if (!phytium_dp_link_training_channel_equalization(phytium_dp)) + goto failure_handling; + + ret = phytium_dp_stop_link_train(phytium_dp); + if (ret < 0) { + DRM_NOTE("phytium_dp_stop_link_train failed: ret = 
%d\n", ret);
+		goto out;
+	}
+
+	if (phytium_dp->trigger_train_fail) {
+		phytium_dp->trigger_train_fail--;
+		goto failure_handling;
+	}
+	phytium_dp->train_retry_count = 0;
+
+	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Pass at Link Rate = %d, Lane count = %d\n",
+		      phytium_dp->connector.base.id,
+		      phytium_dp->connector.name, phytium_dp->link_rate,
+		      phytium_dp->link_lane_count);
+
+	return 0;
+
+failure_handling:
+	DRM_INFO("[CONNECTOR:%d:%s] Link Training failed at Link Rate = %d, Lane count = %d\n",
+		 phytium_dp->connector.base.id,
+		 phytium_dp->connector.name,
+		 phytium_dp->link_rate, phytium_dp->link_lane_count);
+
+	ret = phytium_dp_stop_link_train(phytium_dp);
+	if (ret < 0) {
+		DRM_NOTE("phytium_dp_stop_link_train failed: ret = %d\n", ret);
+		goto out;
+	}
+
+	phytium_dp_get_link_train_fallback_values(phytium_dp);
+	if (phytium_dp->train_retry_count < 5)
+		schedule_work(&phytium_dp->train_retry_work);
+	else
+		DRM_ERROR("DP(%d) Link Training Unsuccessful, stopping training\n",
+			  phytium_dp->port);
+
+out:
+	return -1;
+}
+
+static bool phytium_dp_needs_link_retrain(struct phytium_dp_device *phytium_dp)
+{
+	unsigned char link_status[DP_LINK_STATUS_SIZE];
+	int ret = 0;
+
+	/* get link status 0x202-0x207 */
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS,
+			       link_status, DP_LINK_STATUS_SIZE);
+	if (ret < 0) {
+		DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n");
+		return true;
+	}
+
+	if ((phytium_dp->link_rate == 0) || (phytium_dp->link_lane_count == 0)) {
+		DRM_DEBUG_KMS("link_rate(%d) or lane_count(%d) is invalid\n",
+			      phytium_dp->link_rate, phytium_dp->link_lane_count);
+		return true;
+	}
+
+	/* Make sure clock is still ok */
+	if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Clock recovery check failed\n");
+		return true;
+	}
+
+	if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) {
+		DRM_DEBUG_KMS("Channel EQ check failed\n");
+		return true;
+	}
+
+	if (!phytium_dp_hw_output_is_enable(phytium_dp)) {
+		DRM_DEBUG_KMS("check DP output enable failed\n");
+		return true;
+	}
+	return false;
+}
+
+static bool
+phytium_dp_get_sink_irq(struct phytium_dp_device *phytium_dp, u8 *sink_irq_vector)
+{
+	return drm_dp_dpcd_readb(&phytium_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+				 sink_irq_vector) == 1;
+}
+
+static uint8_t phytium_dp_autotest_phy_pattern(struct phytium_dp_device *phytium_dp)
+{
+	union phytium_phy_tp phytium_phy_tp;
+	int ret;
+	unsigned char test_80_bit_pattern[
+			(DP_TEST_80BIT_CUSTOM_PATTERN_79_72 -
+			 DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0};
+	unsigned char test_pattern;
+	unsigned int offset;
+
+	offset = DP_PHY_TEST_PATTERN;
+
+	ret = drm_dp_dpcd_read(&phytium_dp->aux, offset,
+			       &phytium_phy_tp.raw,
+			       sizeof(phytium_phy_tp));
+	if (ret <= 0) {
+		DRM_DEBUG_KMS("Could not read DP_PHY_TEST_PATTERN\n");
+		goto failed;
+	}
+
+	test_pattern = phytium_phy_tp.bits.PATTERN;
+
+	if (test_pattern == PHYTIUM_PHY_TP_80BIT_CUSTOM) {
+		ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0,
+				       test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+		if (ret <= 0) {
+			DRM_DEBUG_KMS("Could not read DP_TEST_80BIT_CUSTOM_PATTERN\n");
+			goto failed;
+		}
+	}
+
+	/* config source and sink's link rate and lane count */
+	ret = phytium_dp_dpcd_get_tp_link(phytium_dp, &phytium_dp->compliance.test_lane_count,
+					  &phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_tp_link fail: ret:%d\n", ret);
+		goto failed;
+	}
+
+	phytium_dp_hw_set_link(phytium_dp,
phytium_dp->compliance.test_lane_count,
+			       phytium_dp->compliance.test_link_rate);
+	ret = phytium_dp_dpcd_set_link(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       phytium_dp->compliance.test_link_rate);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_link fail: ret:%d\n", ret);
+		goto failed_dpcd_set_link;
+	}
+
+	/* config source and sink's lane setting: voltage swing and pre-emphasis */
+	ret = phytium_dp_dpcd_get_adjust_request(phytium_dp,
+						 phytium_dp->compliance.test_lane_count);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_get_adjust_request fail: ret:%d\n", ret);
+		goto failed_dpcd_get_adjust_request;
+	}
+	phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->compliance.test_link_rate,
+				       phytium_dp->train_set[0]);
+	ret = phytium_dp_dpcd_set_lane_setting(phytium_dp, phytium_dp->train_set);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_lane_setting fail: ret:%d\n", ret);
+		goto failed_dpcd_set_lane_setting;
+	}
+
+	/* config test pattern */
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       test_pattern, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+	ret = phytium_dp_dpcd_set_test_pattern(phytium_dp, test_pattern);
+	if (ret < 0) {
+		DRM_ERROR("phytium_dp_dpcd_set_test_pattern fail: ret:%d\n", ret);
+		goto failed_dpcd_set_tp;
+	}
+
+	return DP_TEST_ACK;
+
+failed_dpcd_set_tp:
+	phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->compliance.test_lane_count,
+				       PHYTIUM_PHY_TP_NONE, test_80_bit_pattern,
+				       sizeof(test_80_bit_pattern));
+failed_dpcd_set_link:
+failed_dpcd_set_lane_setting:
+failed_dpcd_get_adjust_request:
+failed:
+	return DP_TEST_NAK;
+}
+
+static void phytium_dp_handle_test_request(struct phytium_dp_device *phytium_dp)
+{
+	uint8_t response = DP_TEST_NAK;
+	uint8_t request = 0;
+	int status;
+
+	status = drm_dp_dpcd_readb(&phytium_dp->aux, DP_TEST_REQUEST, &request);
+	if (status <= 0) {
+		DRM_DEBUG_KMS("Could not read test request from sink\n");
+		goto update_status;
+	}
+
+	switch (request) {
+	case DP_TEST_LINK_TRAINING:
+	case DP_TEST_LINK_VIDEO_PATTERN:
+	case DP_TEST_LINK_EDID_READ:
+		DRM_DEBUG_KMS("Test request '%02x' not supported\n", request);
+		response = DP_TEST_NAK;
+		break;
+	case DP_TEST_LINK_PHY_TEST_PATTERN:
+		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
+		response = phytium_dp_autotest_phy_pattern(phytium_dp);
+		break;
+	default:
+		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
+		break;
+	}
+
+update_status:
+	status = drm_dp_dpcd_writeb(&phytium_dp->aux, DP_TEST_RESPONSE, response);
+	if (status <= 0)
+		DRM_DEBUG_KMS("Could not write test response to sink\n");
+
+}
+
+static int phytium_dp_long_pulse(struct drm_connector *connector, bool hpd_raw_state)
+{
+	struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector);
+	enum drm_connector_status status = connector->status;
+	bool video_enable = false;
+	uint32_t index = 0;
+
+	if (phytium_dp->is_edp)
+		status = connector_status_connected;
+	else if (hpd_raw_state) {
+		if (!phytium_dp_needs_link_retrain(phytium_dp)) {
+			status = connector_status_connected;
+			goto out;
+		}
+	} else {
+		status = connector_status_disconnected;
+		goto out;
+	}
+
+	if (!phytium_dp->is_edp) {
+		status = phytium_dp_detect_dpcd(phytium_dp);
+		if (status == connector_status_disconnected)
+			goto out;
+
+		index = phytium_dp->num_common_rates-1;
+		phytium_dp->max_link_rate = phytium_dp->common_rates[index];
+		phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count;
+		phytium_dp->link_rate = phytium_dp->max_link_rate;
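+		/*
+		 * Start from the highest rate and lane count that both ends
+		 * advertise; if training fails,
+		 * phytium_dp_get_link_train_fallback_values() first steps
+		 * down through common_rates (e.g. 540000 -> 270000 -> 162000,
+		 * i.e. 5.4 -> 2.7 -> 1.62 Gbps) and then halves the lane
+		 * count before retrying.
+		 */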
phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + } + +out: + return status; +} + +static int phytium_dp_short_pulse(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + enum drm_connector_status status = connector->status; + u8 sink_irq_vector = 0; + bool video_enable = false; + + /* handle the test pattern */ + if (phytium_dp_get_sink_irq(phytium_dp, &sink_irq_vector) && + sink_irq_vector != 0) { + drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_DEVICE_SERVICE_IRQ_VECTOR, + sink_irq_vector); + if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) + phytium_dp_handle_test_request(phytium_dp); + if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) + DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); + } + if (!phytium_dp_needs_link_retrain(phytium_dp)) { + status = connector_status_connected; + goto out; + } + + video_enable = phytium_dp_hw_video_is_enable(phytium_dp); + phytium_dp_start_link_train(phytium_dp); + if (video_enable) { + mdelay(2); + phytium_dp_hw_enable_video(phytium_dp); + } + +out: + return status; +} + +void phytium_dp_hpd_poll_handler(struct phytium_display_private *priv) +{ + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug poll functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); +} + +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + struct phytium_display_private *priv = dev->dev_private; + bool handler = false; + bool hpd_raw_state_old = false; + + /* We might have missed any hotplugs that happened, so polling and handler */ + if (enable) { + spin_lock_irq(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (!phytium_dp->dp_hpd_state.hpd_irq_enable) { + hpd_raw_state_old = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state + || (hpd_raw_state_old != phytium_dp->dp_hpd_state.hpd_raw_state)) { + handler = true; + } + } + } + spin_unlock_irq(&priv->hotplug_irq_lock); + if (handler) + phytium_dp_hpd_poll_handler(priv); + } + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + 
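+		/*
+		 * Apply the new mask state to every port: with enable set,
+		 * only HPD_OTHER_MASK stays masked so hotplug events and
+		 * short-pulse IRQs get through; otherwise all three sources
+		 * are masked (see phytium_dp_hw_hpd_irq_setup()).
+		 */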
phytium_dp_hw_hpd_irq_setup(phytium_dp, enable); + } +} + +void phytium_dp_hpd_work_func(struct work_struct *work) +{ + struct phytium_display_private *priv = + container_of(work, struct phytium_display_private, hotplug_work); + struct drm_device *dev = priv->dev; + struct drm_connector_list_iter conn_iter; + struct drm_connector *connector; + enum drm_connector_status old_status; + bool changed = false; + + mutex_lock(&dev->mode_config.mutex); + DRM_DEBUG_KMS("running encoder hotplug work functions\n"); + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + if (connector->force) + continue; + old_status = connector->status; + connector->status = drm_helper_probe_detect(connector, NULL, false); + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", + connector->base.id, + connector->name, + old, new); + changed = true; + } + } + drm_connector_list_iter_end(&conn_iter); + mutex_unlock(&dev->mode_config.mutex); + + if (changed) + drm_kms_helper_hotplug_event(dev); + + phytium_dp_hpd_irq_setup(dev, true); +} + +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv) +{ + struct drm_encoder *encoder = NULL; + struct phytium_dp_device *phytium_dp = NULL; + struct drm_device *dev = priv->dev; + bool handler = false; + + spin_lock(&priv->hotplug_irq_lock); + + drm_for_each_encoder(encoder, dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->dp_hpd_state.hpd_irq_enable) { + phytium_dp_hw_get_hpd_state(phytium_dp); + if (phytium_dp->dp_hpd_state.hpd_event_state + || phytium_dp->dp_hpd_state.hpd_irq_state) { + handler = true; + } + } + } + spin_unlock(&priv->hotplug_irq_lock); + + if (handler) { + phytium_dp_hpd_irq_setup(dev, false); + schedule_work(&priv->hotplug_work); + return IRQ_HANDLED; + } + return IRQ_NONE; +} + + +static void phytium_dp_fast_link_train_detect(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->fast_train_support = !!(phytium_dp->dpcd[DP_MAX_DOWNSPREAD] + & DP_NO_AUX_HANDSHAKE_LINK_TRAINING); + DRM_DEBUG_KMS("fast link training %s\n", + phytium_dp->fast_train_support ? 
"supported" : "unsupported"); +} + +bool phytium_dp_fast_link_train(struct phytium_dp_device *phytium_dp) +{ + int ret = 0; + unsigned int training_pattern; + + /* clear the test pattern */ + phytium_dp_hw_set_test_pattern(phytium_dp, phytium_dp->link_lane_count, + PHYTIUM_PHY_TP_NONE, NULL, 0); + + /* config source and sink's link rate and lane count */ + phytium_dp_hw_set_link(phytium_dp, phytium_dp->link_lane_count, phytium_dp->link_rate); + + /* config source and sink's voltage swing and pre-emphasis(103-106) */ + phytium_dp_hw_set_lane_setting(phytium_dp, phytium_dp->link_rate, + phytium_dp->train_set[0]); + + /* config train pattern */ + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_1); + usleep_range(500, 600); + + training_pattern = phytium_dp_get_training_pattern(phytium_dp); + phytium_dp_hw_set_train_pattern(phytium_dp, training_pattern); + usleep_range(500, 600); + + phytium_dp_hw_set_train_pattern(phytium_dp, DP_TRAINING_PATTERN_DISABLE); + + if (dc_fast_training_check) { + unsigned char link_status[DP_LINK_STATUS_SIZE]; + + ret = drm_dp_dpcd_read(&phytium_dp->aux, DP_LANE0_1_STATUS, + link_status, DP_LINK_STATUS_SIZE); + if (ret < 0) { + DRM_ERROR("failed to get link status(DP_LANE0_1_STATUS)\n"); + return false; + } + + if (!drm_dp_clock_recovery_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check clock recovery failed\n"); + return false; + } + + if (!drm_dp_channel_eq_ok(link_status, phytium_dp->link_lane_count)) { + DRM_DEBUG_KMS("check channel equalization failed\n"); + return false; + } + } + + return true; +} + +static enum drm_connector_status +phytium_connector_detect(struct drm_connector *connector, bool force) +{ + enum drm_connector_status status = connector->status; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + bool hpd_event_state, hpd_irq_state, hpd_raw_state; + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + bool plugged = true; + + spin_lock_irq(&priv->hotplug_irq_lock); + hpd_event_state = phytium_dp->dp_hpd_state.hpd_event_state; + hpd_irq_state = phytium_dp->dp_hpd_state.hpd_irq_state; + hpd_raw_state = phytium_dp->dp_hpd_state.hpd_raw_state; + phytium_dp->dp_hpd_state.hpd_event_state = false; + phytium_dp->dp_hpd_state.hpd_irq_state = false; + spin_unlock_irq(&priv->hotplug_irq_lock); + + if (hpd_event_state) + status = phytium_dp_long_pulse(connector, hpd_raw_state); + + if (hpd_irq_state) + status = phytium_dp_short_pulse(connector); + + if (status == connector_status_unknown) + status = connector_status_disconnected; + + if ((!phytium_dp->is_edp) && (!hpd_raw_state)) + status = connector_status_disconnected; + + if (connector->status != status) { + if ((status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + } + + return status; +} + +static void +phytium_connector_destroy(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + drm_connector_cleanup(connector); + kfree(phytium_dp); +} + +static int +phytium_dp_connector_register(struct drm_connector *connector) +{ + int ret; + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + phytium_dp_aux_init(phytium_dp); + if (phytium_dp->is_edp) { + phytium_edp_init_connector(phytium_dp); + ret = phytium_edp_backlight_device_register(phytium_dp); + if (ret) + DRM_ERROR("failed to register port(%d) backlight 
device(ret=%d)\n", + phytium_dp->port, ret); + } + + ret = phytium_debugfs_connector_add(connector); + if (ret) + DRM_ERROR("failed to register phytium connector debugfs(ret=%d)\n", ret); + + return 0; +} + +static void +phytium_dp_connector_unregister(struct drm_connector *connector) +{ + struct phytium_dp_device *phytium_dp = connector_to_dp_device(connector); + + if (phytium_dp->is_edp) { + phytium_edp_backlight_device_unregister(phytium_dp); + phytium_edp_fini_connector(phytium_dp); + } + drm_dp_aux_unregister(&phytium_dp->aux); +} + +static const struct drm_connector_funcs phytium_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = phytium_connector_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = phytium_connector_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, + .late_register = phytium_dp_connector_register, + .early_unregister = phytium_dp_connector_unregister, +}; + +static void phytium_dp_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted) +{ + struct phytium_dp_device *dp = encoder_to_dp_device(encoder); + + drm_mode_copy(&dp->mode, adjusted); +} + +static void phytium_edp_panel_poweron(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweron(&phytium_dp->panel); +} + +static void phytium_edp_panel_poweroff(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_poweroff(&phytium_dp->panel); +} + +static void phytium_edp_backlight_on(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_enable_backlight(&phytium_dp->panel); +} + +static void phytium_edp_backlight_off(struct phytium_dp_device *phytium_dp) +{ + phytium_panel_disable_backlight(&phytium_dp->panel); +} + +static void phytium_encoder_disable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + if (phytium_dp->is_edp) + phytium_edp_backlight_off(phytium_dp); + + phytium_dp_hw_disable_video(phytium_dp); + + mdelay(50); + + if (phytium_dp->is_edp) + phytium_edp_panel_poweroff(phytium_dp); +} + +void phytium_dp_adjust_link_train_parameter(struct phytium_dp_device *phytium_dp) +{ + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned long link_bw, date_rate = 0, bs_limit, bs_request; + int rate = 0; + + bs_request = phytium_dp->mode.crtc_htotal/(phytium_dp->mode.crtc_clock/1000); + date_rate = (phytium_dp->mode.crtc_clock * display_info->bpc * 3)/8; + + for (;;) { + bs_limit = 8192 / (phytium_dp->link_rate/1000); + link_bw = phytium_dp->link_rate * phytium_dp->link_lane_count; + rate = 10 * date_rate / link_bw; + DRM_DEBUG_KMS("adjust link rate(%d), lane count(%d)\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); + DRM_DEBUG_KMS("for crtc_clock(%d) bs_request(%ld) bs_limit(%ld) rate(%d)\n", + phytium_dp->mode.crtc_clock, bs_request, bs_limit, rate); + if ((link_dynamic_adjust && (bs_request < bs_limit) && rate < 10) || + ((!link_dynamic_adjust) && (rate < 10))) + break; + phytium_dp_get_link_train_fallback_values(phytium_dp); + } + + DRM_DEBUG_KMS("Try link training at Link Rate = %d, Lane count = %d\n", + phytium_dp->link_rate, phytium_dp->link_lane_count); +} + +static void phytium_encoder_enable(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + int ret = 0; + + phytium_dp_hw_disable_video(phytium_dp); 
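+	/*
+	 * Enable sequence: stop the video stream, retrain the link (fast
+	 * training for eDP panels that advertise
+	 * DP_NO_AUX_HANDSHAKE_LINK_TRAINING, full training otherwise),
+	 * program the main stream attributes, then turn the stream and,
+	 * if present, audio back on.
+	 */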
+ + if (phytium_dp->is_edp) { + phytium_edp_panel_poweron(phytium_dp); + if (phytium_dp->fast_train_support) + phytium_dp_fast_link_train(phytium_dp); + else + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + phytium_dp_fast_link_train_detect(phytium_dp); + } else { + phytium_dp_adjust_link_train_parameter(phytium_dp); + ret = phytium_dp_start_link_train(phytium_dp); + mdelay(2); + } + + phytium_dp_hw_config_video(phytium_dp); + if (ret == 0) { + phytium_dp_hw_enable_video(phytium_dp); + if (phytium_dp->has_audio) + phytium_dp_hw_enable_audio(phytium_dp); + } + + if (phytium_dp->is_edp) + phytium_edp_backlight_on(phytium_dp); + +} + +enum drm_mode_status +phytium_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + struct drm_display_info *display_info = &phytium_dp->connector.display_info; + unsigned int requested, actual; + + switch (display_info->bpc) { + case 10: + case 6: + case 8: + break; + default: + DRM_INFO("not support bpc(%d)\n", display_info->bpc); + display_info->bpc = 8; + break; + } + + if ((display_info->color_formats & DRM_COLOR_FORMAT_RGB444) == 0) { + DRM_INFO("not support color_format(%d)\n", display_info->color_formats); + display_info->color_formats = DRM_COLOR_FORMAT_RGB444; + } + + requested = mode->clock * display_info->bpc * 3 / 1000; + actual = phytium_dp->max_link_rate * phytium_dp->max_link_lane_count / 100; + actual = actual * 8 / 10; + if (requested >= actual) { + DRM_DEBUG_KMS("requested=%d, actual=%d, clock=%d\n", requested, actual, + mode->clock); + return MODE_CLOCK_HIGH; + } + + if (dc_fake_mode_enable && + (phytium_dp->native_mode.clock == mode->clock) && + (phytium_dp->native_mode.htotal == mode->htotal) && + (phytium_dp->native_mode.vtotal == mode->vtotal)) + return MODE_OK; + + if ((mode->hdisplay == 1600) && (mode->vdisplay == 900)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay == 1024) && (mode->clock > 78000)) + return MODE_BAD_HVALUE; + + if ((mode->hdisplay < 640) || (mode->vdisplay < 480)) + return MODE_BAD_HVALUE; + + return MODE_OK; +} + +static const struct drm_encoder_helper_funcs phytium_encoder_helper_funcs = { + .mode_set = phytium_dp_encoder_mode_set, + .disable = phytium_encoder_disable, + .enable = phytium_encoder_enable, + .mode_valid = phytium_encoder_mode_valid, +}; + +void phytium_dp_encoder_destroy(struct drm_encoder *encoder) +{ + struct phytium_dp_device *phytium_dp = encoder_to_dp_device(encoder); + + phytium_dp_audio_codec_fini(phytium_dp); + drm_encoder_cleanup(encoder); +} + +static const struct drm_encoder_funcs phytium_encoder_funcs = { + .destroy = phytium_dp_encoder_destroy, +}; + +static const struct dp_audio_n_m phytium_dp_audio_n_m[] = { + { 32000, 162000, 1024, 10125 }, + { 44100, 162000, 784, 5625 }, + { 48000, 162000, 512, 3375 }, + { 64000, 162000, 2048, 10125 }, + { 88200, 162000, 1568, 5625 }, + { 96000, 162000, 1024, 3375 }, + { 128000, 162000, 4096, 10125 }, + { 176400, 162000, 3136, 5625 }, + { 192000, 162000, 2048, 3375 }, + { 32000, 270000, 1024, 16875 }, + { 44100, 270000, 784, 9375 }, + { 48000, 270000, 512, 5625 }, + { 64000, 270000, 2048, 16875 }, + { 88200, 270000, 1568, 9375 }, + { 96000, 270000, 1024, 5625 }, + { 128000, 270000, 4096, 16875 }, + { 176400, 270000, 3136, 9375 }, + { 192000, 270000, 2048, 5625 }, + { 32000, 540000, 1024, 33750 }, + { 44100, 540000, 784, 18750 }, + { 48000, 540000, 512, 11250 }, + { 64000, 540000, 2048, 33750 }, + { 88200, 540000, 1568, 18750 }, 
+ { 96000, 540000, 1024, 11250 }, + { 128000, 540000, 4096, 33750 }, + { 176400, 540000, 3136, 18750 }, + { 192000, 540000, 2048, 11250 }, + { 32000, 810000, 1024, 50625 }, + { 44100, 810000, 784, 28125 }, + { 48000, 810000, 512, 16875 }, + { 64000, 810000, 2048, 50625 }, + { 88200, 810000, 1568, 28125 }, + { 96000, 810000, 1024, 16875 }, + { 128000, 810000, 4096, 50625 }, + { 176400, 810000, 3136, 28125 }, + { 192000, 810000, 2048, 16875 }, +}; + +static int phytium_dp_audio_get_eld(struct device *dev, void *data, u8 *buf, size_t len) +{ + struct phytium_dp_device *phytium_dp = data; + + memcpy(buf, phytium_dp->connector.eld, min(sizeof(phytium_dp->connector.eld), len)); + + return 0; +} + +static int phytium_dp_audio_mute_stream(struct device *dev, void *data, bool enable, int direction) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_digital_mute(phytium_dp, enable); + + return 0; +} + +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(phytium_dp_audio_n_m); i++) { + if (sample_rate == phytium_dp_audio_n_m[i].sample_rate + && link_rate == phytium_dp_audio_n_m[i].link_rate) + return &phytium_dp_audio_n_m[i]; + } + + return NULL; +} + +static int phytium_dp_audio_hw_params(struct device *dev, void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct phytium_dp_device *phytium_dp = data; + int ret = 0; + struct audio_info audio_info = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .channels = params->channels, + }; + + if (daifmt->fmt != HDMI_I2S) { + DRM_ERROR("invalid audio format %d\n", daifmt->fmt); + ret = -EINVAL; + goto failed; + } + + ret = phytium_dp_hw_audio_hw_params(phytium_dp, audio_info); + +failed: + return ret; +} + +static void phytium_dp_audio_shutdown(struct device *dev, void *data) +{ + struct phytium_dp_device *phytium_dp = data; + + phytium_dp_hw_audio_shutdown(phytium_dp); +} + +static void handle_plugged_change(struct phytium_dp_device *phytium_dp, bool plugged) +{ + if (phytium_dp->plugged_cb && phytium_dp->codec_dev) + phytium_dp->plugged_cb(phytium_dp->codec_dev, plugged); +} + +static int phytium_dp_audio_hook_plugged_cb(struct device *dev, void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct phytium_dp_device *phytium_dp = data; + bool plugged; + + phytium_dp->plugged_cb = fn; + phytium_dp->codec_dev = codec_dev; + + if ((phytium_dp->connector.status == connector_status_connected) && phytium_dp->has_audio) + plugged = true; + else + plugged = false; + + handle_plugged_change(phytium_dp, plugged); + return 0; +} + + +static const struct hdmi_codec_ops phytium_audio_codec_ops = { + .hw_params = phytium_dp_audio_hw_params, + .audio_shutdown = phytium_dp_audio_shutdown, + .mute_stream = phytium_dp_audio_mute_stream, + .get_eld = phytium_dp_audio_get_eld, + .hook_plugged_cb = phytium_dp_audio_hook_plugged_cb, +}; + +static int phytium_dp_audio_codec_init(struct phytium_dp_device *phytium_dp) +{ + struct device *dev = phytium_dp->dev->dev; + struct hdmi_codec_pdata codec_data = { + .i2s = 1, + .spdif = 0, + .ops = &phytium_audio_codec_ops, + .max_i2s_channels = 2, + .data = phytium_dp, + }; + + phytium_dp->audio_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME, + codec_id, + &codec_data, sizeof(codec_data)); + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + codec_id += 1; + + return PTR_ERR_OR_ZERO(phytium_dp->audio_pdev); +} + +static void 
phytium_dp_audio_codec_fini(struct phytium_dp_device *phytium_dp) +{ + + if (!PTR_ERR_OR_ZERO(phytium_dp->audio_pdev)) + platform_device_unregister(phytium_dp->audio_pdev); + phytium_dp->audio_pdev = NULL; + codec_id -= 1; +} + +static long phytium_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) +{ + struct phytium_dp_device *phytium_dp = container_of(aux, struct phytium_dp_device, aux); + long ret = 0; + + DRM_DEBUG_KMS("msg->size: 0x%lx\n", msg->size); + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = phytium_dp_hw_aux_transfer_write(phytium_dp, msg); + DRM_DEBUG_KMS("aux write reply:0x%x ret:0x%lx\n", msg->reply, ret); + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + ret = phytium_dp_hw_aux_transfer_read(phytium_dp, msg); + DRM_DEBUG_KMS("aux read ret:0x%lx\n", ret); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static void phytium_dp_aux_init(struct phytium_dp_device *phytium_dp) +{ + drm_dp_aux_init(&phytium_dp->aux); + phytium_dp->aux.name = kasprintf(GFP_KERNEL, "dp-%d", phytium_dp->port); + phytium_dp->aux.transfer = phytium_dp_aux_transfer; +} + +int phytium_get_encoder_crtc_mask(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int i, mask = 0; + + for_each_pipe_masked(priv, i) { + if (i != port) + mask++; + else + break; + } + + return BIT(mask); +} + +static bool phytium_dp_is_edp(struct phytium_dp_device *phytium_dp, int port) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + + if (priv->info.edp_mask & BIT(port)) + return true; + else + return false; +} + +static bool phytium_edp_init_connector(struct phytium_dp_device *phytium_dp) +{ + enum drm_connector_status status; + struct drm_connector *connector = &phytium_dp->connector; + + phytium_edp_panel_poweron(phytium_dp); + + status = phytium_dp_detect_dpcd(phytium_dp); + if (status == connector_status_disconnected) { + DRM_ERROR("detect edp dpcd failed\n"); + return false; + } + + phytium_dp->edp_edid = drm_get_edid(connector, &phytium_dp->aux.ddc); + if (!phytium_dp->edp_edid) { + DRM_ERROR("get edp edid failed\n"); + return false; + } + + connector->status = status; + phytium_dp->max_link_rate = phytium_dp->common_rates[phytium_dp->num_common_rates-1]; + phytium_dp->max_link_lane_count = phytium_dp->common_max_lane_count; + phytium_dp->link_rate = phytium_dp->max_link_rate; + phytium_dp->link_lane_count = phytium_dp->max_link_lane_count; + DRM_DEBUG_KMS("common_max_lane_count: %d, common_max_rate:%d\n", + phytium_dp->max_link_lane_count, phytium_dp->max_link_rate); + + return true; +} + +static void phytium_edp_fini_connector(struct phytium_dp_device *phytium_dp) +{ + kfree(phytium_dp->edp_edid); + + phytium_dp->edp_edid = NULL; + phytium_edp_panel_poweroff(phytium_dp); +} + +int phytium_dp_resume(struct drm_device *drm_dev) +{ + struct phytium_dp_device *phytium_dp; + struct drm_encoder *encoder; + int ret = 0; + + drm_for_each_encoder(encoder, drm_dev) { + phytium_dp = encoder_to_dp_device(encoder); + if (phytium_dp->is_edp) { + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + return -EIO; + } + } + + return 
0; +} + +int phytium_dp_init(struct drm_device *dev, int port) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_dp_device *phytium_dp = NULL; + int ret, type; + + DRM_DEBUG_KMS("%s: port %d\n", __func__, port); + phytium_dp = kzalloc(sizeof(*phytium_dp), GFP_KERNEL); + if (!phytium_dp) { + ret = -ENOMEM; + goto failed_malloc_dp; + } + + phytium_dp->dev = dev; + phytium_dp->port = port; + + if (IS_PX210(priv)) { + px210_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PX210_DP_BASE(port); + priv->phy_access_base[port] = PX210_PHY_ACCESS_BASE(port); + } else if (IS_PE220X(priv)) { + pe220x_dp_func_register(phytium_dp); + priv->dp_reg_base[port] = PE220X_DP_BASE(port); + priv->phy_access_base[port] = PE220X_PHY_ACCESS_BASE(port); + } + + if (phytium_dp_is_edp(phytium_dp, port)) { + phytium_dp->is_edp = true; + type = DRM_MODE_CONNECTOR_eDP; + phytium_dp_panel_init_backlight_funcs(phytium_dp); + phytium_edp_backlight_off(phytium_dp); + phytium_edp_panel_poweroff(phytium_dp); + } else { + phytium_dp->is_edp = false; + type = DRM_MODE_CONNECTOR_DisplayPort; + } + + ret = phytium_dp_hw_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize dp %d\n", phytium_dp->port); + goto failed_init_dp; + } + + ret = drm_encoder_init(dev, &phytium_dp->encoder, + &phytium_encoder_funcs, + DRM_MODE_ENCODER_TMDS, "DP %d", port); + if (ret) { + DRM_ERROR("failed to initialize encoder with drm\n"); + goto failed_encoder_init; + } + drm_encoder_helper_add(&phytium_dp->encoder, &phytium_encoder_helper_funcs); + phytium_dp->encoder.possible_crtcs = phytium_get_encoder_crtc_mask(phytium_dp, port); + + phytium_dp->connector.dpms = DRM_MODE_DPMS_OFF; + phytium_dp->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; + ret = drm_connector_init(dev, &phytium_dp->connector, &phytium_connector_funcs, + type); + if (ret) { + DRM_ERROR("failed to initialize connector with drm\n"); + goto failed_connector_init; + } + drm_connector_helper_add(&phytium_dp->connector, &phytium_connector_helper_funcs); + drm_connector_attach_encoder(&phytium_dp->connector, &phytium_dp->encoder); + + ret = phytium_dp_audio_codec_init(phytium_dp); + if (ret) { + DRM_ERROR("failed to initialize audio codec\n"); + goto failed_audio_codec_init; + } + + phytium_dp->train_retry_count = 0; + INIT_WORK(&phytium_dp->train_retry_work, phytium_dp_train_retry_work_fn); + drm_connector_register(&phytium_dp->connector); + + return 0; + + /* unwind in the reverse order of initialization */ +failed_audio_codec_init: + drm_connector_cleanup(&phytium_dp->connector); +failed_connector_init: + drm_encoder_cleanup(&phytium_dp->encoder); +failed_encoder_init: +failed_init_dp: + kfree(phytium_dp); +failed_malloc_dp: + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_dp.h b/drivers/gpu/drm/phytium/phytium_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..ada3f42a68684243bcd45577e068815f9b540aca --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_dp.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
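+ * DisplayPort/eDP encoder state: AUX channel glue, DPCD capabilities, link rate and lane bookkeeping, and the audio codec hooks declared below.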
+ */ + +#ifndef __PHYTIUM_DP_H__ +#define __PHYTIUM_DP_H__ + +#include +#include +#include +#include + +struct phytium_dp_device; + +#include "phytium_panel.h" + +struct audio_info { + int sample_rate; + int channels; + int sample_width; +}; + +struct dp_audio_n_m { + int sample_rate; + int link_rate; + u16 m; + u16 n; +}; + +struct phytium_dp_compliance { + unsigned long test_type; + uint32_t test_link_rate; + u8 test_lane_count; + bool test_active; + u8 reserve[2]; +}; + +struct phytium_dp_func { + uint8_t (*dp_hw_get_source_lane_count)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_reset)(struct phytium_dp_device *phytium_dp); + bool (*dp_hw_spread_is_enable)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_set_backlight)(struct phytium_dp_device *phytium_dp, uint32_t level); + uint32_t (*dp_hw_get_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_disable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_enable_backlight)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweroff_panel)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_poweron_panel)(struct phytium_dp_device *phytium_dp); + int (*dp_hw_init_phy)(struct phytium_dp_device *phytium_dp); + void (*dp_hw_set_phy_lane_setting)(struct phytium_dp_device *phytium_dp, + uint32_t link_rate, uint8_t train_set); + int (*dp_hw_set_phy_lane_and_rate)(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate); +}; + +struct phytium_dp_hpd_state { + bool hpd_event_state; + bool hpd_irq_state; + bool hpd_raw_state; + bool hpd_irq_enable; +}; + +struct phytium_dp_device { + struct drm_device *dev; + struct drm_encoder encoder; + struct drm_connector connector; + int port; + struct drm_display_mode mode; + bool link_trained; + bool detect_done; + bool is_edp; + bool reserve0; + struct drm_dp_aux aux; + unsigned char dpcd[DP_RECEIVER_CAP_SIZE]; + uint8_t edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE]; + unsigned char downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + unsigned char sink_count; + + int *source_rates; + int num_source_rates; + int sink_rates[DP_MAX_SUPPORTED_RATES]; + int num_sink_rates; + int common_rates[DP_MAX_SUPPORTED_RATES]; + int num_common_rates; + + int source_max_lane_count; + int sink_max_lane_count; + int common_max_lane_count; + + int max_link_rate; + int max_link_lane_count; + int link_rate; + int link_lane_count; + struct work_struct train_retry_work; + int train_retry_count; + uint32_t trigger_train_fail; + + unsigned char train_set[4]; + struct edid *edp_edid; + bool has_audio; + bool fast_train_support; + bool hw_spread_enable; + bool reserve[1]; + struct platform_device *audio_pdev; + struct audio_info audio_info; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct phytium_dp_compliance compliance; + struct phytium_dp_func *funcs; + struct phytium_dp_hpd_state dp_hpd_state; + + struct phytium_panel panel; + struct drm_display_mode native_mode; +}; + +union phytium_phy_tp { + struct { + /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 + * and 3 bits for DP1.2. 
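+ * The third bit added by DP 1.2 makes room for the CP2520 compliance patterns (values 5-7 of enum phytium_dpcd_phy_tp below).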
+ */ + uint8_t PATTERN :3; + uint8_t RESERVED :5; + } bits; + uint8_t raw; +}; + +/* PHY test patterns + * The order of test patterns follows DPCD register PHY_TEST_PATTERN (0x248) + */ +enum phytium_dpcd_phy_tp { + PHYTIUM_PHY_TP_NONE = 0, + PHYTIUM_PHY_TP_D10_2, + PHYTIUM_PHY_TP_SYMBOL_ERROR, + PHYTIUM_PHY_TP_PRBS7, + PHYTIUM_PHY_TP_80BIT_CUSTOM, + PHYTIUM_PHY_TP_CP2520_1, + PHYTIUM_PHY_TP_CP2520_2, + PHYTIUM_PHY_TP_CP2520_3, +}; +#define PHYTIUM_DP_AUDIO_ID (('P' << 24) + ('H' << 16) + ('Y' << 8)) +#define encoder_to_dp_device(x) container_of(x, struct phytium_dp_device, encoder) +#define connector_to_dp_device(x) container_of(x, struct phytium_dp_device, connector) +#define panel_to_dp_device(x) container_of(x, struct phytium_dp_device, panel) +#define train_retry_to_dp_device(x) container_of(x, struct phytium_dp_device, train_retry_work) +void phytium_phy_writel(struct phytium_dp_device *phytium_dp, uint32_t address, uint32_t data); +uint32_t phytium_phy_readl(struct phytium_dp_device *phytium_dp, uint32_t address); + +int phytium_dp_init(struct drm_device *dev, int pipe); +int phytium_dp_resume(struct drm_device *drm_dev); +void phytium_dp_hpd_irq_setup(struct drm_device *dev, bool enable); +irqreturn_t phytium_dp_hpd_irq_handler(struct phytium_display_private *priv); +void phytium_dp_hpd_work_func(struct work_struct *work); +const struct dp_audio_n_m *phytium_dp_audio_get_n_m(int link_rate, int sample_rate); +#endif /* __PHYTIUM_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fb.c b/drivers/gpu/drm/phytium/phytium_fb.c new file mode 100644 index 0000000000000000000000000000000000000000..879065964729e79326c2ecf4db6196ed70ea1f0b --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.c @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static int +phytium_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file_priv, + unsigned int *handle) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + + return drm_gem_handle_create(file_priv, &phytium_fb->phytium_gem_obj[0]->base, handle); +} + +static void phytium_fb_destroy(struct drm_framebuffer *fb) +{ + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + int i, num_planes; + struct drm_gem_object *obj = NULL; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? 
info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + obj = phytium_fb->phytium_gem_obj[i] ? &phytium_fb->phytium_gem_obj[i]->base : NULL; + if (obj) + drm_gem_object_put(obj); + } + + drm_framebuffer_cleanup(fb); + kfree(phytium_fb); +} + +static const struct drm_framebuffer_funcs phytium_fb_funcs = { + .create_handle = phytium_fb_create_handle, + .destroy = phytium_fb_destroy, +}; + +struct phytium_framebuffer * +phytium_fb_alloc(struct drm_device *dev, const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, unsigned int num_planes) +{ + struct phytium_framebuffer *phytium_fb; + int ret = 0, i; + + phytium_fb = kzalloc(sizeof(*phytium_fb), GFP_KERNEL); + if (!phytium_fb) + return ERR_PTR(-ENOMEM); + + drm_helper_mode_fill_fb_struct(dev, &phytium_fb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &phytium_fb->base, &phytium_fb_funcs); + + if (ret) { + DRM_ERROR("Failed to initialize framebuffer: %d\n", ret); + kfree(phytium_fb); + return ERR_PTR(ret); + } + + for (i = 0; i < num_planes; i++) { + phytium_fb->phytium_gem_obj[i] = phytium_gem_obj[i]; + phytium_fb->base.obj[i] = &phytium_gem_obj[i]->base; + } + return phytium_fb; +} + +struct drm_framebuffer * +phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + int ret = 0, i, num_planes; + struct drm_gem_object *obj; + unsigned int hsub, vsub, size; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE] = {0}; + struct phytium_framebuffer *phytium_fb; + struct phytium_display_private *priv = dev->dev_private; + const struct drm_format_info *info; + + info = drm_format_info(mode_cmd->pixel_format); + hsub = info ? info->hsub : 1; + vsub = info ? info->vsub : 1; + num_planes = info ? info->num_planes : 1; + num_planes = min(num_planes, PHYTIUM_FORMAT_MAX_PLANE); + + for (i = 0; i < num_planes; i++) { + unsigned int height = mode_cmd->height / (i ? vsub : 1); + + size = height * mode_cmd->pitches[i] + mode_cmd->offsets[i]; + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]); + if (!obj) { + DRM_ERROR("Failed to lookup GEM object\n"); + ret = -ENXIO; + goto error; + } + + if (obj->size < size) { + drm_gem_object_put(obj); + ret = -EINVAL; + goto error; + } + + phytium_gem_obj[i] = to_phytium_gem_obj(obj); + + ret = priv->dc_hw_fb_format_check(mode_cmd, i); + if (ret < 0) { + /* drop the reference taken for this plane before unwinding */ + drm_gem_object_put(obj); + goto error; + } + } + + phytium_fb = phytium_fb_alloc(dev, mode_cmd, phytium_gem_obj, i); + if (IS_ERR(phytium_fb)) { + DRM_DEBUG_KMS("phytium_fb_alloc failed\n"); + ret = PTR_ERR(phytium_fb); + goto error; + } + + return &phytium_fb->base; +error: + for (i--; i >= 0; i--) + drm_gem_object_put(&phytium_gem_obj[i]->base); + + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_fb.h b/drivers/gpu/drm/phytium/phytium_fb.h new file mode 100644 index 0000000000000000000000000000000000000000..e096aa30ccb508143ae8940a483eaf64aded4da1 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fb.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
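+ * struct phytium_framebuffer pairs a drm_framebuffer with the per-plane GEM objects that back it.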
+ */ + +#ifndef __PHYTIUM_FB_H__ +#define __PHYTIUM_FB_H__ + +#include + +struct phytium_framebuffer { + struct drm_framebuffer base; + struct phytium_gem_object *phytium_gem_obj[PHYTIUM_FORMAT_MAX_PLANE]; +}; + +#define to_phytium_framebuffer(fb) container_of(fb, struct phytium_framebuffer, base) + +struct phytium_framebuffer *phytium_fb_alloc(struct drm_device *dev, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct phytium_gem_object **phytium_gem_obj, + unsigned int num_planes); + +struct drm_framebuffer *phytium_fb_create(struct drm_device *dev, struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd); +#endif /* __PHYTIUM_FB_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.c b/drivers/gpu/drm/phytium/phytium_fbdev.c new file mode 100644 index 0000000000000000000000000000000000000000..e929ad281724ade0b47f826bb537b594d7a7af45 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" +#include "phytium_fb.h" + + +#define PHYTIUM_MAX_CONNECTOR 1 +#define helper_to_drm_private(x) container_of(x, struct phytium_display_private, fbdev_helper) + +static void phytium_fbdev_destroy(struct fb_info *info) +{ + struct drm_fb_helper *helper = info->par; + struct phytium_display_private *priv = helper_to_drm_private(helper); + + phytium_gem_free_object(&priv->fbdev_phytium_gem->base); +} + +static int phytium_fbdev_mmap(struct fb_info *info, struct vm_area_struct *vma) +{ + struct drm_fb_helper *helper = info->par; + struct phytium_display_private *priv = helper_to_drm_private(helper); + + return phytium_gem_mmap_obj(&priv->fbdev_phytium_gem->base, vma); +} + +static const struct fb_ops phytium_fbdev_ops = { + .owner = THIS_MODULE, + DRM_FB_HELPER_DEFAULT_OPS, + .fb_mmap = phytium_fbdev_mmap, + FB_DEFAULT_IOMEM_OPS, + .fb_destroy = phytium_fbdev_destroy, +}; + +static int +phytium_drm_fbdev_create(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) +{ + struct phytium_display_private *priv = helper_to_drm_private(helper); + struct drm_device *dev = helper->dev; + unsigned int bytes_per_pixel; + struct drm_mode_fb_cmd2 mode_cmd = {0}; + struct phytium_framebuffer *phytium_fb = NULL; + struct fb_info *fbi = NULL; + struct drm_framebuffer *fb = NULL; + size_t size = 0; + int ret = 0; + unsigned long offset; + + bytes_per_pixel = DIV_ROUND_UP(sizes->surface_bpp, 8); + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + mode_cmd.pitches[0] = ALIGN(sizes->surface_width * bytes_per_pixel, 128); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, sizes->surface_depth); + size = PAGE_ALIGN(mode_cmd.pitches[0] * mode_cmd.height); + + ret = mutex_lock_interruptible(&dev->struct_mutex); + if (ret < 0) { + DRM_ERROR("failed to get mutex lock\n"); + return ret; + } + + priv->fbdev_phytium_gem = phytium_gem_create_object(dev, size); + if (IS_ERR(priv->fbdev_phytium_gem)) { + /* phytium_gem_create_object() returns an ERR_PTR, never NULL */ + DRM_ERROR("failed to create gem object\n"); + mutex_unlock(&dev->struct_mutex); + return PTR_ERR(priv->fbdev_phytium_gem); + } + mutex_unlock(&dev->struct_mutex); + + fbi = drm_fb_helper_alloc_info(helper); + if (IS_ERR(fbi)) { + DRM_DEV_ERROR(dev->dev, "Failed to create framebuffer info.\n"); + ret = PTR_ERR(fbi); + goto out; + } + + phytium_fb = phytium_fb_alloc(dev, &mode_cmd, &priv->fbdev_phytium_gem, 1); + if (IS_ERR(phytium_fb)) { + DRM_DEV_ERROR(dev->dev,
"Failed to alloc DRM framebuffer.\n"); + ret = PTR_ERR(phytium_fb); + goto out; + } + + helper->fb = &(phytium_fb->base); + fbi->par = helper; + fbi->fbops = &phytium_fbdev_ops; + + fb = helper->fb; + drm_fb_helper_fill_info(fbi, helper, sizes); + + offset = fbi->var.xoffset * bytes_per_pixel; + offset += fbi->var.yoffset * fb->pitches[0]; + fbi->screen_base = priv->fbdev_phytium_gem->vaddr + offset; + fbi->screen_size = priv->fbdev_phytium_gem->base.size; + fbi->fix.smem_len = priv->fbdev_phytium_gem->base.size; + DRM_DEBUG_KMS("FB [%dx%d]-%d kvaddr=%pa offset=%ld size=%zu\n", fb->width, fb->height, + fb->format->depth, &priv->fbdev_phytium_gem->iova, offset, size); + fbi->skip_vt_switch = true; + + return 0; +out: + phytium_gem_free_object(&priv->fbdev_phytium_gem->base); + return ret; +} + +static const struct drm_fb_helper_funcs phytium_drm_fb_helper_funcs = { + .fb_probe = phytium_drm_fbdev_create, +}; + +int phytium_drm_fbdev_init(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + int ret; + + if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector) + return -EINVAL; + + helper = &priv->fbdev_helper; + drm_fb_helper_prepare(dev, helper, 32, &phytium_drm_fb_helper_funcs); + + ret = drm_fb_helper_init(dev, helper); + if (ret < 0) { + DRM_DEV_ERROR(dev->dev, "Failed to initialize drm fb helper -ret %d\n", ret); + return ret; + } + + ret = drm_fb_helper_initial_config(helper); + return 0; +} + +void phytium_drm_fbdev_fini(struct drm_device *dev) +{ + struct phytium_display_private *priv = dev->dev_private; + struct drm_fb_helper *helper; + + helper = &priv->fbdev_helper; + drm_fb_helper_unregister_info(helper); + + if (helper->fb) + drm_framebuffer_put(helper->fb); + + drm_fb_helper_fini(helper); +} diff --git a/drivers/gpu/drm/phytium/phytium_fbdev.h b/drivers/gpu/drm/phytium/phytium_fbdev.h new file mode 100644 index 0000000000000000000000000000000000000000..fe352557a4f9d14ef037dc6170215b49c5e760e3 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_fbdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef _PHYTIUM_FBDEV_H +#define _PHYTIUM_FBDEV_H + +int phytium_drm_fbdev_init(struct drm_device *dev); +void phytium_drm_fbdev_fini(struct drm_device *dev); + +#endif /* _PHYTIUM_FBDEV_H */ diff --git a/drivers/gpu/drm/phytium/phytium_gem.c b/drivers/gpu/drm/phytium/phytium_gem.c new file mode 100644 index 0000000000000000000000000000000000000000..f470f769dce6a0789b7070b6982d05f8d420dd64 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.c @@ -0,0 +1,509 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_gem.h" + +#define VRAM_POOL_ALLOC_ORDER 12 + +int phytium_memory_pool_alloc(struct phytium_display_private *priv, void **pvaddr, + phys_addr_t *phys_addr, uint64_t size) +{ + unsigned long vaddr; + + vaddr = gen_pool_alloc(priv->memory_pool, size); + if (!vaddr) + return -ENOMEM; + + *phys_addr = gen_pool_virt_to_phys(priv->memory_pool, vaddr); + + *pvaddr = (void *)vaddr; + return 0; +} + +void phytium_memory_pool_free(struct phytium_display_private *priv, void *vaddr, uint64_t size) +{ + gen_pool_free(priv->memory_pool, (unsigned long)vaddr, size); +} + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->memory_pool = gen_pool_create(VRAM_POOL_ALLOC_ORDER, -1); + if (priv->memory_pool == NULL) { + DRM_ERROR("fail to create memory pool\n"); + ret = -1; + goto failed_create_pool; + } + + ret = gen_pool_add_virt(priv->memory_pool, (unsigned long)priv->pool_virt_addr, + priv->pool_phys_addr, priv->pool_size, -1); + if (ret) { + DRM_ERROR("fail to add vram pool\n"); + ret = -1; + goto failed_add_pool_virt; + } + + return 0; + +failed_add_pool_virt: + gen_pool_destroy(priv->memory_pool); + +failed_create_pool: + return ret; +} + +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv) +{ + gen_pool_destroy(priv->memory_pool); +} + +struct sg_table * +phytium_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct sg_table *sgt; + struct drm_device *dev = obj->dev; + int ret; + struct page *page = NULL; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) { + DRM_DEBUG_KMS("malloc sgt fail\n"); + return ERR_PTR(-ENOMEM); + } + + if ((phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) || + (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT)) { + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg\n"); + goto sgt_free; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + sg_set_page(sgt->sgl, page, PAGE_ALIGN(phytium_gem_obj->size), 0); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_get_sgtable_attrs(dev->dev, sgt, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, phytium_gem_obj->size, + DMA_ATTR_WRITE_COMBINE); + if (ret) { + DRM_ERROR("failed to allocate sgt, %d\n", ret); + goto sgt_free; + } + } + + return sgt; +sgt_free: + kfree(sgt); + return ERR_PTR(ret); +} + +struct drm_gem_object * +phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sgt) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct scatterlist *s; + dma_addr_t expected; + int ret, i; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto failed_malloc; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, attach->dmabuf->size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + expected = sg_dma_address(sgt->sgl); + for_each_sg(sgt->sgl, s, sgt->nents, i) { + if (sg_dma_address(s) != expected) { + DRM_ERROR("sg_table is not contiguous"); + ret = -EINVAL; + goto failed_check_continue; + } + expected = sg_dma_address(s) + sg_dma_len(s); + } + + 
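+ /* the loop above verified the sg list is contiguous, so the first entry carries the device address of the whole buffer */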
phytium_gem_obj->iova = sg_dma_address(sgt->sgl); + phytium_gem_obj->sgt = sgt; + + return &phytium_gem_obj->base; +failed_check_continue: + drm_gem_object_release(&phytium_gem_obj->base); +failed_object_init: + kfree(phytium_gem_obj); +failed_malloc: + return ERR_PTR(ret); +} + +int phytium_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct phytium_gem_object *phytium_obj = to_phytium_gem_obj(obj); + + iosys_map_set_vaddr(map, phytium_obj->vaddr); + + return 0; +} + +void phytium_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + +} + +static void phytium_dma_callback(void *callback_param) +{ + struct completion *comp = callback_param; + + complete(comp); +} + +int phytium_dma_transfer(struct drm_device *drm_dev, int dev_to_mem, void *addr, + dma_addr_t iova, uint64_t size) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct dma_chan *dma_chan = priv->dma_chan; + struct sg_table st; + struct scatterlist *sgl; + int ret = 0, timeout; + uint32_t nents, i; + struct dma_slave_config cfg = {0}; + struct dma_async_tx_descriptor *desc; + struct completion comp; + enum dma_data_direction dir; + size_t min = 0; + + nents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(&st, nents, GFP_KERNEL); + if (ret) { + DRM_ERROR("failed to allocate sg_table\n"); + ret = -ENOMEM; + goto failed_sg_alloc_table; + } + + for_each_sg(st.sgl, sgl, st.nents, i) { + min = min_t(size_t, size, PAGE_SIZE - offset_in_page(addr)); + sg_set_page(sgl, vmalloc_to_page(addr), min, offset_in_page(addr)); + addr += min; + size -= min; + } + + memset(&cfg, 0, sizeof(cfg)); + if (dev_to_mem) { + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = iova; + cfg.dst_addr = 0; + dir = DMA_FROM_DEVICE; + } else { + cfg.direction = DMA_MEM_TO_DEV; + cfg.src_addr = 0; + cfg.dst_addr = iova; + dir = DMA_TO_DEVICE; + } + + dmaengine_slave_config(dma_chan, &cfg); + + nents = dma_map_sg(dma_chan->device->dev, st.sgl, st.nents, dir); + if (!nents) { + DRM_DEV_ERROR(drm_dev->dev, "failed to dma_map_sg for dmaengine\n"); + ret = -EINVAL; + goto failed_dma_map_sg; + } + st.nents = nents; + dma_sync_sg_for_device(dma_chan->device->dev, st.sgl, st.nents, dir); + + sgl = st.sgl; + desc = dmaengine_prep_slave_sg(dma_chan, + st.sgl, + st.nents, + cfg.direction, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + DRM_DEV_ERROR(drm_dev->dev, "failed to dmaengine_prep_slave_sg\n"); + ret = -EINVAL; + goto failed_prep_slave_sg; + } + init_completion(&comp); + desc->callback = phytium_dma_callback; + desc->callback_param = &comp; + + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + + timeout = wait_for_completion_timeout(&comp, 2 * HZ); + if (timeout == 0) { + DRM_DEV_ERROR(drm_dev->dev, "wait for dma callback timeout\n"); + ret = -EIO; + } + dma_sync_sg_for_cpu(dma_chan->device->dev, st.sgl, st.nents, dir); + +failed_prep_slave_sg: + dma_unmap_sg(dma_chan->device->dev, st.sgl, st.nents, dir); +failed_dma_map_sg: + sg_free_table(&st); +failed_sg_alloc_table: + return ret; +} + +int phytium_gem_suspend(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + phytium_gem_obj->vaddr_save = vmalloc(phytium_gem_obj->size); + if (!phytium_gem_obj->vaddr_save) + goto malloc_failed; + + if (priv->dma_inited) + ret =
phytium_dma_transfer(drm_dev, 1, phytium_gem_obj->vaddr_save, + phytium_gem_obj->iova, phytium_gem_obj->size); + + if ((!priv->dma_inited) || ret) + memcpy(phytium_gem_obj->vaddr_save, phytium_gem_obj->vaddr, + phytium_gem_obj->size); + } + + return 0; +malloc_failed: + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + if (phytium_gem_obj->vaddr_save) { + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } + } + return -ENOMEM; +} + +void phytium_gem_resume(struct drm_device *drm_dev) +{ + struct phytium_display_private *priv = drm_dev->dev_private; + struct phytium_gem_object *phytium_gem_obj = NULL; + + list_for_each_entry(phytium_gem_obj, &priv->gem_list_head, list) { + if (phytium_gem_obj->memory_type != MEMORY_TYPE_VRAM) + continue; + + memcpy(phytium_gem_obj->vaddr, phytium_gem_obj->vaddr_save, phytium_gem_obj->size); + vfree(phytium_gem_obj->vaddr_save); + phytium_gem_obj->vaddr_save = NULL; + } +} + +void phytium_gem_free_object(struct drm_gem_object *obj) +{ + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + struct drm_device *dev = obj->dev; + struct phytium_display_private *priv = dev->dev_private; + uint64_t size = phytium_gem_obj->size; + + DRM_DEBUG_KMS("free phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + if (phytium_gem_obj->vaddr) { + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + dma_unmap_page(dev->dev, phytium_gem_obj->iova, size, DMA_TO_DEVICE); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] -= size; + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + dma_free_attrs(dev->dev, size, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, 0); + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] -= size; + } + list_del(&phytium_gem_obj->list); + } else if (obj->import_attach) + drm_prime_gem_destroy(obj, phytium_gem_obj->sgt); + drm_gem_object_release(obj); + kfree(phytium_gem_obj); +} + +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret = 0; + struct phytium_gem_object *phytium_gem_obj = to_phytium_gem_obj(obj); + unsigned long pfn = PHYS_PFN(phytium_gem_obj->phys_addr); + /* + * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the + * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map + * the whole buffer. 
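+ * Both remap_pfn_range() and dma_mmap_attrs() below then map the buffer from its very start.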
+ */ + vm_flags_clear(vma, VM_PFNMAP); + vma->vm_pgoff = 0; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + + if (phytium_gem_obj->memory_type == MEMORY_TYPE_VRAM) { + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = remap_pfn_range(vma, vma->vm_start, pfn, + vma->vm_end - vma->vm_start, vma->vm_page_prot); + } else if (phytium_gem_obj->memory_type == MEMORY_TYPE_SYSTEM_UNIFIED) { + ret = dma_mmap_attrs(obj->dev->dev, vma, phytium_gem_obj->vaddr, + phytium_gem_obj->iova, vma->vm_end - vma->vm_start, 0); + } + if (ret) + drm_gem_vm_close(vma); + + return ret; +} + +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + int ret = 0; + + ret = drm_gem_mmap(filp, vma); + if (ret < 0) + return ret; + + return phytium_gem_mmap_obj(vma->vm_private_data, vma); +} + +static const struct vm_operations_struct phytium_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static const struct drm_gem_object_funcs phytium_drm_gem_object_funcs = { + .free = phytium_gem_free_object, + .get_sg_table = phytium_gem_prime_get_sg_table, + .vmap = phytium_gem_prime_vmap, + .vunmap = phytium_gem_prime_vunmap, + .vm_ops = &phytium_vm_ops, +}; + +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size) +{ + struct phytium_gem_object *phytium_gem_obj = NULL; + struct phytium_display_private *priv = dev->dev_private; + struct page *page = NULL; + int ret = 0; + + phytium_gem_obj = kzalloc(sizeof(*phytium_gem_obj), GFP_KERNEL); + if (!phytium_gem_obj) { + DRM_ERROR("failed to allocate phytium_gem_obj\n"); + ret = -ENOMEM; + goto error; + } + + ret = drm_gem_object_init(dev, &phytium_gem_obj->base, size); + if (ret) { + DRM_ERROR("failed to initialize drm gem object: %d\n", ret); + goto failed_object_init; + } + + if (priv->support_memory_type & MEMORY_TYPE_VRAM) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate vram buffer with size %lx\n", size); + goto failed_dma_alloc; + } + phytium_gem_obj->iova = phytium_gem_obj->phys_addr; + phytium_gem_obj->memory_type = MEMORY_TYPE_VRAM; + priv->mem_state[PHYTIUM_MEM_VRAM_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_CARVEOUT) { + ret = phytium_memory_pool_alloc(priv, &phytium_gem_obj->vaddr, + &phytium_gem_obj->phys_addr, size); + if (ret) { + DRM_ERROR("fail to allocate carveout memory with size %lx\n", size); + goto failed_dma_alloc; + } + page = phys_to_page(phytium_gem_obj->phys_addr); + phytium_gem_obj->iova = dma_map_page(dev->dev, page, 0, size, DMA_TO_DEVICE); + if (dma_mapping_error(dev->dev, phytium_gem_obj->iova)) { + DRM_ERROR("fail to dma map carveout memory with size %lx\n", size); + phytium_memory_pool_free(priv, phytium_gem_obj->vaddr, size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT; + priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_ALLOC] += size; + } else if (priv->support_memory_type & MEMORY_TYPE_SYSTEM_UNIFIED) { + phytium_gem_obj->vaddr = dma_alloc_attrs(dev->dev, size, &phytium_gem_obj->iova, + GFP_KERNEL, 0); + if (!phytium_gem_obj->vaddr) { + DRM_ERROR("fail to allocate unified buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + phytium_gem_obj->memory_type = 
MEMORY_TYPE_SYSTEM_UNIFIED; + priv->mem_state[PHYTIUM_MEM_SYSTEM_UNIFIED_ALLOC] += size; + } else { + DRM_ERROR("fail to allocate buffer with size %lx\n", size); + ret = -ENOMEM; + goto failed_dma_alloc; + } + + phytium_gem_obj->base.funcs = &phytium_drm_gem_object_funcs; + + phytium_gem_obj->size = size; + list_add_tail(&phytium_gem_obj->list, &priv->gem_list_head); + DRM_DEBUG_KMS("phytium_gem_obj iova:0x%pa size:0x%lx\n", + &phytium_gem_obj->iova, phytium_gem_obj->size); + return phytium_gem_obj; + +failed_dma_alloc: + drm_gem_object_put(&phytium_gem_obj->base); + + return ERR_PTR(ret); +failed_object_init: + kfree(phytium_gem_obj); +error: + return ERR_PTR(ret); +} + +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int size = 0; + struct phytium_gem_object *phytium_gem_obj = NULL; + int ret = 0; + + args->pitch = ALIGN(args->width*DIV_ROUND_UP(args->bpp, 8), 128); + args->size = args->pitch * args->height; + size = PAGE_ALIGN(args->size); + phytium_gem_obj = phytium_gem_create_object(dev, size); + if (IS_ERR(phytium_gem_obj)) + return PTR_ERR(phytium_gem_obj); + ret = drm_gem_handle_create(file, &phytium_gem_obj->base, &args->handle); + if (ret) { + DRM_ERROR("failed to drm_gem_handle_create\n"); + goto failed_gem_handle; + } + drm_gem_object_put(&phytium_gem_obj->base); + + return 0; +failed_gem_handle: + phytium_gem_free_object(&phytium_gem_obj->base); + return ret; +} diff --git a/drivers/gpu/drm/phytium/phytium_gem.h b/drivers/gpu/drm/phytium/phytium_gem.h new file mode 100644 index 0000000000000000000000000000000000000000..17c438e6e63c9f37cee1948e2547956d2da07afe --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_gem.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
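+ * struct phytium_gem_object tracks the CPU vaddr, the device iova and, for VRAM buffers, a suspend-time shadow copy in vaddr_save.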
+ */ + +#ifndef __PHYTIUM_GEM_H__ +#define __PHYTIUM_GEM_H__ + +#include + +struct phytium_gem_object { + struct drm_gem_object base; + phys_addr_t phys_addr; + dma_addr_t iova; + void *vaddr; + unsigned long size; + struct sg_table *sgt; + char memory_type; + char reserve[3]; + struct list_head list; + void *vaddr_save; +}; + +#define to_phytium_gem_obj(obj) container_of(obj, struct phytium_gem_object, base) + +int phytium_memory_pool_init(struct device *dev, struct phytium_display_private *priv); +void phytium_memory_pool_fini(struct device *dev, struct phytium_display_private *priv); +int phytium_gem_mmap_obj(struct drm_gem_object *obj, struct vm_area_struct *vma); +int phytium_gem_mmap(struct file *filp, struct vm_area_struct *vma); +void phytium_gem_free_object(struct drm_gem_object *obj); +struct sg_table *phytium_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *phytium_gem_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, struct sg_table *sgt); +int phytium_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev, unsigned int handle); +struct phytium_gem_object *phytium_gem_create_object(struct drm_device *dev, unsigned long size); +int phytium_gem_dumb_create(struct drm_file *file, struct drm_device *dev, + struct drm_mode_create_dumb *args); +int phytium_gem_suspend(struct drm_device *drm_dev); +void phytium_gem_resume(struct drm_device *drm_dev); +#endif /* __PHYTIUM_GEM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_panel.c b/drivers/gpu/drm/phytium/phytium_panel.c new file mode 100644 index 0000000000000000000000000000000000000000..1cd266e868b37a9e376885776da5223ed75a75e0 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.c @@ -0,0 +1,420 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
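+ * eDP backlight control: via DPCD registers over AUX when the panel advertises it, otherwise via the display controller's own backlight hardware.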
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_dp.h" +#include "phytium_panel.h" + +static int +phytium_dp_aux_set_backlight(struct phytium_panel *panel, unsigned int level) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + unsigned char vals[2] = { 0x0 }; + + vals[0] = level; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) { + vals[0] = (level & 0xFF00) >> 8; + vals[1] = (level & 0xFF); + } + + if (drm_dp_dpcd_write(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + vals, sizeof(vals)) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight level\n"); + return -EIO; + } + + return 0; +} + +static unsigned int phytium_dp_aux_get_backlight(struct phytium_panel *panel) +{ + unsigned char read_val[2] = { 0x0 }; + unsigned int level = 0; /* wide enough for the 16-bit brightness value */ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_read(&phytium_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, + read_val, sizeof(read_val)) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_BRIGHTNESS_MSB); + return 0; + } + + level = read_val[0]; + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + level = (read_val[0] << 8 | read_val[1]); + + return level; +} + +static void set_aux_backlight_enable(struct phytium_panel *panel, bool enable) +{ + u8 reg_val = 0; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (!(phytium_dp->edp_dpcd[1] & DP_EDP_BACKLIGHT_AUX_ENABLE_CAP)) + return; + + if (drm_dp_dpcd_readb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + &reg_val) < 0) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_DISPLAY_CONTROL_REGISTER); + return; + } + + if (enable) + reg_val |= DP_EDP_BACKLIGHT_ENABLE; + else + reg_val &= ~(DP_EDP_BACKLIGHT_ENABLE); + + if (drm_dp_dpcd_writeb(&phytium_dp->aux, DP_EDP_DISPLAY_CONTROL_REGISTER, + reg_val) != 1) { + DRM_DEBUG_KMS("Failed to %s aux backlight\n", + enable ?
"enable" : "disable"); + } +} + +static void phytium_dp_aux_enable_backlight(struct phytium_panel *panel) +{ + unsigned char dpcd_buf, new_dpcd_buf, edp_backlight_mode; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (drm_dp_dpcd_readb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &dpcd_buf) != 1) { + DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n", + DP_EDP_BACKLIGHT_MODE_SET_REGISTER); + return; + } + + new_dpcd_buf = dpcd_buf; + edp_backlight_mode = dpcd_buf & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + + switch (edp_backlight_mode) { + case DP_EDP_BACKLIGHT_CONTROL_MODE_PWM: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRESET: + case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT: + new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK; + new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD; + break; + + /* Do nothing when it is already DPCD mode */ + case DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD: + default: + break; + } + + if (new_dpcd_buf != dpcd_buf) { + if (drm_dp_dpcd_writeb(&phytium_dp->aux, + DP_EDP_BACKLIGHT_MODE_SET_REGISTER, new_dpcd_buf) < 0) { + DRM_DEBUG_KMS("Failed to write aux backlight mode\n"); + } + } + + set_aux_backlight_enable(panel, true); + phytium_dp_aux_set_backlight(panel, panel->level); +} + +static void phytium_dp_aux_disable_backlight(struct phytium_panel *panel) +{ + set_aux_backlight_enable(panel, false); +} + +static void phytium_dp_aux_setup_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + if (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT) + phytium_dp->panel.max = 0xFFFF; + else + phytium_dp->panel.max = 0xFF; + + phytium_dp->panel.min = 0; + phytium_dp->panel.level = phytium_dp_aux_get_backlight(panel); + phytium_dp->panel.backlight_enabled = (phytium_dp->panel.level != 0); +} + +static void phytium_dp_hw_poweron_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweron_panel(phytium_dp); +} + +static void phytium_dp_hw_poweroff_panel(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_poweroff_panel(phytium_dp); +} + +static int +phytium_dp_hw_set_backlight(struct phytium_panel *panel, uint32_t level) +{ + int ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, level); + + return ret; +} + +static uint32_t phytium_dp_hw_get_backlight(struct phytium_panel *panel) +{ + uint32_t ret; + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + ret = phytium_dp->funcs->dp_hw_get_backlight(phytium_dp); + + return ret; +} + +static void phytium_dp_hw_enable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_set_backlight(phytium_dp, phytium_dp->panel.level); + phytium_dp->funcs->dp_hw_enable_backlight(phytium_dp); +} + +static void phytium_dp_hw_disable_backlight(struct phytium_panel *panel) +{ + struct phytium_dp_device *phytium_dp = panel_to_dp_device(panel); + + phytium_dp->funcs->dp_hw_disable_backlight(phytium_dp); +} + +static void phytium_dp_hw_setup_backlight(struct phytium_panel *panel) +{ + struct drm_device *dev = panel->dev; + struct phytium_display_private *priv = dev->dev_private; + + panel->max = priv->info.backlight_max; + panel->min = 0; + panel->level = phytium_dp_hw_get_backlight(panel); +} + +void 
phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->edp_dpcd[1] & DP_EDP_TCON_BACKLIGHT_ADJUSTMENT_CAP && + (phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_AUX_SET_CAP) && + !(phytium_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_PWM_PIN_CAP)) { + DRM_DEBUG_KMS("AUX Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_aux_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_aux_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_aux_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_aux_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_aux_get_backlight; + } else { + DRM_DEBUG_KMS("SE Backlight Control Supported!\n"); + phytium_dp->panel.setup_backlight = phytium_dp_hw_setup_backlight; + phytium_dp->panel.enable_backlight = phytium_dp_hw_enable_backlight; + phytium_dp->panel.disable_backlight = phytium_dp_hw_disable_backlight; + phytium_dp->panel.set_backlight = phytium_dp_hw_set_backlight; + phytium_dp->panel.get_backlight = phytium_dp_hw_get_backlight; + } + phytium_dp->panel.poweron = phytium_dp_hw_poweron_panel; + phytium_dp->panel.poweroff = phytium_dp_hw_poweroff_panel; + mutex_init(&phytium_dp->panel.panel_lock); + phytium_dp->panel.dev = phytium_dp->dev; + + /* Upper limits from eDP 1.3 spec */ + phytium_dp->panel.panel_power_up_delay = 210; /* t1_t3 */ + phytium_dp->panel.backlight_on_delay = 50; /* t7 */ + phytium_dp->panel.backlight_off_delay = 50; + phytium_dp->panel.panel_power_down_delay = 0; /* t10 */ + phytium_dp->panel.panel_power_cycle_delay = 510; /* t11 + t12 */ +} + +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->panel.setup_backlight = NULL; + phytium_dp->panel.enable_backlight = NULL; + phytium_dp->panel.disable_backlight = NULL; + phytium_dp->panel.set_backlight = NULL; + phytium_dp->panel.get_backlight = NULL; + phytium_dp->panel.poweron = NULL; + phytium_dp->panel.poweroff = NULL; +} + +void phytium_panel_enable_backlight(struct phytium_panel *panel) +{ + + if (panel->enable_backlight) { + mutex_lock(&panel->panel_lock); + msleep(panel->backlight_on_delay); + panel->enable_backlight(panel); + panel->backlight_enabled = true; + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_disable_backlight(struct phytium_panel *panel) +{ + if (panel->disable_backlight) { + mutex_lock(&panel->panel_lock); + panel->disable_backlight(panel); + panel->backlight_enabled = false; + msleep(panel->backlight_off_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweron(struct phytium_panel *panel) +{ + if (panel->poweron) { + mutex_lock(&panel->panel_lock); + panel->poweron(panel); + panel->power_enabled = true; + msleep(panel->panel_power_up_delay); + mutex_unlock(&panel->panel_lock); + } +} + +void phytium_panel_poweroff(struct phytium_panel *panel) +{ + if (panel->poweroff) { + mutex_lock(&panel->panel_lock); + msleep(panel->panel_power_down_delay); + panel->poweroff(panel); + panel->power_enabled = false; + mutex_unlock(&panel->panel_lock); + } +} + +static uint32_t phytium_scale(uint32_t source_val, + uint32_t source_min, uint32_t source_max, + uint32_t target_min, uint32_t target_max) +{ + uint64_t target_val; + + WARN_ON(source_min > source_max); + WARN_ON(target_min > target_max); + + /* defensive */ + source_val = clamp(source_val, source_min, source_max); + + /* avoid overflows */ + target_val = mul_u32_u32(source_val - source_min, target_max - 
target_min); + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); + target_val += target_min; + + return target_val; +} + +static inline uint32_t +phytium_scale_hw_to_user(struct phytium_panel *panel, uint32_t hw_level, uint32_t user_max) +{ + return phytium_scale(hw_level, panel->min, panel->max, + 0, user_max); +} + +static inline uint32_t +phytium_scale_user_to_hw(struct phytium_panel *panel, u32 user_level, u32 user_max) +{ + return phytium_scale(user_level, 0, user_max, + panel->min, panel->max); +} + +static int phytium_backlight_device_update_status(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret = 0; + + DRM_DEBUG_KMS("updating phytium_backlight, brightness=%d/%d\n", + bd->props.brightness, bd->props.max_brightness); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + hw_level = phytium_scale_user_to_hw(panel, bd->props.brightness, bd->props.max_brightness); + + if ((panel->set_backlight) && (panel->backlight_enabled)) { + mutex_lock(&panel->panel_lock); + ret = panel->set_backlight(panel, hw_level); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + + return ret; +} + +static int phytium_backlight_device_get_brightness(struct backlight_device *bd) +{ + struct phytium_panel *panel = bl_get_data(bd); + struct drm_device *dev = panel->dev; + uint32_t hw_level = 0; + int ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + if (panel->get_backlight && panel->backlight_enabled) { + mutex_lock(&panel->panel_lock); + hw_level = panel->get_backlight(panel); + panel->level = hw_level; + mutex_unlock(&panel->panel_lock); + } + drm_modeset_unlock(&dev->mode_config.connection_mutex); + ret = phytium_scale_hw_to_user(panel, hw_level, bd->props.max_brightness); + DRM_DEBUG_KMS("get phytium_backlight, brightness=%d/%d\n", + ret, bd->props.max_brightness); + + return ret; +} + +static const struct backlight_ops phytium_backlight_device_ops = { + .update_status = phytium_backlight_device_update_status, + .get_brightness = phytium_backlight_device_get_brightness, +}; + +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp) +{ + struct backlight_properties props; + char bl_name[16]; + + if (phytium_dp->panel.setup_backlight) { + mutex_lock(&phytium_dp->panel.panel_lock); + phytium_dp->panel.setup_backlight(&phytium_dp->panel); + mutex_unlock(&phytium_dp->panel.panel_lock); + } else { + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.max_brightness = PHYTIUM_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + props.brightness = phytium_scale_hw_to_user(&phytium_dp->panel, phytium_dp->panel.level, + props.max_brightness); + snprintf(bl_name, sizeof(bl_name), "phytium_bl%d", phytium_dp->port); + + phytium_dp->panel.bl_device = + backlight_device_register(bl_name, + phytium_dp->connector.kdev, + &phytium_dp->panel, + &phytium_backlight_device_ops, + &props); + + if (IS_ERR(phytium_dp->panel.bl_device)) { + DRM_ERROR("Failed to register backlight: %ld\n", + PTR_ERR(phytium_dp->panel.bl_device)); + phytium_dp->panel.bl_device = NULL; + return -ENODEV; + } + + DRM_DEBUG_KMS("Connector %s backlight sysfs interface registered\n", + phytium_dp->connector.name); + + return 0; +} + +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp) +{ + if (phytium_dp->panel.bl_device) { + 
backlight_device_unregister(phytium_dp->panel.bl_device); + phytium_dp->panel.bl_device = NULL; + } +} diff --git a/drivers/gpu/drm/phytium/phytium_panel.h b/drivers/gpu/drm/phytium/phytium_panel.h new file mode 100644 index 0000000000000000000000000000000000000000..f9e2c7e65896f2c129c5d5274f64beb091a73b23 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_panel.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PANEL_H__ +#define __PHYTIUM_PANEL_H__ +#include "phytium_dp.h" + +#define PHYTIUM_MAX_BL_LEVEL 0xFF + +struct phytium_panel { + struct drm_device *dev; + bool backlight_enabled; + bool power_enabled; + bool reserve1[2]; + unsigned int min; + unsigned int level; + unsigned int max; + struct backlight_device *bl_device; + void (*setup_backlight)(struct phytium_panel *panel); + uint32_t (*get_backlight)(struct phytium_panel *panel); + int (*set_backlight)(struct phytium_panel *panel, uint32_t level); + void (*disable_backlight)(struct phytium_panel *panel); + void (*enable_backlight)(struct phytium_panel *panel); + void (*poweron)(struct phytium_panel *panel); + void (*poweroff)(struct phytium_panel *panel); + struct mutex panel_lock; + uint32_t panel_power_up_delay; + uint32_t backlight_on_delay; + uint32_t backlight_off_delay; + uint32_t panel_power_down_delay; + uint32_t panel_power_cycle_delay; +}; + +void phytium_dp_panel_init_backlight_funcs(struct phytium_dp_device *phytium_dp); +void phytium_dp_panel_release_backlight_funcs(struct phytium_dp_device *phytium_dp); +int phytium_edp_backlight_device_register(struct phytium_dp_device *phytium_dp); +void phytium_edp_backlight_device_unregister(struct phytium_dp_device *phytium_dp); +void phytium_panel_enable_backlight(struct phytium_panel *panel); +void phytium_panel_disable_backlight(struct phytium_panel *panel); +void phytium_panel_poweron(struct phytium_panel *panel); +void phytium_panel_poweroff(struct phytium_panel *panel); + +#endif /* __PHYTIUM_PANEL_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_pci.c b/drivers/gpu/drm/phytium/phytium_pci.c new file mode 100644 index 0000000000000000000000000000000000000000..f93ab85395c5dc436f9788e124d4f42682c4a90c --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.c @@ -0,0 +1,387 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
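+ * PCI glue: VRAM pool discovery (BAR 2), optional MSI setup and the PX210 DMA channel used to save and restore VRAM contents across suspend.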
+ */ + +#include +#include +#include +#include "phytium_display_drv.h" +#include "phytium_pci.h" +#include "phytium_dp.h" +#include "phytium_gem.h" +#include "px210_dc.h" +#include "px210_dp.h" +#include "pe220x_dc.h" +#include "pe220x_dp.h" + +int dc_msi_enable; +module_param(dc_msi_enable, int, 0644); +MODULE_PARM_DESC(dc_msi_enable, "Enable DC msi interrupt (0-disabled; 1-enabled; default-0)"); + +void phytium_pci_vram_hw_init(struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + pci_priv->dc_hw_vram_init(priv, priv->pool_phys_addr, priv->pool_size); +} + +int phytium_pci_vram_init(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + int ret = 0; + + priv->pool_phys_addr = pci_resource_start(pdev, 2); + priv->pool_size = pci_resource_len(pdev, 2); + if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) { + priv->pool_virt_addr = devm_ioremap_wc(&pdev->dev, priv->pool_phys_addr, + priv->pool_size); + if (priv->pool_virt_addr == NULL) { + DRM_ERROR("pci vram ioremap fail, addr:0x%llx, size:0x%llx\n", + priv->pool_phys_addr, priv->pool_size); + ret = -EINVAL; + goto failed_ioremap; + } + ret = phytium_memory_pool_init(&pdev->dev, priv); + if (ret) + goto failed_init_memory_pool; + + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = priv->pool_size; + priv->support_memory_type = MEMORY_TYPE_VRAM; + priv->vram_hw_init = phytium_pci_vram_hw_init; + } else { + DRM_DEBUG_KMS("not support vram\n"); + priv->pool_virt_addr = NULL; + priv->mem_state[PHYTIUM_MEM_VRAM_TOTAL] = 0; + priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED; + priv->vram_hw_init = NULL; + } + + return 0; + +failed_init_memory_pool: + devm_iounmap(&pdev->dev, priv->pool_virt_addr); +failed_ioremap: + return ret; +} + +void phytium_pci_vram_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + if (priv->support_memory_type == MEMORY_TYPE_VRAM) { + phytium_memory_pool_fini(&pdev->dev, priv); + devm_iounmap(&pdev->dev, priv->pool_virt_addr); + } +} + +static bool phytium_pci_dma_chan_filter(struct dma_chan *chan, void *param) +{ + struct phytium_dma_slave *s = param; + + if (s->dma_dev != chan->device->dev) + return false; + + if (s->chan_id == chan->chan_id) + return true; + else + return false; +} + +int phytium_pci_dma_init(struct phytium_display_private *priv) +{ + struct pci_dev *dma_dev, *gpu_dev; + struct drm_device *drm_dev = priv->dev; + dma_cap_mask_t mask; + struct phytium_dma_slave s; + int ret = 0; + u16 cmd; + + /* check px210 gpu enable */ + gpu_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc20, NULL); + if (!gpu_dev) { + DRM_INFO("failed to get gpu_dev\n"); + ret = -ENODEV; + goto failed; + } + + pci_read_config_word(gpu_dev, PCI_COMMAND, &cmd); + if (!(cmd & PCI_COMMAND_MASTER)) { + DRM_INFO("gpu_dev master is disabled\n"); + ret = -ENODEV; + goto failed; + } + + dma_dev = pci_get_device(PCI_VENDOR_ID_PHYTIUM, 0xdc3c, NULL); + if (!dma_dev) { + DRM_INFO("failed to get dma_dev\n"); + ret = -ENODEV; + goto failed; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + s.dma_dev = &dma_dev->dev; + s.chan_id = 2; + priv->dma_chan = dma_request_channel(mask, phytium_pci_dma_chan_filter, &s); + if (!priv->dma_chan) { + DRM_DEV_ERROR(drm_dev->dev, "failed to request dma chan\n"); + ret = -EBUSY; + goto failed; + } + priv->dma_inited = 1; + +failed: + return ret; +} + +void phytium_pci_dma_fini(struct phytium_display_private *priv) +{ + if (priv->dma_inited) + dma_release_channel(priv->dma_chan); + priv->dma_inited = 0; + 
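+ /* drop the stale pointer; a later suspend will request the channel again */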
priv->dma_chan = NULL; +} + +static struct phytium_display_private* +phytium_pci_private_init(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = NULL; + struct phytium_pci_private *pci_priv = NULL; + struct phytium_device_info *phytium_info = (struct phytium_device_info *)ent->driver_data; + int i = 0; + resource_size_t io_addr, io_size; + + pci_priv = devm_kzalloc(&pdev->dev, sizeof(*pci_priv), GFP_KERNEL); + if (!pci_priv) { + DRM_ERROR("no memory to allocate for drm_display_private\n"); + goto failed_malloc_priv; + } + + memset(pci_priv, 0, sizeof(*pci_priv)); + priv = &pci_priv->base; + phytium_display_private_init(priv, dev); + + memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info)); + DRM_DEBUG_KMS("priv->info.num_pipes :%d\n", priv->info.num_pipes); + priv->info.pipe_mask = ((pdev->subsystem_device >> PIPE_MASK_SHIFT) & PIPE_MASK_MASK); + priv->info.edp_mask = ((pdev->subsystem_device >> EDP_MASK_SHIFT) & EDP_MASK_MASK); + priv->info.num_pipes = 0; + for_each_pipe_masked(priv, i) + priv->info.num_pipes++; + if (priv->info.num_pipes == 0) { + DRM_ERROR("num_pipes is zero, so exit init\n"); + goto failed_init_numpipe; + } + + io_addr = pci_resource_start(pdev, 0); + io_size = pci_resource_len(pdev, 0); + priv->regs = ioremap(io_addr, io_size); + if (priv->regs == NULL) { + DRM_ERROR("pci bar0 ioremap fail, addr:0x%llx, size:0x%llx\n", io_addr, io_size); + goto failed_ioremap; + } + + priv->irq = pdev->irq; + if (IS_PX210(priv)) { + pci_priv->dc_hw_vram_init = px210_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = px210_dc_hw_clear_msi_irq; + priv->dc_hw_fb_format_check = px210_dc_hw_fb_format_check; + } else if (IS_PE220X(priv)) { + pci_priv->dc_hw_vram_init = pe220x_dc_hw_vram_init; + priv->dc_hw_clear_msi_irq = NULL; + priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check; + } + + return priv; + +failed_ioremap: +failed_init_numpipe: + devm_kfree(&pdev->dev, pci_priv); +failed_malloc_priv: + return NULL; +} + +static void +phytium_pci_private_fini(struct pci_dev *pdev, struct phytium_display_private *priv) +{ + struct phytium_pci_private *pci_priv = to_pci_priv(priv); + + if (priv->regs) + iounmap(priv->regs); + + devm_kfree(&pdev->dev, pci_priv); +} + +static int phytium_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct phytium_display_private *priv = NULL; + struct drm_device *dev = NULL; + int ret = 0; + + dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev); + if (IS_ERR(dev)) { + DRM_ERROR("failed to allocate drm_device\n"); + return PTR_ERR(dev); + } + pci_set_drvdata(pdev, dev); + pci_set_master(pdev); + ret = pci_enable_device(pdev); + if (ret) { + DRM_ERROR("pci enable device fail\n"); + goto failed_enable_device; + } + + if (dc_msi_enable) { + ret = pci_enable_msi(pdev); + if (ret) + DRM_ERROR("pci enable msi fail\n"); + } + + dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)); + + priv = phytium_pci_private_init(pdev, ent); + if (priv) + dev->dev_private = priv; + else + goto failed_pci_private_init; + + ret = phytium_pci_vram_init(pdev, priv); + if (ret) { + DRM_ERROR("failed to init pci vram\n"); + goto failed_pci_vram_init; + } + + ret = drm_dev_register(dev, 0); + if (ret) { + DRM_ERROR("failed to register drm dev\n"); + goto failed_register_drm; + } + + phytium_dp_hpd_irq_setup(dev, true); + + return 0; + +failed_register_drm: + phytium_pci_vram_fini(pdev, priv); +failed_pci_vram_init: + phytium_pci_private_fini(pdev, 
priv); +failed_pci_private_init: + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); +failed_enable_device: + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); + + return -1; +} + +static void phytium_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + phytium_dp_hpd_irq_setup(dev, false); + cancel_work_sync(&priv->hotplug_work); + drm_dev_unregister(dev); + phytium_pci_vram_fini(pdev, priv); + phytium_pci_private_fini(pdev, priv); + if (pdev->msi_enabled) + pci_disable_msi(pdev); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); + drm_dev_put(dev); +} + +static void phytium_pci_shutdown(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = dev->dev_private; + + priv->display_shutdown(dev); +} + +static int phytium_pci_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + if (IS_PX210(priv)) + phytium_pci_dma_init(priv); + + ret = priv->display_pm_suspend(drm_dev); + if (ret < 0) + goto out; + + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + udelay(200); + +out: + return ret; +} + +static int phytium_pci_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct phytium_display_private *priv = drm_dev->dev_private; + int ret = 0; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + ret = pci_enable_device(pdev); + if (ret) + return ret; + pci_set_master(pdev); + + ret = priv->display_pm_resume(drm_dev); + if (IS_PX210(priv)) + phytium_pci_dma_fini(priv); + + return ret; +} + +static const struct dev_pm_ops phytium_pci_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_pci_pm_suspend, phytium_pci_pm_resume) +}; + +static const struct phytium_device_info px210_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PX210), + .total_pipes = 3, + .crtc_clock_max = PX210_DC_PIX_CLOCK_MAX, + .hdisplay_max = PX210_DC_HDISPLAY_MAX, + .vdisplay_max = PX210_DC_VDISPLAY_MAX, + .address_mask = PX210_DC_ADDRESS_MASK, + .backlight_max = PX210_DP_BACKLIGHT_MAX, +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct pci_device_id phytium_display_pci_ids[] = { + { PCI_VDEVICE(PHYTIUM, 0xdc22), (kernel_ulong_t)&px210_info }, + { PCI_VDEVICE(PHYTIUM, 0xdc3e), (kernel_ulong_t)&pe220x_info }, + { /* End: all zeroes */ } +}; +MODULE_DEVICE_TABLE(pci, phytium_display_pci_ids); + +struct pci_driver phytium_pci_driver = { + .name = "phytium_display_pci", + .id_table = phytium_display_pci_ids, + .probe = phytium_pci_probe, + .remove = phytium_pci_remove, + .shutdown = phytium_pci_shutdown, + .driver.pm = &phytium_pci_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_pci.h b/drivers/gpu/drm/phytium/phytium_pci.h new file mode 100644 index 0000000000000000000000000000000000000000..92b08fcb045296bc8507960ecd73366994254ec4 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_pci.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#ifndef __PHYTIUM_PCI_H__
+#define __PHYTIUM_PCI_H__
+
+#include "phytium_display_drv.h"
+
+struct phytium_pci_private {
+	struct phytium_display_private base;
+	void (*dc_hw_vram_init)(struct phytium_display_private *priv, resource_size_t vram_addr,
+				resource_size_t vram_size);
+};
+
+struct phytium_dma_slave {
+	struct device *dma_dev;
+	u32 chan_id;
+};
+
+#define to_pci_priv(priv) container_of(priv, struct phytium_pci_private, base)
+
+extern struct pci_driver phytium_pci_driver;
+#endif /* __PHYTIUM_PCI_H__ */
diff --git a/drivers/gpu/drm/phytium/phytium_plane.c b/drivers/gpu/drm/phytium/phytium_plane.c
new file mode 100644
index 0000000000000000000000000000000000000000..9f35d57cd726a34af80cacaf382facc086116b95
--- /dev/null
+++ b/drivers/gpu/drm/phytium/phytium_plane.c
@@ -0,0 +1,632 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Phytium display drm driver
+ *
+ * Copyright (C) 2021-2023, Phytium Technology Co., Ltd.
+ */
+
+#include <linux/slab.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_blend.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_atomic_helper.h>
+
+#include <drm/drm_plane_helper.h>
+
+#include "phytium_display_drv.h"
+#include "phytium_plane.h"
+#include "phytium_fb.h"
+#include "phytium_gem.h"
+#include "phytium_crtc.h"
+#include "px210_dc.h"
+#include "pe220x_dc.h"
+#include "phytium_reg.h"
+
+#define PHYTIUM_CURS_W_SIZE 32
+#define PHYTIUM_CURS_H_SIZE 32
+
+void phytium_plane_destroy(struct drm_plane *plane)
+{
+	struct phytium_plane *phytium_plane = to_phytium_plane(plane);
+
+	drm_plane_cleanup(plane);
+	kfree(phytium_plane);
+}
+
+/**
+ * phytium_plane_atomic_get_property - fetch plane property value
+ * @plane: plane to fetch property for
+ * @state: state containing the property value
+ * @property: property to look up
+ * @val: pointer to write property value into
+ *
+ * The DRM core does not store shadow copies of properties for
+ * atomic-capable drivers. This entrypoint is used to fetch
+ * the current value of a driver-specific plane property.
+ */
+static int
+phytium_plane_atomic_get_property(struct drm_plane *plane,
+				  const struct drm_plane_state *state,
+				  struct drm_property *property,
+				  uint64_t *val)
+{
+	DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name);
+	return -EINVAL;
+}
+
+/**
+ * phytium_plane_atomic_set_property - set plane property value
+ * @plane: plane to set property for
+ * @state: state to update property value in
+ * @property: property to set
+ * @val: value to set property to
+ *
+ * Writes the specified property value for a plane into the provided atomic
+ * state object.
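+ *
+ * The driver registers no driver-specific plane properties (only the
+ * standard rotation property, which the DRM core decodes itself), so
+ * any property that reaches this hook is unknown by construction.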
+ * + * Returns 0 on success, -EINVAL on unrecognized properties + */ +int +phytium_plane_atomic_set_property(struct drm_plane *plane, + struct drm_plane_state *state, + struct drm_property *property, + uint64_t val) +{ + DRM_DEBUG_KMS("Unknown plane property [PROP:%d:%s]\n", property->base.id, property->name); + return -EINVAL; +} + +struct drm_plane_state * +phytium_plane_atomic_duplicate_state(struct drm_plane *plane) +{ + struct drm_plane_state *state = NULL; + struct phytium_plane_state *phytium_state = NULL; + + phytium_state = kmemdup(plane->state, sizeof(*phytium_state), GFP_KERNEL); + + if (!phytium_state) + return NULL; + + state = &phytium_state->base; + if (state->fb) + drm_framebuffer_get(state->fb); + + state->fence = NULL; + state->commit = NULL; + + return state; +} + +void +phytium_plane_atomic_destroy_state(struct drm_plane *plane, struct drm_plane_state *state) +{ + struct phytium_plane_state *phytium_state = to_phytium_plane_state(state); + + __drm_atomic_helper_plane_destroy_state(state); + kfree(phytium_state); +} + +const struct drm_plane_funcs phytium_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = phytium_plane_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_get_property = phytium_plane_atomic_get_property, + .atomic_set_property = phytium_plane_atomic_set_property, + .atomic_duplicate_state = phytium_plane_atomic_duplicate_state, + .atomic_destroy_state = phytium_plane_atomic_destroy_state, +}; + +static int +phytium_plane_atomic_check(struct drm_plane *plane, struct drm_atomic_state *atomic_state) +{ + struct drm_plane_state *state = drm_atomic_get_new_plane_state(atomic_state, + plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct drm_framebuffer *fb = state->fb; + struct drm_crtc *crtc = state->crtc; + struct drm_crtc_state *crtc_state; + int src_x, src_y, src_w, src_h; + unsigned long base_offset; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + + if ((!fb) || (!crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + if (phytium_crtc->scale_enable) + return -EINVAL; + if ((src_w != PHYTIUM_CURS_W_SIZE) || (src_h != PHYTIUM_CURS_W_SIZE)) { + DRM_INFO("Invalid cursor size(%d, %d)\n", src_w, src_h); + return -EINVAL; + } + } else if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + src_x = state->src_x >> 16; + src_y = state->src_y >> 16; + src_w = state->src_w >> 16; + src_h = state->src_h >> 16; + + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + if (base_offset & (priv->info.address_mask)) { + DRM_ERROR("fb base address is not aligned by 0x%lx byte\n", + priv->info.address_mask); + return -EINVAL; + } + + if (src_w != state->crtc_w || src_h != state->crtc_h) { + DRM_ERROR("scale not support: crtc_w(0x%x)/h(0x%x) src_w(0x%x)/h(0x%x)\n", + state->crtc_w, state->crtc_h, src_w, src_h); + return -EINVAL; + } + + if ((state->crtc_x < 0) || (state->crtc_y < 0)) { + DRM_ERROR("crtc_x(0x%x)/y(0x%x) of drm plane state is invalid\n", + state->crtc_x, state->crtc_y); + return -EINVAL; + } + + if ((state->crtc_x + state->crtc_w > crtc_state->adjusted_mode.hdisplay) + || (state->crtc_y + state->crtc_h > crtc_state->adjusted_mode.vdisplay)) { + DRM_ERROR("plane out of crtc region\n"); + return -EINVAL; + 
} + } + + return 0; +} + +static void phytium_dc_get_plane_parameter(struct drm_plane *plane) +{ + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + struct phytium_framebuffer *phytium_fb = to_phytium_framebuffer(fb); + struct phytium_gem_object *phytium_gem_obj = NULL; + int i, num_planes = 0; + const struct drm_format_info *info; + + info = drm_format_info(fb->format->format); + num_planes = info ? info->num_planes : 1; + + for (i = 0; i < num_planes; i++) { + phytium_gem_obj = phytium_fb->phytium_gem_obj[i]; + phytium_plane->iova[i] = phytium_gem_obj->iova + fb->offsets[i]; + phytium_plane->size[i] = phytium_gem_obj->size - fb->offsets[i]; + + if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE0; + else if (fb->modifier == DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC) + phytium_plane->tiling[i] = FRAMEBUFFER_TILE_MODE3; + else if (fb->modifier == DRM_FORMAT_MOD_LINEAR) + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + else + phytium_plane->tiling[i] = FRAMEBUFFER_LINEAR; + + if (i == 0) { + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_BGRA1010102: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB2101010; + break; + + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_BGRA8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB8888; + break; + + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_BGRX8888: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB8888; + break; + + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_ABGR4444: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_BGRA4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB4444; + break; + + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_BGRX4444: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB4444; + break; + + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_BGRA5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_ARGB1555; + break; + + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_RGBX5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->format = FRAMEBUFFER_FORMAT_XRGB1555; + break; + + case DRM_FORMAT_RGB565: + case DRM_FORMAT_BGR565: + phytium_plane->format = FRAMEBUFFER_FORMAT_RGB565; + break; + + case DRM_FORMAT_YUYV: + phytium_plane->format = FRAMEBUFFER_FORMAT_YUYV; + break; + + case DRM_FORMAT_UYVY: + phytium_plane->format = FRAMEBUFFER_FORMAT_UYVY; + break; + case DRM_FORMAT_NV16: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV16; + break; + case DRM_FORMAT_NV12: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + case DRM_FORMAT_NV21: + phytium_plane->format = FRAMEBUFFER_FORMAT_NV12; + break; + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + + switch (fb->format->format) { + case DRM_FORMAT_ARGB2101010: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB4444: + case DRM_FORMAT_XRGB4444: + case DRM_FORMAT_ARGB1555: + case DRM_FORMAT_XRGB1555: + case DRM_FORMAT_RGB565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_ABGR2101010: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_XBGR8888: + case 
DRM_FORMAT_ABGR4444: + case DRM_FORMAT_XBGR4444: + case DRM_FORMAT_ABGR1555: + case DRM_FORMAT_XBGR1555: + case DRM_FORMAT_BGR565: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ABGR; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_RGBA1010102: + case DRM_FORMAT_RGBA8888: + case DRM_FORMAT_RGBX8888: + case DRM_FORMAT_RGBA4444: + case DRM_FORMAT_RGBX4444: + case DRM_FORMAT_RGBA5551: + case DRM_FORMAT_RGBX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_RGBA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_BGRA1010102: + case DRM_FORMAT_BGRA8888: + case DRM_FORMAT_BGRX8888: + case DRM_FORMAT_BGRA4444: + case DRM_FORMAT_BGRX4444: + case DRM_FORMAT_BGRA5551: + case DRM_FORMAT_BGRX5551: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_BGRA; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + case DRM_FORMAT_YUYV: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_NV16: + case DRM_FORMAT_NV12: + phytium_plane->swizzle = FRAMEBUFFER_SWIZZLE_ARGB; + phytium_plane->uv_swizzle = FRAMEBUFFER_UVSWIZZLE_DISABLE; + break; + + default: + DRM_ERROR("unsupported pixel format (format = %d)\n", + fb->format->format); + return; + } + } + } +} + +static void phytium_dc_primary_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int src_x, src_y, crtc_x, crtc_y, crtc_w, crtc_h; + unsigned long base_offset; + int config; + + src_x = plane->state->src_x >> 16; + src_y = plane->state->src_y >> 16; + crtc_x = plane->state->crtc_x; + crtc_y = plane->state->crtc_y; + crtc_w = plane->state->crtc_w; + crtc_h = plane->state->crtc_h; + + if (phytium_plane->dc_hw_update_dcreq) + phytium_plane->dc_hw_update_dcreq(plane); + phytium_plane->dc_hw_update_primary_hi_addr(plane); + + /* config dc */ + /* Y */ + base_offset = src_x * fb->format->cpp[0] + src_y*fb->pitches[0]; + phytium_writel_reg(priv, (phytium_plane->iova[0] + base_offset) & ADDRESS_MASK, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[0], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE); + + /* U */ + phytium_writel_reg(priv, phytium_plane->iova[1] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[1], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_U_STRIDE); + + /* V */ + phytium_writel_reg(priv, phytium_plane->iova[2] & 0xffffffff, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS); + phytium_writel_reg(priv, ALIGN(fb->pitches[2], 128), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_V_STRIDE); + + /* size */ + phytium_writel_reg(priv, (crtc_w & WIDTH_MASK) | ((crtc_h&HEIGHT_MASK) << HEIGHT_SHIFT), + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_FRAMEBUFFER_SIZE); + /* config */ + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config &= ~(FRAMEBUFFER_FORMAT_MASK << FRAMEBUFFER_FORMAT_SHIFT); + config |= (phytium_plane->format << FRAMEBUFFER_FORMAT_SHIFT); + config &= ~(1 << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config |= (phytium_plane->uv_swizzle << FRAMEBUFFER_UVSWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_SWIZZLE_MASK << FRAMEBUFFER_SWIZZLE_SHIFT); + config |= (phytium_plane->swizzle << 
FRAMEBUFFER_SWIZZLE_SHIFT); + config &= ~(FRAMEBUFFER_TILE_MODE_MASK << FRAMEBUFFER_TILE_MODE_SHIFT); + config |= (phytium_plane->tiling[0] << FRAMEBUFFER_TILE_MODE_SHIFT); + config &= (~FRAMEBUFFER_CLEAR); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); +} + +static void phytium_dc_cursor_plane_update(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + struct drm_framebuffer *fb = plane->state->fb; + int phys_pipe = phytium_plane->phys_pipe; + int config; + unsigned long iova; + + phytium_plane->enable = 1; + phytium_plane->cursor_hot_x = fb->hot_x; + phytium_plane->cursor_hot_y = fb->hot_y; + phytium_plane->cursor_x = plane->state->crtc_x + fb->hot_x; + phytium_plane->cursor_y = plane->state->crtc_y + fb->hot_y; + + config = CURSOR_FORMAT_ARGB8888 | + ((phytium_plane->cursor_hot_y & CURSOR_HOT_Y_MASK) << CURSOR_HOT_Y_SHIFT) | + ((phytium_plane->cursor_hot_x & CURSOR_HOT_X_MASK) << CURSOR_HOT_X_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + + config = ((phytium_plane->cursor_x & CURSOR_X_MASK) << CURSOR_X_SHIFT) | + ((phytium_plane->cursor_y & CURSOR_Y_MASK) << CURSOR_Y_SHIFT); + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_LOCATION); + iova = phytium_plane->iova[0]; + phytium_writel_reg(priv, iova & 0xffffffff, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_CURSOR_ADDRESS); + if (phytium_plane->dc_hw_update_cursor_hi_addr) + phytium_plane->dc_hw_update_cursor_hi_addr(plane, iova); +} + +static void phytium_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_framebuffer *fb, *old_fb; + + DRM_DEBUG_KMS("update plane: type=%d\n", plane->type); + if (!plane->state->crtc || !plane->state->fb) + return; + + fb = plane->state->fb; + old_fb = old_state->fb; + + if (fb) + drm_framebuffer_get(fb); + if (old_fb) + drm_framebuffer_put(old_fb); + + phytium_dc_get_plane_parameter(plane); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) + phytium_dc_primary_plane_update(plane); + else if (plane->type == DRM_PLANE_TYPE_CURSOR) + phytium_dc_cursor_plane_update(plane); +} + +static void phytium_plane_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + int config; + struct drm_framebuffer *old_fb; + + old_fb = old_state->fb; + if (old_fb) + drm_framebuffer_put(old_fb); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + phytium_writel_reg(priv, CLEAR_VALUE_RED, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE); + config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + config |= FRAMEBUFFER_CLEAR; + phytium_writel_reg(priv, config, priv->dc_reg_base[phys_pipe], + PHYTIUM_DC_FRAMEBUFFER_CONFIG); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + phytium_writel_reg(priv, CURSOR_FORMAT_DISABLED, + priv->dc_reg_base[phys_pipe], PHYTIUM_DC_CURSOR_CONFIG); + } +} + +const struct drm_plane_helper_funcs phytium_plane_helper_funcs = { + 
.prepare_fb = drm_gem_plane_helper_prepare_fb, + .atomic_check = phytium_plane_atomic_check, + .atomic_update = phytium_plane_atomic_update, + .atomic_disable = phytium_plane_atomic_disable, +}; + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = px210_dc_hw_update_dcreq; + phytium_plane->dc_hw_update_primary_hi_addr = px210_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_primary_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = pe220x_dc_hw_update_primary_hi_addr; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, formats, + format_count, + format_modifiers, + DRM_PLANE_TYPE_PRIMARY, "primary %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} + +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int phys_pipe) +{ + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = NULL; + struct phytium_plane_state *phytium_plane_state = NULL; + int ret = 0; + unsigned int flags = 0; + const uint32_t *formats = NULL; + uint32_t format_count; + const uint64_t *format_modifiers; + + phytium_plane = kzalloc(sizeof(*phytium_plane), GFP_KERNEL); + if (!phytium_plane) { + ret = -ENOMEM; + goto failed_malloc_plane; + } + + phytium_plane_state = kzalloc(sizeof(*phytium_plane_state), GFP_KERNEL); + if (!phytium_plane_state) { + ret = -ENOMEM; + goto failed_malloc_plane_state; + } + phytium_plane_state->base.plane = &phytium_plane->base; + phytium_plane_state->base.rotation = DRM_MODE_ROTATE_0; + phytium_plane->base.state = &phytium_plane_state->base; + phytium_plane->phys_pipe = phys_pipe; + + if (IS_PX210(priv)) { + phytium_plane->dc_hw_plane_get_format = px210_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = NULL; 
+ } else if (IS_PE220X(priv)) { + phytium_plane->dc_hw_plane_get_format = pe220x_dc_hw_plane_get_cursor_format; + phytium_plane->dc_hw_update_dcreq = NULL; + phytium_plane->dc_hw_update_primary_hi_addr = NULL; + phytium_plane->dc_hw_update_cursor_hi_addr = pe220x_dc_hw_update_cursor_hi_addr; + } + + phytium_plane->dc_hw_plane_get_format(&format_modifiers, &formats, &format_count); + ret = drm_universal_plane_init(dev, &phytium_plane->base, 0x0, + &phytium_plane_funcs, + formats, format_count, + format_modifiers, + DRM_PLANE_TYPE_CURSOR, "cursor %d", phys_pipe); + + if (ret) + goto failed_plane_init; + + flags = DRM_MODE_ROTATE_0; + drm_plane_create_rotation_property(&phytium_plane->base, DRM_MODE_ROTATE_0, flags); + drm_plane_helper_add(&phytium_plane->base, &phytium_plane_helper_funcs); + + return phytium_plane; +failed_plane_init: + kfree(phytium_plane_state); +failed_malloc_plane_state: + kfree(phytium_plane); +failed_malloc_plane: + return ERR_PTR(ret); +} diff --git a/drivers/gpu/drm/phytium/phytium_plane.h b/drivers/gpu/drm/phytium/phytium_plane.h new file mode 100644 index 0000000000000000000000000000000000000000..5527579b03489313f9ef8223e76cfcc2a3bf3def --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_plane.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLANE_H__ +#define __PHYTIUM_PLANE_H__ + +struct phytium_plane { + struct drm_plane base; + int phys_pipe; + unsigned long iova[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned long size[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int format; + unsigned int tiling[PHYTIUM_FORMAT_MAX_PLANE]; + unsigned int swizzle; + unsigned int uv_swizzle; + unsigned int rot_angle; + + /* only for cursor */ + bool enable; + bool reserve[3]; + unsigned int cursor_x; + unsigned int cursor_y; + unsigned int cursor_hot_x; + unsigned int cursor_hot_y; + + void (*dc_hw_plane_get_format)(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); + void (*dc_hw_update_dcreq)(struct drm_plane *plane); + void (*dc_hw_update_primary_hi_addr)(struct drm_plane *plane); + void (*dc_hw_update_cursor_hi_addr)(struct drm_plane *plane, uint64_t iova); +}; + +struct phytium_plane_state { + struct drm_plane_state base; +}; + +#define to_phytium_plane(x) container_of(x, struct phytium_plane, base) +#define to_phytium_plane_state(x) container_of(x, struct phytium_plane_state, base) + +struct phytium_plane *phytium_primary_plane_create(struct drm_device *dev, int pipe); +struct phytium_plane *phytium_cursor_plane_create(struct drm_device *dev, int pipe); +#endif /* __PHYTIUM_PLANE_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_platform.c b/drivers/gpu/drm/phytium/phytium_platform.c new file mode 100644 index 0000000000000000000000000000000000000000..d28aadba7c30ebe41007a7c02654c52360ffcb76 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Phytium display engine DRM driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
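+ *
+ * Platform-bus flavour of the display driver: the DC is discovered
+ * through devicetree ("phytium,dc") or ACPI ("PHYT0015") rather than
+ * over PCI. There is no VRAM BAR in this case; scanout memory comes
+ * either from an optional carveout region (the second MEM resource)
+ * or from unified system memory.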
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
+#include "phytium_display_drv.h"
+#include "phytium_platform.h"
+#include "phytium_dp.h"
+#include "phytium_gem.h"
+#include "pe220x_dc.h"
+#include "pe220x_dp.h"
+
+int phytium_platform_carveout_mem_init(struct platform_device *pdev,
+				       struct phytium_display_private *priv)
+{
+	struct resource *res;
+	int ret = 0;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		priv->pool_size = resource_size(res);
+		priv->pool_phys_addr = res->start;
+	}
+
+	if ((priv->pool_phys_addr != 0) && (priv->pool_size != 0)) {
+		priv->pool_virt_addr = ioremap_cache(priv->pool_phys_addr, priv->pool_size);
+		if (priv->pool_virt_addr == NULL) {
+			DRM_ERROR("failed to remap carveout mem(0x%llx)\n", priv->pool_phys_addr);
+			ret = -EINVAL;
+			goto failed_ioremap;
+		}
+		ret = phytium_memory_pool_init(&pdev->dev, priv);
+		if (ret)
+			goto failed_init_memory_pool;
+
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = priv->pool_size;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_CARVEOUT;
+		priv->vram_hw_init = NULL;
+	} else {
+		DRM_DEBUG_KMS("carveout memory not supported\n");
+		priv->mem_state[PHYTIUM_MEM_SYSTEM_CARVEOUT_TOTAL] = 0;
+		priv->support_memory_type = MEMORY_TYPE_SYSTEM_UNIFIED;
+		priv->vram_hw_init = NULL;
+	}
+
+	return 0;
+
+failed_init_memory_pool:
+	iounmap(priv->pool_virt_addr);
+failed_ioremap:
+	return ret;
+}
+
+void phytium_platform_carveout_mem_fini(struct platform_device *pdev,
+					struct phytium_display_private *priv)
+{
+	if (priv->support_memory_type == MEMORY_TYPE_SYSTEM_CARVEOUT) {
+		phytium_memory_pool_fini(&pdev->dev, priv);
+		iounmap(priv->pool_virt_addr);
+	}
+}
+
+static struct phytium_display_private *
+phytium_platform_private_init(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct device_node *node;
+	struct fwnode_handle *np;
+	struct phytium_display_private *priv = NULL;
+	struct phytium_platform_private *platform_priv = NULL;
+	struct phytium_device_info *phytium_info = NULL;
+	int i = 0, ret = 0;
+	struct resource *res;
+
+	platform_priv = devm_kzalloc(&pdev->dev, sizeof(*platform_priv), GFP_KERNEL);
+	if (!platform_priv) {
+		DRM_ERROR("no memory to allocate for phytium_platform_private\n");
+		goto exit;
+	}
+
+	priv = &platform_priv->base;
+	phytium_display_private_init(priv, dev);
+
+	if (pdev->dev.of_node) {
+		phytium_info = (struct phytium_device_info *)of_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get dts id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		node = pdev->dev.of_node;
+		ret = of_property_read_u8(node, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from dts\n");
+			goto failed;
+		}
+
+		ret = of_property_read_u8(node, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from dts\n");
+			goto failed;
+		}
+	} else if (has_acpi_companion(&pdev->dev)) {
+		phytium_info = (struct phytium_device_info *)acpi_device_get_match_data(&pdev->dev);
+		if (!phytium_info) {
+			DRM_ERROR("failed to get acpi id data(phytium_info)\n");
+			goto failed;
+		}
+
+		memcpy(&(priv->info), phytium_info, sizeof(struct phytium_device_info));
+		np = dev_fwnode(&(pdev->dev));
+		ret = fwnode_property_read_u8(np, "pipe_mask", &priv->info.pipe_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing pipe_mask property from acpi\n");
+			goto failed;
+		}
+		ret = fwnode_property_read_u8(np, "edp_mask", &priv->info.edp_mask);
+		if (ret < 0) {
+			dev_err(&pdev->dev, "missing edp_mask property from acpi\n");
+			goto failed;
+		}
+	}
+
+	priv->info.num_pipes = 0;
+	for_each_pipe_masked(priv, i)
+		priv->info.num_pipes++;
+	if (priv->info.num_pipes == 0) {
+		DRM_ERROR("num_pipes is zero, so exit init\n");
+		goto failed;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	priv->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->regs)) {
+		DRM_ERROR("failed to map io registers: %ld\n", PTR_ERR(priv->regs));
+		goto failed;
+	}
+
+	priv->irq = platform_get_irq(pdev, 0);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "failed to get irq\n");
+		goto failed;
+	}
+
+	if (IS_PE220X(priv)) {
+		priv->dc_hw_clear_msi_irq = NULL;
+		priv->dc_hw_fb_format_check = pe220x_dc_hw_fb_format_check;
+	}
+
+	return priv;
+
+failed:
+	devm_kfree(&pdev->dev, platform_priv);
+exit:
+	return NULL;
+}
+
+static void phytium_platform_private_fini(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_platform_private *platform_priv = to_platform_priv(priv);
+
+	devm_kfree(&pdev->dev, platform_priv);
+}
+
+static int phytium_platform_probe(struct platform_device *pdev)
+{
+	struct phytium_display_private *priv = NULL;
+	struct drm_device *dev = NULL;
+	int ret = 0;
+
+	dev = drm_dev_alloc(&phytium_display_drm_driver, &pdev->dev);
+	if (IS_ERR(dev)) {
+		DRM_ERROR("failed to allocate drm_device\n");
+		return PTR_ERR(dev);
+	}
+
+	dev_set_drvdata(&pdev->dev, dev);
+	dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
+
+	priv = phytium_platform_private_init(pdev);
+	if (priv) {
+		dev->dev_private = priv;
+	} else {
+		ret = -ENOMEM;
+		goto failed_platform_private_init;
+	}
+
+	ret = phytium_platform_carveout_mem_init(pdev, priv);
+	if (ret) {
+		DRM_ERROR("failed to init system carveout memory\n");
+		goto failed_carveout_mem_init;
+	}
+
+	ret = drm_dev_register(dev, 0);
+	if (ret) {
+		DRM_ERROR("failed to register drm dev\n");
+		goto failed_register_drm;
+	}
+
+	phytium_dp_hpd_irq_setup(dev, true);
+
+	return 0;
+
+failed_register_drm:
+	phytium_platform_carveout_mem_fini(pdev, priv);
+failed_carveout_mem_init:
+	phytium_platform_private_fini(pdev);
+failed_platform_private_init:
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+	return ret;
+}
+
+static int phytium_platform_remove(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	phytium_dp_hpd_irq_setup(dev, false);
+	cancel_work_sync(&priv->hotplug_work);
+	drm_dev_unregister(dev);
+	phytium_platform_carveout_mem_fini(pdev, priv);
+	phytium_platform_private_fini(pdev);
+	dev_set_drvdata(&pdev->dev, NULL);
+	drm_dev_put(dev);
+
+	return 0;
+}
+
+static void phytium_platform_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *dev = dev_get_drvdata(&pdev->dev);
+	struct phytium_display_private *priv = dev->dev_private;
+
+	priv->display_shutdown(dev);
+}
+
+static int phytium_platform_pm_suspend(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_suspend(drm_dev);
+}
+
+static int phytium_platform_pm_resume(struct device *dev)
+{
+	struct drm_device *drm_dev = dev_get_drvdata(dev);
+	struct phytium_display_private *priv = drm_dev->dev_private;
+
+	return priv->display_pm_resume(drm_dev);
+}
+
+static const struct 
dev_pm_ops phytium_platform_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(phytium_platform_pm_suspend, phytium_platform_pm_resume) +}; + +static const struct phytium_device_info pe220x_info = { + .platform_mask = BIT(PHYTIUM_PLATFORM_PE220X), + .total_pipes = 2, + .crtc_clock_max = PE220X_DC_PIX_CLOCK_MAX, + .hdisplay_max = PE220X_DC_HDISPLAY_MAX, + .vdisplay_max = PE220X_DC_VDISPLAY_MAX, + .address_mask = PE220X_DC_ADDRESS_MASK, + .backlight_max = PE220X_DP_BACKLIGHT_MAX, +}; + +static const struct of_device_id display_of_match[] = { + { + .compatible = "phytium,dc", + .data = &pe220x_info, + }, + { } +}; + +#ifdef CONFIG_ACPI +static const struct acpi_device_id display_acpi_ids[] = { + { + .id = "PHYT0015", + .driver_data = (kernel_ulong_t)&pe220x_info, + }, + {}, +}; + +MODULE_DEVICE_TABLE(acpi, display_acpi_ids); +#else +#define display_acpi_ids NULL +#endif + +struct platform_driver phytium_platform_driver = { + .driver = { + .name = "phytium_display_platform", + .of_match_table = of_match_ptr(display_of_match), + .acpi_match_table = ACPI_PTR(display_acpi_ids), + }, + .probe = phytium_platform_probe, + .remove = phytium_platform_remove, + .shutdown = phytium_platform_shutdown, + .driver.pm = &phytium_platform_pm_ops, +}; diff --git a/drivers/gpu/drm/phytium/phytium_platform.h b/drivers/gpu/drm/phytium/phytium_platform.h new file mode 100644 index 0000000000000000000000000000000000000000..42f6570b476fbc252b5702f7571c3c45c836d1d0 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_platform.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PHYTIUM_PLATFORM_H__ +#define __PHYTIUM_PLATFORM_H__ + +struct phytium_platform_private { + struct phytium_display_private base; +}; + +#define to_platform_priv(priv) container_of(priv, struct phytium_platform_private, base) + +extern struct platform_driver phytium_platform_driver; + +#endif /* __PHYTIUM_PLATFORM_H__ */ diff --git a/drivers/gpu/drm/phytium/phytium_reg.h b/drivers/gpu/drm/phytium/phytium_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..99ac9d4cb4d969831443d92db500eeda6af7fc62 --- /dev/null +++ b/drivers/gpu/drm/phytium/phytium_reg.h @@ -0,0 +1,365 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
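+ *
+ * Register offsets for the display controller (DC) and DisplayPort
+ * (DP) blocks. Every pipe owns a register window, addressed as
+ * <block base>(pipe) + <register offset>; bit-field values are listed
+ * indented below the register they belong to. For example, the
+ * framebuffer config of pipe 1 on PE220X lives at
+ * PE220X_DC_BASE(1) + PHYTIUM_DC_FRAMEBUFFER_CONFIG = 0x1000 + 0x1518.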
+ */ + +#ifndef __PHYTIUM_REG_H__ +#define __PHYTIUM_REG_H__ + +/******************************register base******************************************/ +#define PX210_PIPE_BASE(pipe) (0x8000*pipe) +#define PX210_DC_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x0000) +#define PX210_DCREQ_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x2000) +#define PX210_DP_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x3000) +#define PX210_ADDRESS_TRANSFORM_BASE 0x4000 +#define PX210_PHY_ACCESS_BASE(pipe) (PX210_PIPE_BASE(pipe) + 0x5000) + +#define PE220X_DC_BASE(pipe) (0x1000*pipe) +#define PE220X_DP_BASE(pipe) (0x4000 + 0x1000*pipe) +#define PE220X_ADDRESS_TRANSFORM_BASE 0x8000 +#define PE220X_PHY_ACCESS_BASE(pipe) (0x6000 + 0x1000*pipe) +/******************************register base end******************************************/ + +/******************************dc register start******************************************/ +#define PHYTIUM_DC_FRAMEBUFFER_Y_ADDRESS 0x1400 + #define ADDRESS_MASK 0xffffff80 +#define PHYTIUM_DC_FRAMEBUFFER_Y_STRIDE 0x1408 +#define PHYTIUM_DC_PANEL_CONFIG 0x1418 + #define PANEL_DATAENABLE_ENABLE (1<<0) + #define PANEL_DATA_ENABLE (1<<4) + #define PANEL_CLOCK_ENABLE (1<<8) +#define PHYTIUM_DC_HDISPLAY 0x1430 + #define HDISPLAY_END_SHIFT 0 + #define HDISPLAY_END_MASK 0x7fff + #define HDISPLAY_TOTAL_SHIFT 16 + #define HDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_HSYNC 0x1438 + #define HSYNC_START_SHIFT 0 + #define HSYNC_START_MASK 0x7fff + #define HSYNC_END_SHIFT 15 + #define HSYNC_END_MASK 0x7fff + #define HSYNC_PULSE_ENABLED (1<<30) + #define HSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_VDISPLAY 0x1440 + #define VDISPLAY_END_SHIFT 0 + #define VDISPLAY_END_MASK 0x7fff + #define VDISPLAY_TOTAL_SHIFT 16 + #define VDISPLAY_TOTAL_MASK 0x7fff +#define PHYTIUM_DC_VSYNC 0x1448 + #define VSYNC_START_SHIFT 0 + #define VSYNC_START_MASK 0x7fff + #define VSYNC_END_SHIFT 15 + #define VSYNC_END_MASK 0x7fff + #define VSYNC_PULSE_ENABLED (1<<30) + #define VSYNC_NEGATIVE (1<<31) +#define PHYTIUM_DC_DISPLAY_CURRENT_LOCATION 0x1450 +#define PHYTIUM_DC_GAMMA_INDEX 0x1458 + #define GAMMA_INDEX_MAX 256 +#define PHYTIUM_DC_GAMMA_DATA 0x1460 + #define GAMMA_BLUE_SHIFT 0 + #define GAMMA_BLUE_MASK 0x3ff + #define GAMMA_GREEN_SHIFT 10 + #define GAMMA_GREEN_MASK 0x3ff + #define GAMMA_RED_SHIFT 20 + #define GAMMA_RED_MASK 0x3ff +#define PHYTIUM_DC_CURSOR_CONFIG 0x1468 + #define CURSOR_FORMAT_DISABLED 0x0 + #define CURSOR_FORMAT_MASKMODE 0x3 + #define CURSOR_FORMAT_ARGB8888 0x2 + #define CURSOR_FORMAT_MASK 0x3 + #define CURSOR_HOT_Y_SHIFT 8 + #define CURSOR_HOT_Y_MASK 0x1f + #define CURSOR_HOT_X_SHIFT 16 + #define CURSOR_HOT_X_MASK 0x1f +#define PHYTIUM_DC_CURSOR_ADDRESS 0x146c +#define PHYTIUM_DC_CURSOR_LOCATION 0x1470 + #define CURSOR_X_SHIFT 0 + #define CURSOR_X_MASK 0x7fff + #define CURSOR_Y_SHIFT 16 + #define CURSOR_Y_MASK 0x7fff +#define PHYTIUM_DC_CURSOR_BACKGROUND 0x1474 +#define PHYTIUM_DC_CURSOR_FOREGROUND 0x1478 +#define PHYTIUM_DC_INT_STATUS 0x147c + #define INT_STATUS 0x1 +#define PHYTIUM_DC_INT_ENABLE 0x1480 + #define INT_ENABLE 0x1 + #define INT_DISABLE 0x0 + +#define PHYTIUM_DC_FRAMEBUFFER_CONFIG 0x1518 + #define FRAMEBUFFER_OUTPUT BIT(0) + #define FRAMEBUFFER_GAMMA_ENABLE BIT(2) + #define FRAMEBUFFER_VALID_PENDING BIT(3) + #define FRAMEBUFFER_RESET BIT(4) + #define FRAMEBUFFER_PROGRESS BIT(6) + #define FRAMEBUFFER_ROT_ANGLE_SHIFT (11) + #define FRAMEBUFFER_ROT_ANGLE_MASK (0x7) + #define FRAMEBUFFER_ROT_ANGLE_ROT0 (0) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_X (1) + #define FRAMEBUFFER_ROT_ANGLE_FLIP_Y (2) + #define 
FRAMEBUFFER_TILE_MODE_SHIFT (17) + #define FRAMEBUFFER_TILE_MODE_MASK (0x1f) + #define FRAMEBUFFER_LINEAR 0 + #define FRAMEBUFFER_TILE_MODE0 4 + #define FRAMEBUFFER_TILE_MODE3 7 + #define FRAMEBUFFER_FORMAT_SHIFT 26 + #define FRAMEBUFFER_FORMAT_MASK 0x3f + #define FRAMEBUFFER_FORMAT_XRGB4444 0x0 + #define FRAMEBUFFER_FORMAT_ARGB4444 0x1 + #define FRAMEBUFFER_FORMAT_XRGB1555 0x2 + #define FRAMEBUFFER_FORMAT_ARGB1555 0x3 + #define FRAMEBUFFER_FORMAT_RGB565 0x4 + #define FRAMEBUFFER_FORMAT_XRGB8888 0x5 + #define FRAMEBUFFER_FORMAT_ARGB8888 0x6 + #define FRAMEBUFFER_FORMAT_YUYV 0x7 + #define FRAMEBUFFER_FORMAT_UYVY 0x8 + #define FRAMEBUFFER_FORMAT_NV12 0x11 + #define FRAMEBUFFER_FORMAT_NV16 0x12 + #define FRAMEBUFFER_FORMAT_ARGB2101010 0x16 + #define FRAMEBUFFER_SWIZZLE_SHIFT 23 + #define FRAMEBUFFER_SWIZZLE_MASK 0x3 + #define FRAMEBUFFER_SWIZZLE_ARGB 0 + #define FRAMEBUFFER_SWIZZLE_RGBA 1 + #define FRAMEBUFFER_SWIZZLE_ABGR 2 + #define FRAMEBUFFER_SWIZZLE_BGRA 3 + #define FRAMEBUFFER_UVSWIZZLE_SHIFT 25 + #define FRAMEBUFFER_UVSWIZZLE_DISABLE 0 + #define FRAMEBUFFER_UVSWIZZLE_ENABLE 1 + #define FRAMEBUFFER_CLEAR BIT(8) + #define FRAMEBUFFER_SCALE_ENABLE BIT(22) +#define PHYTIUM_DC_FRAMEBUFFER_SCALECONFIG 0x1520 + #define FRAMEBUFFER_FILTER_TAP 3 + #define FRAMEBUFFER_HORIZONTAL_FILTER_TAP 3 + #define FRAMEBUFFER_TAP 0x33 +#define PHYTIUM_DC_FRAMEBUFFER_U_ADDRESS 0x1530 +#define PHYTIUM_DC_FRAMEBUFFER_V_ADDRESS 0x1538 +#define PHYTIUM_DC_OVERLAY_CONFIG 0x1540 + #define PX210_DC_OVERLAY_ENABLE BIT(24) + +#define PHYTIUM_DC_FRAMEBUFFER_U_STRIDE 0x1800 +#define PHYTIUM_DC_FRAMEBUFFER_V_STRIDE 0x1808 +#define PHYTIUM_DC_FRAMEBUFFER_SIZE 0x1810 + #define WIDTH_SHIFT 0 + #define WIDTH_MASK 0x7fff + #define HEIGHT_SHIFT 15 + #define HEIGHT_MASK 0x7fff + +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_X 0x1828 + #define SCALE_FACTOR_X_MASK 0x7fffffff +#define PHYTIUM_DC_FRAMEBUFFER_SCALE_FACTOR_Y 0x1830 + #define SCALE_FACTOR_Y_MASK 0x7fffffff + #define SCALE_FACTOR_Y_MAX 0x3 + #define SCALE_FACTOR_SRC_OFFSET 16 + +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER_INDEX 0x1838 + #define HORI_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_HORI_FILTER 0x1a00 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER_INDEX 0x1a08 + #define VERT_FILTER_INDEX 0x0 +#define PHYTIUM_DC_FRAMEBUFFER_VERT_FILTER 0x1a10 +#define PHYTIUM_DC_FRAMEBUFFER_CLEARVALUE 0x1a18 + #define CLEAR_VALUE_RED 0x00ff0000 + #define CLEAR_VALUE_GREEN 0x0000ff00 + #define CLEAR_VALUE_BLACK 0x00000000 +#define PHYTIUM_DC_FRAMEBUFFER_INITIALOFFSET 0x1a20 + #define INITIALOFFSET (0x8000 | (0X8000 << 16)) +#define PHYTIUM_DC_DP_CONFIG 0x1cd0 + #define OUTPUT_DP (1<<3) + #define DP_RGB666 (0x1) + #define DP_RGB888 (0x2) + #define DP_RGB101010 (0x3) +/******************************dc register end********************************************/ + +/******************************phy access register****************************************/ +#define PHYTIUM_PHY_ACCESS_ADDRESS 0x0000 +#define PHYTIUM_PHY_WRITE_DATA 0x0004 +#define PHYTIUM_PHY_READ_DATA 0x0008 +#define PHYTIUM_PHY_ACCESS_CTRL 0x000c + #define ACCESS_WRITE (1<<0) + #define ACCESS_READ (1<<1) +/******************************phy access register end*************************************/ + +/******************************dp register start******************************************/ +#define PHYTIUM_DP_LINK_BW_SET 0x0000 +#define PHYTIUM_DP_LANE_COUNT_SET 0x0004 +#define PHYTIUM_DP_ENHANCED_FRAME_EN 0x0008 + #define ENHANCED_FRAME_ENABLE 0x1 + #define ENHANCED_FRAME_DISABLE 0x0 +#define 
PHYTIUM_DP_TRAINING_PATTERN_SET 0x000c + #define TRAINING_OFF 0x0 + #define TRAINING_PATTERN_1 0x1 + #define TRAINING_PATTERN_2 0x2 + #define TRAINING_PATTERN_3 0x3 + #define TRAINING_PATTERN_4 0x4 +#define PHYTIUM_DP_LINK_QUAL_PATTERN_SET 0x0010 + #define TEST_PATTERN_NONE 0x0 + #define TEST_PATTERN_D10_2 0x1 + #define TEST_PATTERN_SYMBOL_ERROR 0x2 + #define TEST_PATTERN_PRBS7 0x3 + #define TEST_PATTERN_80BIT_CUSTOM 0x4 + #define TEST_PATTERN_CP2520_1 0x5 + #define TEST_PATTERN_CP2520_2 0x6 + #define TEST_PATTERN_CP2520_3 0x7 + #define TEST_PATTERN_LANE_SHIFT 8 +#define PHYTIUM_DP_SCRAMBLING_DISABLE 0x0014 + #define SCRAMBLING_ENABLE 0x0 + #define SCRAMBLING_DISABLE 0x1 +#define PHYTIUM_DP_DOWNSPREAD_CTRL 0x0018 +#define PHYTIUM_DP_ALT_SCRAMBLER_RESET 0x001c +#define PHYTIUM_DP_HBR2_SCRAMBLER_RESET 0x0020 +#define PHYTIUM_DP_DISPLAYPORT_VERSION 0x0024 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_0 0x0030 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_1 0x0034 +#define PHYTIUM_DP_CUSTOM_80BIT_PATTERN_2 0x0038 +#define PHYTIUM_DP_TRANSMITTER_OUTPUT_ENABLE 0x0080 + #define TRANSMITTER_OUTPUT_ENABLE BIT(0) + #define TRANSMITTER_OUTPUT_DISABLE 0 +#define PHYTIUM_DP_VIDEO_STREAM_ENABLE 0x0084 + #define SST_MST_SOURCE_0_ENABLE BIT(0) + #define SST_MST_SOURCE_0_ENABLE_MASK 0x1 + #define SST_MST_SOURCE_0_DISABLE 0 +#define PHYTIUM_DP_SECONDARY_STREAM_ENABLE 0x0088 + #define SECONDARY_STREAM_ENABLE 0x1 + #define SECONDARY_STREAM_DISABLE 0x0 +#define PHYTIUM_DP_SEC_DATA_WINDOW 0x008C +#define PHYTIUM_DP_SOFT_RESET 0x0090 + #define LINK_SOFT_RESET (0x1 << 0) + #define VIDEO_SOFT_RESET (0x1 << 1) +#define PHYTIUM_INPUT_SOURCE_ENABLE 0x0094 + #define VIRTUAL_SOURCE_0_ENABLE BIT(0) + #define VIRTUAL_SOURCE_0_ENABLE_MASK 0x1 +#define PHYTIUM_DP_FORCE_SCRAMBLER_RESET 0x00C0 + #define SCRAMBLER_RESET BIT(0) +#define PHYTIUM_DP_SOURCE_CONTROL_STATUS 0x00C4 +#define PHYTIUM_DP_DATA_CONTROL 0x00C8 +#define PHYTIUM_DP_CORE_CAPABILITY 0x00F8 +#define PHYTIUM_DP_CORE_ID 0x00FC +#define PHYTIUM_DP_AUX_COMMAND 0x0100 + #define BYTE_COUNT_MASK 0xf + #define COMMAND_SHIFT 8 + #define COMMAND_MASK 0xf + #define ADDRESS_ONLY (1<<12) +#define PHYTIUM_DP_AUX_WRITE_FIFO 0x0104 +#define PHYTIUM_DP_AUX_ADDRESS 0x0108 +#define PHYTIUM_DP_AUX_CLK_DIVIDER 0x010C + #define AUX_CLK_DIVIDER 48 + #define AUX_CLK_DIVIDER_100 100 +#define PHYTIUM_DP_SINK_HPD_STATE 0x0128 + #define HPD_CONNECT 0x1 + #define HPD_DISCONNECT 0x0 +#define PHYTIUM_DP_INTERRUPT_RAW_STATUS 0x0130 + #define REPLY_TIMEOUT (1<<3) + #define DP_STATUS_REQUEST_IN_PROGRESS (1<<1) + #define HPD_STATE (0<<1) +#define PHYTIUM_DP_AUX_REPLY_DATA 0x0134 +#define PHYTIUM_DP_AUX_REPLY_CODE 0x0138 + #define AUX_NATIVE_ACK (0x0<<0) + #define AUX_NATIVE_NACK (0x1<<0) + #define AUX_NATIVE_DEFER (0x2<<0) + #define AUX_NATIVE_MASK (0x3 << 0) + #define AUX_I2C_ACK (0x0<<2) + #define AUX_I2C_NACK (0x1<<2) + #define AUX_I2C_DEFER (0x2<<2) + #define AUX_I2C_MASK (0x3 << 2) +#define PHYTIUM_DP_INTERRUPT_STATUS 0x0140 + #define HPD_IRQ (1<<1) + #define HPD_EVENT (1<<0) +#define PHYTIUM_DP_INTERRUPT_MASK 0x0144 + #define HPD_IRQ_MASK (1<<1) + #define HPD_EVENT_MASK (1<<0) + #define HPD_OTHER_MASK 0x3c +#define PHYTIUM_DP_AUX_REPLY_DATA_COUNT 0x0148 +#define PHYTIUM_DP_AUX_STATUS 0x014C + #define REPLY_RECEIVED 0x1 + #define REPLY_IN_PROGRESS 0x2 + #define REQUEST_IN_PROGRESS 0x4 + #define REPLY_ERROR 0x8 +#define PHYTIUM_DP_AUX_TIMER 0x0158 +#define PHYTIUM_DP_MAIN_LINK_HTOTAL 0x0180 +#define PHYTIUM_DP_MAIN_LINK_VTOTAL 0x0184 +#define PHYTIUM_DP_MAIN_LINK_POLARITY 0x0188 + #define 
VSYNC_POLARITY_LOW BIT(1) + #define HSYNC_POLARITY_LOW BIT(0) +#define PHYTIUM_DP_MAIN_LINK_HSWIDTH 0x018C +#define PHYTIUM_DP_MAIN_LINK_VSWIDTH 0x0190 +#define PHYTIUM_DP_MAIN_LINK_HRES 0x0194 +#define PHYTIUM_DP_MAIN_LINK_VRES 0x0198 +#define PHYTIUM_DP_MAIN_LINK_HSTART 0x019C +#define PHYTIUM_DP_MAIN_LINK_VSTART 0x01A0 +#define PHYTIUM_DP_MAIN_LINK_MISC0 0x01A4 + #define MISC0_SYNCHRONOUS_CLOCK BIT(0) + #define MISC0_BIT_DEPTH_OFFSET 5 + #define MISC0_BIT_DEPTH_6BIT 0x0 + #define MISC0_BIT_DEPTH_8BIT 0x1 + #define MISC0_BIT_DEPTH_10BIT 0x2 + #define MISC0_COMPONENT_FORMAT_SHIFT 1 + #define MISC0_COMPONENT_FORMAT_RGB 0x0 +#define PHYTIUM_DP_MAIN_LINK_MISC1 0x01A8 +#define PHYTIUM_DP_M_VID 0x01AC +#define PHYTIUM_DP_TRANSFER_UNIT_SIZE 0x01B0 +#define PHYTIUM_DP_N_VID 0x01B4 +#define PHYTIUM_DP_USER_PIXEL_WIDTH 0x01B8 +#define PHYTIUM_DP_DATA_COUNT 0x01BC +#define PHYTIUM_DP_INTERLACED 0x01C0 +#define PHYTIUM_DP_USER_SYNC_POLARITY 0x01C4 + #define USER_ODDEVEN_POLARITY_HIGH BIT(3) + #define USER_DATA_ENABLE_POLARITY_HIGH BIT(2) + #define USER_VSYNC_POLARITY_HIGH BIT(1) + #define USER_HSYNC_POLARITY_HIGH BIT(0) +#define PHYTIUM_DP_USER_CONTROL 0x01C8 +#define PHYTIUM_EDP_CRC_ENABLE 0x01D0 + #define SUPPORT_EDP_1_4 BIT(1) +#define PHYTIUM_EDP_CRC_RED 0x01D4 +#define PHYTIUM_EDP_CRC_GREEN 0x01D8 +#define PHYTIUM_EDP_CRC_BLUE 0x01DC +#define PHYTIUM_DP_SEC_AUDIO_ENABLE 0x0300 + #define SEC_AUDIO_ENABLE BIT(0) + #define CHANNEL_MUTE_ENABLE BIT(1) +#define PHYTIUM_DP_SEC_INPUT_SELECT 0x0304 + #define INPUT_SELECT_I2S 0x0 +#define PHYTIUM_DP_SEC_CHANNEL_COUNT 0x0308 + #define CHANNEL_2 0x2 + #define CHANNEL_2_LFE 0x3 + #define CHANNEL_5_1 0x6 + #define CHANNEL_7_1 0x7 + #define CHANNEL_MASK 0xf +#define PHYTIUM_DP_SEC_DIRECT_CLKDIV 0x030c + #define APB_CLOCK 48000000 +#define PHYTIUM_DP_SEC_MAUD 0x0318 +#define PHYTIUM_DP_SEC_NAUD 0x031c +#define PHYTIUM_DP_SEC_CLOCK_MODE 0x0320 + #define CLOCK_MODE_SYNC 0x1 +#define PHYTIUM_DP_SEC_CS_SOURCE_FORMAT 0x0340 + #define CS_SOURCE_FORMAT_DEFAULT 0x0 +#define PHYTIUM_DP_SEC_CS_CATEGORY_CODE 0x0344 +#define PHYTIUM_DP_SEC_CS_LENGTH_ORIG_FREQ 0x0348 + #define ORIG_FREQ_32000 0xc + #define ORIG_FREQ_44100 0xf + #define ORIG_FREQ_48000 0xd + #define ORIG_FREQ_88200 0x7 + #define ORIG_FREQ_96000 0x5 + #define ORIG_FREQ_176400 0x3 + #define ORIG_FREQ_192000 0x1 + #define ORIG_FREQ_MASK 0xf + #define ORIG_FREQ_SHIFT 0 + #define WORD_LENGTH_16 0x4 + #define WORD_LENGTH_18 0x2 + #define WORD_LENGTH_20 0xc + #define WORD_LENGTH_24 0xd + #define WORD_LENGTH_MASK 0xf + #define WORD_LENGTH_SHIFT 4 +#define PHYTIUM_DP_SEC_CS_FREQ_CLOCK_ACCURACY 0x034c // not used + #define SAMPLING_FREQ_32000 0xc + #define SAMPLING_FREQ_44100 0x0 + #define SAMPLING_FREQ_48000 0x4 + #define SAMPLING_FREQ_88200 0x1 + #define SAMPLING_FREQ_96000 0x5 + #define SAMPLING_FREQ_176400 0x3 + #define SAMPLING_FREQ_192000 0x7 + #define SAMPLING_FREQ_MASK 0xf + #define SAMPLING_FREQ_SHIFT 4 +#define PHYTIUM_DP_SEC_CHANNEL_MAP 0x035C + #define CHANNEL_MAP_DEFAULT 0x87654321 +/******************************dp register end********************************************/ + +#endif /* __PHYTIUM_REG_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dc.c b/drivers/gpu/drm/phytium/px210_dc.c new file mode 100644 index 0000000000000000000000000000000000000000..ae022f9fe3fb3418cad9cacf747ae5953f83750b --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.c @@ -0,0 +1,326 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
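+ *
+ * PX210-specific DC backend. Note that the pixel clock is not set by
+ * a plain register write: a request word (FLAG_REQUEST |
+ * CMD_PIXEL_CLOCK | clock) is posted to the DCREQ command register,
+ * and phytium_wait_cmd_done() then polls that register until
+ * FLAG_REQUEST has been replaced by FLAG_REPLY.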
+ */ + +#include +#include +#include +#include +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_crtc.h" +#include "phytium_plane.h" +#include "phytium_fb.h" +#include "phytium_gem.h" + +static const unsigned int px210_primary_formats[] = { + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_RGBA1010102, + DRM_FORMAT_BGRA1010102, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGBX8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_ARGB4444, + DRM_FORMAT_ABGR4444, + DRM_FORMAT_RGBA4444, + DRM_FORMAT_BGRA4444, + DRM_FORMAT_XRGB4444, + DRM_FORMAT_XBGR4444, + DRM_FORMAT_RGBX4444, + DRM_FORMAT_BGRX4444, + DRM_FORMAT_ARGB1555, + DRM_FORMAT_ABGR1555, + DRM_FORMAT_RGBA5551, + DRM_FORMAT_BGRA5551, + DRM_FORMAT_XRGB1555, + DRM_FORMAT_XBGR1555, + DRM_FORMAT_RGBX5551, + DRM_FORMAT_BGRX5551, + DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, + DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, +}; + +static uint64_t px210_primary_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC, + DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC, + DRM_FORMAT_MOD_INVALID +}; + +static uint64_t px210_cursor_formats_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + +static const unsigned int px210_cursor_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +void px210_dc_hw_vram_init(struct phytium_display_private *priv, resource_size_t vram_addr, + resource_size_t vram_size) +{ + uint32_t config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_SIZE); + + config = phytium_readl_reg(priv, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + if (config) + phytium_writel_reg(priv, config, group_offset, + PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR); + + phytium_writel_reg(priv, (vram_addr & SRC_ADDR_MASK) >> SRC_ADDR_OFFSET, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR); + phytium_writel_reg(priv, (vram_size >> SIZE_OFFSET) | ADDRESS_TRANSFORM_ENABLE, + group_offset, PX210_DC_ADDRESS_TRANSFORM_SIZE); + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DST_ADDR); +} + +void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe) +{ + phytium_writel_reg(priv, MSI_CLEAR, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_MSI_CLEAR); +} + +void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock) +{ + struct drm_device *dev = crtc->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc); + int phys_pipe = phytium_crtc->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int ret = 0; + + /* config pix clock */ + phytium_writel_reg(priv, FLAG_REQUEST | CMD_PIXEL_CLOCK | (clock & PIXEL_CLOCK_MASK), + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set pixel clock\n", __func__); +} + +void px210_dc_hw_disable(struct drm_crtc *crtc) 
+{
+	struct drm_device *dev = crtc->dev;
+	struct phytium_display_private *priv = dev->dev_private;
+	struct phytium_crtc *phytium_crtc = to_phytium_crtc(crtc);
+	int reset_timeout = 100;
+	int config = 0;
+	int phys_pipe = phytium_crtc->phys_pipe;
+
+	/* reset dc */
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset pix clock */
+	px210_dc_hw_config_pix_clock(crtc, 0);
+
+	/* reset dc again, now with the pixel clock stopped */
+	reset_timeout = 100;
+	config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, config | SOFT_RESET, priv->dc_reg_base[phys_pipe],
+			   PX210_DC_CLOCK_CONTROL);
+	phytium_writel_reg(priv, 0, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_CONTROL);
+	do {
+		config = phytium_readl_reg(priv, priv->dc_reg_base[phys_pipe], PX210_DC_CLOCK_IDLE);
+		if (config & IS_IDLE)
+			break;
+		mdelay(1);
+		reset_timeout--;
+	} while (reset_timeout);
+
+	/* reset dcreq */
+	phytium_writel_reg(priv, DCREQ_PLAN_A, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PLAN);
+	phytium_writel_reg(priv, 0, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_CONTROL);
+	phytium_writel_reg(priv, DCREQ_RESET, priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+	msleep(20);
+	phytium_writel_reg(priv, (~DCREQ_RESET) & DCREQ_RESET_MASK,
+			   priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_RESET);
+}
+
+int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count)
+{
+	int ret = 0;
+
+	switch (mode_cmd->modifier[count]) {
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE0_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB4444:
+		case DRM_FORMAT_ABGR4444:
+		case DRM_FORMAT_RGBA4444:
+		case DRM_FORMAT_BGRA4444:
+		case DRM_FORMAT_XRGB4444:
+		case DRM_FORMAT_XBGR4444:
+		case DRM_FORMAT_RGBX4444:
+		case DRM_FORMAT_BGRX4444:
+		case DRM_FORMAT_ARGB1555:
+		case DRM_FORMAT_ABGR1555:
+		case DRM_FORMAT_RGBA5551:
+		case DRM_FORMAT_BGRA5551:
+		case DRM_FORMAT_XRGB1555:
+		case DRM_FORMAT_XBGR1555:
+		case DRM_FORMAT_RGBX5551:
+		case DRM_FORMAT_BGRX5551:
+		case DRM_FORMAT_RGB565:
+		case DRM_FORMAT_BGR565:
+		case DRM_FORMAT_YUYV:
+		case DRM_FORMAT_UYVY:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE0_FBCDC does not support DRM_FORMAT %d",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_PHYTIUM_TILE_MODE3_FBCDC:
+		switch (mode_cmd->pixel_format) {
+		case DRM_FORMAT_ARGB2101010:
+		case DRM_FORMAT_ABGR2101010:
+		case DRM_FORMAT_RGBA1010102:
+		case DRM_FORMAT_BGRA1010102:
+		case DRM_FORMAT_ARGB8888:
+		case DRM_FORMAT_ABGR8888:
+		case DRM_FORMAT_RGBA8888:
+		case DRM_FORMAT_BGRA8888:
+		case DRM_FORMAT_XRGB8888:
+		case DRM_FORMAT_XBGR8888:
+		case DRM_FORMAT_RGBX8888:
+		case DRM_FORMAT_BGRX8888:
+			break;
+		default:
+			DRM_ERROR("TILE_MODE3_FBCDC does not support DRM_FORMAT %d",
+				  mode_cmd->pixel_format);
+			ret = -EINVAL;
+			goto error;
+		}
+		break;
+	case DRM_FORMAT_MOD_LINEAR:
+		break;
+	default:
+		DRM_ERROR("unsupported fb modifier 0x%llx\n", mode_cmd->modifier[count]);
+		ret = -EINVAL;
+		goto error;
+	}
+
+	return 0;
+error:
+	return ret;
+}
+
+void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers,
+					  const uint32_t **formats,
+					  uint32_t 
*format_count) +{ + *format_modifiers = px210_primary_formats_modifiers; + *formats = px210_primary_formats; + *format_count = ARRAY_SIZE(px210_primary_formats); +} + +void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count) +{ + *format_modifiers = px210_cursor_formats_modifiers; + *formats = px210_cursor_formats; + *format_count = ARRAY_SIZE(px210_cursor_formats); +} + +void px210_dc_hw_update_dcreq(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + uint32_t group_offset = priv->dcreq_reg_base[phys_pipe]; + int config; + + if (phytium_plane->tiling[0] == FRAMEBUFFER_LINEAR) { + phytium_writel_reg(priv, DCREQ_MODE_LINEAR, + group_offset, PX210_DCREQ_PLANE0_CONFIG); + } else { + config = DCREQ_NO_LOSSY; + if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE0) + config |= DCREQ_TILE_TYPE_MODE0; + else if (phytium_plane->tiling[0] == FRAMEBUFFER_TILE_MODE3) + config |= DCREQ_TILE_TYPE_MODE3; + else + config |= DCREQ_TILE_TYPE_MODE0; + + switch (phytium_plane->format) { + case FRAMEBUFFER_FORMAT_ARGB8888: + case FRAMEBUFFER_FORMAT_XRGB8888: + config |= DCREQ_COLOURFORMAT_BGRA8888; + break; + case FRAMEBUFFER_FORMAT_ARGB2101010: + config |= DCREQ_COLOURFORMAT_ARGB2101010; + break; + case FRAMEBUFFER_FORMAT_XRGB4444: + case FRAMEBUFFER_FORMAT_ARGB4444: + config |= DCREQ_COLOURFORMAT_ARGB4444; + break; + case FRAMEBUFFER_FORMAT_XRGB1555: + case FRAMEBUFFER_FORMAT_ARGB1555: + config |= DCREQ_COLOURFORMAT_ARGB1555; + break; + case FRAMEBUFFER_FORMAT_RGB565: + config |= DCREQ_COLOURFORMAT_RGB565; + break; + case FRAMEBUFFER_FORMAT_YUYV: + config |= DCREQ_COLOURFORMAT_YUYV; + break; + case FRAMEBUFFER_FORMAT_UYVY: + config |= DCREQ_COLOURFORMAT_UYVY; + break; + } + config |= DCREQ_ARGBSWIZZLE_ARGB; + config |= DCREQ_MODE_TILE; + phytium_writel_reg(priv, phytium_plane->iova[0] & 0xffffffff, + group_offset, PX210_DCREQ_PLANE0_ADDR_START); + phytium_writel_reg(priv, (phytium_plane->iova[0] + phytium_plane->size[0]) & + 0xffffffff, group_offset, PX210_DCREQ_PLANE0_ADDR_END); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_PLANE0_CONFIG); + } +} + +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane) +{ + struct drm_device *dev = plane->dev; + struct phytium_display_private *priv = dev->dev_private; + struct phytium_plane *phytium_plane = to_phytium_plane(plane); + int phys_pipe = phytium_plane->phys_pipe; + + phytium_writel_reg(priv, (phytium_plane->iova[0] >> PREFIX_SHIFT) & PREFIX_MASK, + priv->dcreq_reg_base[phys_pipe], PX210_DCREQ_PIX_DMA_PREFIX); +} diff --git a/drivers/gpu/drm/phytium/px210_dc.h b/drivers/gpu/drm/phytium/px210_dc.h new file mode 100644 index 0000000000000000000000000000000000000000..1d8220faadc7399f9fad696db53787cff1a0968e --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. 
+ */ + +#ifndef __PX210_DC_H__ +#define __PX210_DC_H__ + +#define PX210_DC_PIX_CLOCK_MAX (594000) +#define PX210_DC_HDISPLAY_MAX 3840 +#define PX210_DC_VDISPLAY_MAX 2160 +#define PX210_DC_ADDRESS_MASK 0x7f + +extern void px210_dc_hw_vram_init(struct phytium_display_private *priv, + resource_size_t vram_addr, + resource_size_t vram_size); +extern void px210_dc_hw_clear_msi_irq(struct phytium_display_private *priv, uint32_t phys_pipe); +extern void px210_dc_hw_config_pix_clock(struct drm_crtc *crtc, int clock); +extern void px210_dc_hw_disable(struct drm_crtc *crtc); +extern int px210_dc_hw_fb_format_check(const struct drm_mode_fb_cmd2 *mode_cmd, int count); +extern void px210_dc_hw_plane_get_primary_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +extern void px210_dc_hw_plane_get_cursor_format(const uint64_t **format_modifiers, + const uint32_t **formats, + uint32_t *format_count); +void px210_dc_hw_update_dcreq(struct drm_plane *plane); +void px210_dc_hw_update_primary_hi_addr(struct drm_plane *plane); +#endif /* __PX210_DC_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_dp.c b/drivers/gpu/drm/phytium/px210_dp.c new file mode 100644 index 0000000000000000000000000000000000000000..be3c520a3c09d4ecb4e063cc1a031382c700f5cc --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.c @@ -0,0 +1,920 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#include "phytium_display_drv.h" +#include "px210_reg.h" +#include "phytium_dp.h" +#include "px210_dp.h" + +static uint8_t px210_dp_source_lane_count[3] = {4, 4, 1}; + +/* [reg][ling_rate 1.62->8.1] */ +static int vco_val[12][4] = { + {0x0509, 0x0509, 0x0509, 0x0509}, // CP_PADJ + {0x0f00, 0x0f00, 0x0f00, 0x0f00}, // CP_IADJ + {0x0F08, 0x0F08, 0x0F08, 0x0F08}, // FILT_PADJ + {0x0061, 0x006C, 0x006C, 0x0051}, // INTDIV + {0x3333, 0x0000, 0x0000, 0x0000}, // FRACDIVL + {0x0000, 0x0000, 0x0000, 0x0000}, // FRACDIVH + {0x0042, 0x0048, 0x0048, 0x0036}, // HIGH_THR + {0x0002, 0x0002, 0x0002, 0x0002}, // PDIAG_CTRL + {0x0c5e, 0x0c5e, 0x0c5e, 0x0c5e}, // VCOCAL_PLLCNT_START + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PEFCNT + {0x00c7, 0x00c7, 0x00c7, 0x00c7}, // LOCK_PLLCNT_START + {0x0005, 0x0005, 0x0005, 0x0005}, // LOCK_PLLCNT_THR +}; + +static int mgnfs_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0026, 0x001f, 0x0012, 0x0000}, + {0x0013, 0x0013, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0018, 0x006, 0x0000, 0x0000}, + {0x000c, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0026, 0x0013, 0x005, 0x0000}, + {0x0013, 0x006, 0x0000, 0x0000}, + {0x0006, 0x0000, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int cpost_val[4][4][4] = // [link_rate][swing][emphasis] +{ + /* 1.62Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 2.7Gbps */ + { + {0x0000, 0x0014, 0x0020, 0x002a}, + {0x0000, 0x0010, 0x001f, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 5.4Gbps */ + { + {0x0000, 0x0014, 
0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, + + /* 8.1Gbps */ + { + {0x0000, 0x0014, 0x0022, 0x002e}, + {0x0000, 0x0013, 0x0020, 0x0000}, + {0x0000, 0x0013, 0x0000, 0x0000}, + {0x0000, 0x0000, 0x0000, 0x0000}, + }, +}; + +static int px210_dp_hw_set_phy_lane_and_rate(struct phytium_dp_device *phytium_dp, + uint8_t link_lane_count, + uint32_t link_rate) +{ + int port = phytium_dp->port%3; + int i = 0, data, tmp, tmp1, index = 0, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + /* set pma powerdown */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (A3_POWERDOWN3 << i*A3_POWERDOWN3_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<port%3; + int voltage_swing = 0; + int pre_emphasis = 0, link_rate_index = 0; + + switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { + case DP_TRAIN_VOLTAGE_SWING_LEVEL_0: + default: + voltage_swing = 0; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_1: + voltage_swing = 1; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_2: + voltage_swing = 2; + break; + case DP_TRAIN_VOLTAGE_SWING_LEVEL_3: + voltage_swing = 3; + break; + } + switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + default: + pre_emphasis = 0; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + pre_emphasis = 1; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + pre_emphasis = 2; + break; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + pre_emphasis = 3; + break; + } + + switch (link_rate) { + case 810000: + link_rate_index = 3; + break; + case 540000: + link_rate_index = 2; + break; + case 270000: + link_rate_index = 1; + break; + case 162000: + link_rate_index = 0; + break; + default: + DRM_ERROR("phytium dp rate(%d) not support\n", link_rate); + link_rate_index = 2; + break; + } + + if (port == 0) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL0_TX_DIAG_ACYA, UNLOCK); + + } else if (port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DRV, TX_DRV); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_CPOST1, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY0_PLL1_TX_DIAG_ACYA, UNLOCK); + } else { + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, LOCK); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_TXCC_CTRL, TX_TXCC_CTRL); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DRV, TX_DRV); + 
phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_MGNFS, + mgnfs_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_CPOST, + cpost_val[link_rate_index][voltage_swing][pre_emphasis]); + phytium_phy_writel(phytium_dp, PX210_PHY1_PLL0_TX_DIAG_ACYA, UNLOCK); + } +} + +static int px210_dp_hw_init_phy(struct phytium_dp_device *phytium_dp) +{ + int port = phytium_dp->port; + int i = 0, data, tmp, mask; + int timeout = 500, ret = 0; + + if (port == 0 || port == 1) { + phytium_phy_writel(phytium_dp, PX210_PHY0_APB_RESET, APB_RESET); + + phytium_phy_writel(phytium_dp, PX210_PHY0_PIPE_RESET, RESET); + + /* config lane to dp mode */ + data = 0; + mask = 0; + for (i = 0; i < phytium_dp->source_max_lane_count; i++) { + data |= (LANE_BIT << i*LANE_BIT_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (LANE_MASTER << i*LANE_MASTER_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (PLL_EN << i*PLL_EN_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (BIT_20 << i*BIT_20_SHIFT); + mask |= (((1<source_max_lane_count; i++) { + data |= (A0_ACTIVE << i*A0_ACTIVE_SHIFT); + mask |= (((1<dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweron panel\n", __func__); +} + +static void px210_dp_hw_poweroff_panel(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | PANEL_POWER_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to poweroff panel\n", __func__); +} + +static void px210_dp_hw_enable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, ret = 0; + uint32_t group_offset = priv->dcreq_reg_base[port]; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_ENABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to enable backlight\n", __func__); +} + +static void px210_dp_hw_disable_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int ret = 0; + + phytium_writel_reg(priv, FLAG_REQUEST | CMD_BACKLIGHT | BACKLIGHT_DISABLE, + group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to disable backlight\n", __func__); +} + +static uint32_t px210_dp_hw_get_backlight(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct 
phytium_display_private *priv = dev->dev_private; + int config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE); + return ((config >> BACKLIGHT_VALUE_SHIFT) & BACKLIGHT_VALUE_MASK); +} + +static int px210_dp_hw_set_backlight(struct phytium_dp_device *phytium_dp, uint32_t level) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + uint32_t group_offset = priv->dcreq_reg_base[port]; + int config = 0; + int ret = 0; + + if (level > PX210_DP_BACKLIGHT_MAX) { + ret = -EINVAL; + goto out; + } + + config = FLAG_REQUEST | CMD_BACKLIGHT | ((level & BACKLIGHT_MASK) << BACKLIGHT_SHIFT); + phytium_writel_reg(priv, config, group_offset, PX210_DCREQ_CMD_REGISTER); + ret = phytium_wait_cmd_done(priv, group_offset + PX210_DCREQ_CMD_REGISTER, + FLAG_REQUEST, FLAG_REPLY); + if (ret < 0) + DRM_ERROR("%s: failed to set backlight\n", __func__); + +out: + return ret; +} + +bool px210_dp_hw_spread_is_enable(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port, config; + uint32_t group_offset = priv->address_transform_base; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + + return ((config & DP_SPREAD_ENABLE(port)) ? true:false); +} + +int px210_dp_hw_reset(struct phytium_dp_device *phytium_dp) +{ + struct drm_device *dev = phytium_dp->dev; + struct phytium_display_private *priv = dev->dev_private; + int port = phytium_dp->port; + int timeout = 100, config, ret = 0; + uint32_t group_offset = priv->address_transform_base; + uint32_t group_offset_dp = priv->dp_reg_base[port]; + + config = phytium_readl_reg(priv, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + config &= (~DC_DP_RESET_STATUS(port)); + + phytium_writel_reg(priv, config, group_offset, PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + phytium_writel_reg(priv, FLAG_REQUEST | CMD_DC_DP_RESET, + priv->dcreq_reg_base[port], PX210_DCREQ_CMD_REGISTER); + do { + mdelay(10); + timeout--; + config = phytium_readl_reg(priv, group_offset, + PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS); + if (config & DC_DP_RESET_STATUS(port)) + break; + } while (timeout); + if (timeout == 0) { + DRM_ERROR("reset dc/dp pipe(%d) failed\n", port); + ret = -1; + } + + phytium_writel_reg(priv, AUX_CLK_DIVIDER, group_offset_dp, PHYTIUM_DP_AUX_CLK_DIVIDER); + + return ret; +} + +uint8_t px210_dp_hw_get_source_lane_count(struct phytium_dp_device *phytium_dp) +{ + return px210_dp_source_lane_count[phytium_dp->port]; +} + +static struct phytium_dp_func px210_dp_funcs = { + .dp_hw_get_source_lane_count = px210_dp_hw_get_source_lane_count, + .dp_hw_reset = px210_dp_hw_reset, + .dp_hw_spread_is_enable = px210_dp_hw_spread_is_enable, + .dp_hw_set_backlight = px210_dp_hw_set_backlight, + .dp_hw_get_backlight = px210_dp_hw_get_backlight, + .dp_hw_disable_backlight = px210_dp_hw_disable_backlight, + .dp_hw_enable_backlight = px210_dp_hw_enable_backlight, + .dp_hw_poweroff_panel = px210_dp_hw_poweroff_panel, + .dp_hw_poweron_panel = px210_dp_hw_poweron_panel, + .dp_hw_init_phy = px210_dp_hw_init_phy, + .dp_hw_set_phy_lane_setting = px210_dp_hw_set_phy_lane_setting, + .dp_hw_set_phy_lane_and_rate = px210_dp_hw_set_phy_lane_and_rate, +}; + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp) +{ + phytium_dp->funcs = 
&px210_dp_funcs; +} diff --git a/drivers/gpu/drm/phytium/px210_dp.h b/drivers/gpu/drm/phytium/px210_dp.h new file mode 100644 index 0000000000000000000000000000000000000000..f2436ace18453f5409ac034199a39299b7a4e6fe --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_dp.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_DP_H__ +#define __PX210_DP_H__ + +#define PX210_DP_BACKLIGHT_MAX 100 + +void px210_dp_func_register(struct phytium_dp_device *phytium_dp); +#endif /* __PX210_DP_H__ */ diff --git a/drivers/gpu/drm/phytium/px210_reg.h b/drivers/gpu/drm/phytium/px210_reg.h new file mode 100644 index 0000000000000000000000000000000000000000..e594fbc8d96f34a0566d710ce832be14d9a79346 --- /dev/null +++ b/drivers/gpu/drm/phytium/px210_reg.h @@ -0,0 +1,349 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Phytium display drm driver + * + * Copyright (C) 2021-2023, Phytium Technology Co., Ltd. + */ + +#ifndef __PX210_REG_H__ +#define __PX210_REG_H__ + +#include "phytium_reg.h" + +/******************************dc register start******************************************/ +#define PX210_DC_CLOCK_CONTROL 0x0000 + #define SOFT_RESET (1<<12) +#define PX210_DC_CLOCK_IDLE 0x0004 + #define IS_IDLE (1<<16) +/******************************dc register end********************************************/ + +/******************************dcreq register start**************************************/ +#define PX210_DCREQ_PLANE0_ADDR_START 0x00 +#define PX210_DCREQ_PLANE0_ADDR_END 0x04 +#define PX210_DCREQ_PLANE1_ADDR_START 0x08 +#define PX210_DCREQ_PLANE1_ADDR_END 0x0c +#define PX210_DCREQ_PLANE0_CONFIG 0x10 + #define DCREQ_NO_LOSSY (0 << 0) + #define DCREQ_LOSSY (1 << 0) + #define DCREQ_TILE_TYPE_MASK (0x3 << 1) + #define DCREQ_TILE_TYPE_MODE0 (0x1 << 1) + #define DCREQ_TILE_TYPE_MODE3 (0x2 << 1) + #define DCREQ_COLOURFORMAT_MASK (0x7f << 8) + #define DCREQ_COLOURFORMAT_RGB565 (0x5 << 8) + #define DCREQ_COLOURFORMAT_ARGB1555 (0x4 << 8) + #define DCREQ_COLOURFORMAT_ARGB4444 (0x02 << 8) + #define DCREQ_COLOURFORMAT_BGRA8888 (0x29 << 8) + #define DCREQ_COLOURFORMAT_ARGB2101010 (0xe << 8) + #define DCREQ_COLOURFORMAT_YUYV (0x59 << 8) + #define DCREQ_COLOURFORMAT_UYVY (0x5b << 8) + #define DCREQ_ARGBSWIZZLE_MASK (0xf << 4) + #define DCREQ_ARGBSWIZZLE_ARGB (0X0 << 4) + #define DCREQ_ARGBSWIZZLE_BGRA (0XC << 4) + #define DCREQ_MODE_MASK (1 << 16) + #define DCREQ_MODE_LINEAR (0 << 16) + #define DCREQ_MODE_TILE (1 << 16) +#define PX210_DCREQ_PLANE1_CONFIG(pipe) 0x14 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_L 0x18 +#define PX210_DCREQ_PLANE0_CLEAR_COLOR_H 0x1C +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_L 0x20 +#define PX210_DCREQ_PLANE1_CLEAR_COLOR_H 0x24 +#define PX210_DCREQ_CMD_REGISTER 0x38 + #define FLAG_REPLY (1<<31) + #define FLAG_REQUEST (1<<30) + #define CMD_PIXEL_CLOCK (0x0 << 28) + #define CMD_BACKLIGHT (0x1 << 28) + #define CMD_DC_DP_RESET (0x3 << 28) + #define BACKLIGHT_SHIFT 21 + #define BACKLIGHT_MASK 0x7f + #define BACKLIGHT_MAX 100 + #define BACKLIGHT_ENABLE (101 << BACKLIGHT_SHIFT) + #define BACKLIGHT_DISABLE (102 << BACKLIGHT_SHIFT) + #define PANEL_POWER_ENABLE (103 << BACKLIGHT_SHIFT) + #define PANEL_POWER_DISABLE (104 << BACKLIGHT_SHIFT) + #define PIXEL_CLOCK_MASK (0x1fffff) +#define PX210_DCREQ_FBCD_CLOCK_CONFIG 0x3c +#define PX210_DCREQ_PIX_DMA_PREFIX 0x50 + #define PREFIX_MASK 0xff + #define PREFIX_SHIFT 32 +#define PX210_DCREQ_FRAME_START 0x54 +#define PX210_DCREQ_FILTER_CONFIG 0x58 +#define 
PX210_DCREQ_CONTROL 0x5C + #define DC_REQ_ENABLE (1<<0) +#define PX210_DCREQ_MSI_CLEAR 0x60 + #define MSI_CLEAR 0x0 +#define PX210_DCREQ_RESET 0x68 + #define DCREQ_RESET (0x3 << 0) + #define DCREQ_RESET_MASK 0x3 +#define PX210_DCREQ_PLAN 0x94 + #define DCREQ_PLAN_A 0x0 + #define DCREQ_PLAN_B 0X5 +/******************************dcreq register end**************************************/ + +/******************************address transform register start**************************/ +#define PX210_GPU_ADDRESS_TRANSFORM_SRC_ADDR 0x0 +#define PX210_GPU_ADDRESS_TRANSFORM_SIZE 0x4 +#define PX210_GPU_ADDRESS_TRANSFORM_DST_ADDR 0x8 + +#define PX210_DC_ADDRESS_TRANSFORM_SRC_ADDR 0x24 + #define SRC_ADDR_OFFSET 22 + #define SRC_ADDR_MASK 0xffffffffff +#define PX210_DC_ADDRESS_TRANSFORM_SIZE 0x28 + #define ADDRESS_TRANSFORM_ENABLE (0x1 << 31) + #define SIZE_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DST_ADDR 0x2c + #define DST_ADDR_OFFSET 22 +#define PX210_DC_ADDRESS_TRANSFORM_DP_RESET_STATUS 0x48 + #define DC_DP_RESET_STATUS(pipe) (1 << pipe) + #define DP_SPREAD_ENABLE(pipe) (0x8 << pipe) +#define PX210_DC_ADDRESS_TRANSFORM_BACKLIGHT_VALUE 0x4c + #define BACKLIGHT_VALUE_MASK (0x7f) + #define BACKLIGHT_VALUE_SHIFT 16 +/******************************address transform register end**************************/ + +/******************************phy register start******************************************/ +/* self define */ +#define PX210_PHY0_PIPE_RESET 0x40104 + #define RESET 0x0 + #define RESET_DEASSERT 0x1 +#define PX210_PHY1_PIPE_RESET 0x100100 + #define PHY1_PIPE_RESET 0x0 + #define PHY1_PIPE_RESET_DEASSERT 0x4 + +#define PX210_PHY1_EN_REFCLK 0x100070 + +#define PX210_PHY0_MODE 0x40088 + #define LANE_BIT (0x3) + #define LANE_BIT_SHIFT 0x2 +#define PX210_PHY1_SEL 0x100004 + #define PHY1_DP_LANE_BIT 0x1 + #define PHY1_DP_LANE_BIT_SHIFT 2 + +#define PX210_PHY0_LINK_CFG 0x40044 + #define LANE_MASTER 0x1 + #define LANE_MASTER_SHIFT 1 + +#define PX210_PHY0_PLL_EN 0x40010 + #define PLL_EN 0x1 + #define PLL_EN_SHIFT 1 +#define PX210_PHY0_PMA_WIDTH 0x40020 + #define BIT_20 0x5 + #define BIT_20_SHIFT 4 + +#define PX210_PHY0_PMA0_POWER 0x40014 +#define PX210_PHY0_PMA1_POWER 0x40018 + #define A0_ACTIVE 0x1 + #define A0_ACTIVE_SHIFT 8 + #define A3_POWERDOWN3 0x8 + #define A3_POWERDOWN3_SHIFT 8 + +#define PX210_PHY1_PMA_MISC 0x1000a0 + #define PHY1_PLL_EN 0x1 + #define PHY1_PLL_EN_MASK 1 + #define PHY1_PLL_EN_SHIFT 8 + #define PHY1_BIT_20 0x5 + #define PHY1_BIT_20_SHIFT 9 + #define PHY1_A0_ACTIVE 0x1 + #define PHY1_A0_ACTIVE_SHIFT 2 + #define PHY1_A0_ACTIVE_MASK 0x3f + #define PHY1_A3_POWERDOWN3 0x8 + #define PHY1_A3_POWERDOWN3_MASK 0x3f + #define PHY1_A3_POWERDOWN3_SHIFT 2 + +#define PX210_PHY0_LINK_RESET 0x40108 + #define LINK_RESET 0x1 + #define LINK_RESET_MASK 0x1 + #define LINTK_RESET_SHIFT 0x1 + +#define PX210_PHY0_APB_RESET 0x40100 + #define APB_RESET 0x1 +#define PX210_PHY1_APB_RESET 0x100104 + #define PHY1_APB_RESET 0x4 + +/* phy origin register */ +#define PX210_PHY0_PLL_CFG 0x30038 +#define PX210_PHY1_PLL_CFG 0xb0038 + #define SINGLE_LINK 0x0 + #define DOUBLE_LINK 0x2 + +#define PX210_PHY0_PMA_CONTROL 0x3800c +#define PX210_PHY1_PMA_CONTROL 0xb800c + #define CONTROL_ENABLE 0x1 + #define CONTROL_ENABLE_MASK 0x1 + #define CONTROL_ENABLE_SHIFT 0x1 + +#define PX210_PHY0_PMA_CONTROL2 0x38004 +#define PX210_PHY1_PMA_CONTROL2 0xb8004 + #define PLL0_LOCK_DONE (0x1 << 6) + #define PLL1_LOCK_DONE (0x1 << 7) + +#define PX210_PHY0_PLL0_CLK_SEL 0X684 +#define PX210_PHY0_PLL1_CLK_SEL 0x704 +#define 
PX210_PHY1_PLL_CLK_SEL 0X80684 + #define PLL_LINK_RATE_162000 0xf01 + #define PLL_LINK_RATE_270000 0x701 + #define PLL_LINK_RATE_540000 0x301 + #define PLL_LINK_RATE_810000 0x200 + +#define PX210_PHY0_HSCLK0_SEL 0x18398 +#define PX210_PHY0_HSCLK1_SEL 0x1a398 +#define PX210_PHY1_HSCLK_SEL 0x90398 + #define HSCLK_LINK_0 0x0 + #define HSCLK_LINK_1 0x1 + +#define PX210_PHY0_HSCLK0_DIV 0x1839c +#define PX210_PHY0_HSCLK1_DIV 0x1a39c +#define PX210_PHY1_HSCLK_DIV 0x9039c + #define HSCLK_LINK_RATE_162000 0x2 + #define HSCLK_LINK_RATE_270000 0x1 + #define HSCLK_LINK_RATE_540000 0x0 + #define HSCLK_LINK_RATE_810000 0x0 + +#define PX210_PHY0_PLLDRC0_CTRL 0x18394 +#define PX210_PHY0_PLLDRC1_CTRL 0x1a394 +#define PX210_PHY1_PLLDRC_CTRL 0x90394 + #define PLLDRC_LINK0 0x1 + #define PLLDRC_LINK1 0x9 + +#define PX210_PHY0_PLL0_DSM_M0 0x250 +#define PX210_PHY1_PLL0_DSM_M0 0x80250 + #define PLL0_DSM_M0 0x4 +#define PX210_PHY0_PLL0_VCOCAL_START 0x218 +#define PX210_PHY1_PLL0_VCOCAL_START 0x80218 + #define PLL0_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL0_VCOCAL_CTRL 0x208 +#define PX210_PHY1_PLL0_VCOCAL_CTRL 0x80208 + #define PLL0_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL1_DSM_M0 0x350 + #define PLL1_DSM_M0 0x4 +#define PX210_PHY0_PLL1_VCOCAL_START 0x318 + #define PLL1_VCOCAL_START 0xc5e +#define PX210_PHY0_PLL1_VCOCAL_CTRL 0x308 + #define PLL1_VCOCAL_CTRL 0x3 + +#define PX210_PHY0_PLL0_CP_PADJ 0x690 +#define PX210_PHY0_PLL0_CP_IADJ 0x694 +#define PX210_PHY0_PLL0_CP_FILT_PADJ 0x698 +#define PX210_PHY0_PLL0_INTDIV 0x240 +#define PX210_PHY0_PLL0_FRACDIVL 0x244 +#define PX210_PHY0_PLL0_FRACDIVH 0x248 +#define PX210_PHY0_PLL0_HIGH_THR 0x24c +#define PX210_PHY0_PLL0_PDIAG_CTRL 0x680 +#define PX210_PHY0_PLL0_VCOCAL_PLLCNT_START 0x220 +#define PX210_PHY0_PLL0_LOCK_PEFCNT 0x270 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_START 0x278 +#define PX210_PHY0_PLL0_LOCK_PLLCNT_THR 0x27c + +#define PX210_PHY0_PLL1_CP_PADJ 0x710 +#define PX210_PHY0_PLL1_CP_IADJ 0x714 +#define PX210_PHY0_PLL1_CP_FILT_PADJ 0x718 +#define PX210_PHY0_PLL1_INTDIV 0x340 +#define PX210_PHY0_PLL1_FRACDIVL 0x344 +#define PX210_PHY0_PLL1_FRACDIVH 0x348 +#define PX210_PHY0_PLL1_HIGH_THR 0x34c +#define PX210_PHY0_PLL1_PDIAG_CTRL 0x700 +#define PX210_PHY0_PLL1_VCOCAL_PLLCNT_START 0x320 +#define PX210_PHY0_PLL1_LOCK_PEFCNT 0x370 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_START 0x378 +#define PX210_PHY0_PLL1_LOCK_PLLCNT_THR 0x37c + +#define PX210_PHY1_PLL0_CP_PADJ 0x80690 +#define PX210_PHY1_PLL0_CP_IADJ 0x80694 +#define PX210_PHY1_PLL0_CP_FILT_PADJ 0x80698 +#define PX210_PHY1_PLL0_INTDIV 0x80240 +#define PX210_PHY1_PLL0_FRACDIVL 0x80244 +#define PX210_PHY1_PLL0_FRACDIVH 0x80248 +#define PX210_PHY1_PLL0_HIGH_THR 0x8024c +#define PX210_PHY1_PLL0_PDIAG_CTRL 0x80680 +#define PX210_PHY1_PLL0_VCOCAL_PLLCNT_START 0x80220 +#define PX210_PHY1_PLL0_LOCK_PEFCNT 0x80270 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_START 0x80278 +#define PX210_PHY1_PLL0_LOCK_PLLCNT_THR 0x8027c + +#define PX210_PHY0_PLL0_TX_PSC_A0 0x18400 +#define PX210_PHY1_PLL0_TX_PSC_A0 0x90400 + #define PLL0_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL0_TX_PSC_A2 0x18408 +#define PX210_PHY1_PLL0_TX_PSC_A2 0x90408 + #define PLL0_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL0_TX_PSC_A3 0x1840c +#define PX210_PHY1_PLL0_TX_PSC_A3 0x9040c + #define PLL0_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL0_RX_PSC_A0 0x28000 +#define PX210_PHY1_PLL0_RX_PSC_A0 0xa0000 + #define PLL0_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A2 0x28008 +#define PX210_PHY1_PLL0_RX_PSC_A2 0xa0008 + #define PLL0_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL0_RX_PSC_A3 0x2800C 
+#define PX210_PHY1_PLL0_RX_PSC_A3 0xa000C + #define PLL0_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL0_RX_PSC_CAL 0x28018 +#define PX210_PHY1_PLL0_RX_PSC_CAL 0xa0018 + #define PLL0_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL1_TX_PSC_A0 0x1a400 + #define PLL1_TX_PSC_A0 0xfb +#define PX210_PHY0_PLL1_TX_PSC_A2 0x1a408 + #define PLL1_TX_PSC_A2 0x4aa +#define PX210_PHY0_PLL1_TX_PSC_A3 0x1a40c + #define PLL1_TX_PSC_A3 0x4aa +#define PX210_PHY0_PLL1_RX_PSC_A0 0x2a000 + #define PLL1_RX_PSC_A0 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A2 0x2a008 + #define PLL1_RX_PSC_A2 0x0 +#define PX210_PHY0_PLL1_RX_PSC_A3 0x2a00C + #define PLL1_RX_PSC_A3 0x0 +#define PX210_PHY0_PLL1_RX_PSC_CAL 0x2a018 + #define PLL1_RX_PSC_CAL 0x0 + +#define PX210_PHY0_PLL0_XCVR_CTRL 0x183a8 +#define PX210_PHY1_PLL0_XCVR_CTRL 0x903a8 + #define PLL0_XCVR_CTRL 0xf +#define PX210_PHY0_PLL1_XCVR_CTRL 0x1a3a8 + #define PLL1_XCVR_CTRL 0xf + +#define PX210_PHY0_PLL0_RX_GCSM1_CTRL 0x28420 +#define PX210_PHY1_PLL0_RX_GCSM1_CTRL 0xa0420 + #define PLL0_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_GCSM2_CTRL 0x28440 +#define PX210_PHY1_PLL0_RX_GCSM2_CTRL 0xa0440 + #define PLL0_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL0_RX_PERGCSM_CTRL 0x28460 +#define PX210_PHY1_PLL0_RX_PERGCSM_CTRL 0xa0460 + #define PLL0_RX_PERGCSM_CTRL 0x0 + +#define PX210_PHY0_PLL1_RX_GCSM1_CTRL 0x2a420 + #define PLL1_RX_GCSM1_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_GCSM2_CTRL 0x2a440 + #define PLL1_RX_GCSM2_CTRL 0x0 +#define PX210_PHY0_PLL1_RX_PERGCSM_CTRL 0x2a460 + #define PLL1_RX_PERGCSM_CTRL 0x0 + +/* swing and emphasis */ +#define PX210_PHY0_PLL0_TX_DIAG_ACYA 0x1879c +#define PX210_PHY0_PLL1_TX_DIAG_ACYA 0x1a79c +#define PX210_PHY1_PLL0_TX_DIAG_ACYA 0x9079c + #define LOCK 1 + #define UNLOCK 0 + +#define PX210_PHY0_PLL0_TX_TXCC_CTRL 0x18100 +#define PX210_PHY0_PLL1_TX_TXCC_CTRL 0x1a100 +#define PX210_PHY1_PLL0_TX_TXCC_CTRL 0x90100 + #define TX_TXCC_CTRL 0x8a4 + +#define PX210_PHY0_PLL0_TX_DRV 0x18318 +#define PX210_PHY0_PLL1_TX_DRV 0x1a318 +#define PX210_PHY1_PLL0_TX_DRV 0x90318 + #define TX_DRV 0x3 + +#define PX210_PHY0_PLL0_TX_MGNFS 0x18140 +#define PX210_PHY0_PLL1_TX_MGNFS 0x1a140 +#define PX210_PHY1_PLL0_TX_MGNFS 0x90140 + +#define PX210_PHY0_PLL0_TX_CPOST 0x18130 +#define PX210_PHY0_PLL1_TX_CPOST 0x1a130 +#define PX210_PHY0_PLL1_TX_CPOST1 0x1a13c +#define PX210_PHY1_PLL0_TX_CPOST 0x90130 + +/******************************phy register end********************************************/ +#endif /* __PX210_REG_H__ */ diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 10be30366c2bf8e33eda0888d970c80106f9107c..dd7c2481c94f2785fc6229efe7139a2a4beffe64 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8093,6 +8093,9 @@ int cik_irq_process(struct radeon_device *rdev) if (queue_thermal) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index f0ae087be914eefdfc8d497e4e076a631727f7bf..84ce0e5fc72a830f252814abfc1ea489f8e2a555 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -4922,6 +4922,9 @@ int evergreen_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make 
sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index a17b95eec65fb81036c49c8ed9eeabadfc953f07..43c1fde01708ddd2d25ca288a06cb2ec6ef77cb4 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -4328,6 +4328,9 @@ int r600_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index a91012447b56ed2eb94bf9b77aef364831bb9346..f9c31e99366e51d53351cc4d4074413b1f13d839 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6438,6 +6438,9 @@ int si_irq_process(struct radeon_device *rdev) if (queue_thermal && rdev->pm.dpm_enabled) schedule_work(&rdev->pm.dpm.thermal.work); rdev->ih.rptr = rptr; +#ifdef CONFIG_LOONGARCH + WREG32(IH_RB_RPTR, rptr); +#endif atomic_set(&rdev->ih.lock, 0); /* make sure wptr hasn't changed while processing */ diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index ec38c88921589e7f7cfc89396c0f6c94e363f1e0..55de59abe3ecb0693450bf301af28935dafed24f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -38,6 +38,16 @@ config HWMON_DEBUG_CHIP comment "Native drivers" +config SENSORS_PVT + tristate "SW64 PVT monitor" + depends on SW64 + help + If you say yes here you get support for the voltage + sensor inside your CPU. + + This driver can also be built as a module. If so, the module + will be called PVT. + config SENSORS_ABITUGURU tristate "Abit uGuru (rev 1 & 2)" depends on X86 && DMI @@ -2161,6 +2171,19 @@ config SENSORS_VIA_CPUTEMP sensor inside your CPU. Supported are all known variants of the VIA C7 and Nano. +config SENSORS_ZHAOXIN_CPUTEMP + tristate "Zhaoxin CPU temperature sensor" + depends on X86 + default m + select HWMON_VID + help + If you say yes here you get support for the temperature + sensor inside your CPU. Supported are all known variants of + the Zhaoxin processors. + + This driver can also be built as a module. If so, the module + will be called zhaoxin-cputemp. 
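For orientation, the read path this option enables boils down to a safe MSR read on the target core, scaled to the millidegree units hwmon expects. A minimal sketch of that conversion (the helper name is hypothetical; MSR 0x1423 and the 24-bit temperature field are taken from the driver added later in this series)::

	/* Hypothetical helper mirroring the driver's temp_show() conversion. */
	static long zhaoxin_core_temp_mc(unsigned int cpu)
	{
		u32 eax, edx;

		/* MSR 0x1423 reports the core temperature in degrees Celsius. */
		if (rdmsr_safe_on_cpu(cpu, 0x1423, &eax, &edx))
			return -EAGAIN;

		/* hwmon sysfs reports millidegrees */
		return ((unsigned long)eax & 0xffffff) * 1000;
	}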
+ config SENSORS_VIA686A tristate "VIA686A" depends on PCI diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 4ac9452b54304a0d60c68e3e479a0e5f8aac6898..f7da084cfc4646091ae53de160746c8cf1cd6209 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -211,6 +211,7 @@ obj-$(CONFIG_SENSORS_TMP464) += tmp464.o obj-$(CONFIG_SENSORS_TMP513) += tmp513.o obj-$(CONFIG_SENSORS_VEXPRESS) += vexpress-hwmon.o obj-$(CONFIG_SENSORS_VIA_CPUTEMP)+= via-cputemp.o +obj-$(CONFIG_SENSORS_ZHAOXIN_CPUTEMP)+= zhaoxin-cputemp.o obj-$(CONFIG_SENSORS_VIA686A) += via686a.o obj-$(CONFIG_SENSORS_VT1211) += vt1211.o obj-$(CONFIG_SENSORS_VT8231) += vt8231.o @@ -220,6 +221,7 @@ obj-$(CONFIG_SENSORS_W83L786NG) += w83l786ng.o obj-$(CONFIG_SENSORS_WM831X) += wm831x-hwmon.o obj-$(CONFIG_SENSORS_WM8350) += wm8350-hwmon.o obj-$(CONFIG_SENSORS_XGENE) += xgene-hwmon.o +obj-$(CONFIG_SENSORS_PVT) += sw64_pvt.o obj-$(CONFIG_SENSORS_OCC) += occ/ obj-$(CONFIG_SENSORS_PECI) += peci/ diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index bae0becfa24be9c9c774a90712449aacdbf3484a..faf3955a311f792e7e9fcd6d4586e9bd6c3f700a 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -84,6 +84,11 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); */ #define AMD_I3255_STR "3255" +struct hygon_private { + u32 index_2nd; + u32 offset_2nd; +}; + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -94,6 +99,7 @@ struct k10temp_data { bool is_zen; u32 ccd_offset; bool disp_negative; + void *priv; }; #define TCTL_BIT 0 @@ -201,6 +207,23 @@ static int k10temp_read_labels(struct device *dev, return 0; } +static void hygon_read_temp(struct k10temp_data *data, int channel, + u32 *regval) +{ + struct hygon_private *h_priv; + + h_priv = (struct hygon_private *)data->priv; + if ((channel - 2) < h_priv->index_2nd) + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(data->ccd_offset, channel - 2), + regval); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + channel - 2 - h_priv->index_2nd), + regval); +} + static int k10temp_read_temp(struct device *dev, u32 attr, int channel, long *val) { @@ -221,7 +244,10 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, *val = 0; break; case 2 ... 
13: /* Tccd{1-12} */ - amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + if (hygon_f18h_m4h()) + hygon_read_temp(data, channel, ®val); + else + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), ZEN_CCD_TEMP(data->ccd_offset, channel - 2), ®val); *val = (regval & ZEN_CCD_TEMP_MASK) * 125 - 49000; @@ -388,14 +414,48 @@ static void k10temp_get_ccd_support(struct pci_dev *pdev, } } +static void k10temp_get_ccd_support_2nd(struct pci_dev *pdev, + struct k10temp_data *data, int limit) +{ + struct hygon_private *h_priv; + u32 regval; + int i; + + h_priv = (struct hygon_private *)data->priv; + for (i = h_priv->index_2nd; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + ZEN_CCD_TEMP(h_priv->offset_2nd, + i - h_priv->index_2nd), + ®val); + if (regval & ZEN_CCD_TEMP_VALID) + data->show_temp |= BIT(TCCD_BIT(i)); + } +} + static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct hygon_private *h_priv; struct k10temp_data *data; struct device *hwmon_dev; + u8 df_id; int i; + if (hygon_f18h_m4h()) { + if (get_df_id(pdev, &df_id)) { + pr_err("Get DF ID failed.\n"); + return -ENODEV; + } + + /* + * The temperature should be get from the devices + * with id < 4. + */ + if (df_id >= 4) + return 0; + } + if (unreliable) { if (!force) { dev_err(dev, @@ -423,7 +483,7 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) (boot_cpu_data.x86_model & 0xf0) == 0x70)) { data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; - } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { + } else if (boot_cpu_data.x86 == 0x17) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; data->is_zen = true; @@ -448,6 +508,25 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) k10temp_get_ccd_support(pdev, data, 8); break; } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18) { + data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; + data->read_tempreg = read_tempreg_nb_zen; + data->is_zen = true; + + if (boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf) { + data->ccd_offset = 0x154; + data->priv = devm_kzalloc(dev, sizeof(*h_priv), + GFP_KERNEL); + if (!data->priv) + return -ENOMEM; + h_priv = (struct hygon_private *)data->priv; + h_priv->offset_2nd = 0x2f8; + h_priv->index_2nd = 3; + k10temp_get_ccd_support(pdev, data, h_priv->index_2nd); + k10temp_get_ccd_support_2nd(pdev, data, 8); + } } else if (boot_cpu_data.x86 == 0x19) { data->temp_adjust_mask = ZEN_CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_zen; @@ -528,6 +607,8 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3) }, { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) }, + { PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/sw64_pvt.c b/drivers/hwmon/sw64_pvt.c new file mode 100644 index 0000000000000000000000000000000000000000..aedc29d44077ef32f8e68a1622571b85faa76cf5 --- /dev/null +++ b/drivers/hwmon/sw64_pvt.c @@ -0,0 +1,224 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PVT device driver. 
+ *
+ * Part of lm_sensors, Linux kernel modules
+ * for hardware monitoring on Sunway.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#define PVT_VSYS		0
+#define PVT0_CTRL		0x7c00
+#define PVT02SPBU_DATA_OUT	(0x1 << 26)
+#define PVT_READ		0xc000
+#define PVT_WADDR		0xc800
+#define PVT_WDATA		0xcc00
+
+/* The PVT registers */
+#define PVT_SAFECTRL		0x0
+#define CLK_SEL			0x1
+#define PVT_RUN			0x2
+#define PVT_CONFIG		0x3
+#define PVT_WAIT_TIME		0x4
+#define TS_ALARM_HVALUE_L	0x5
+#define TS_ALARM_HVALUE_H	0x6
+#define TS_ALARM_LVALUE_L	0x7
+#define TS_ALARM_LVALUE_H	0x8
+#define TS_ALARM_TIMES		0x9
+#define TRIMG			0xa
+#define TRIMO			0xb
+#define VS_ALARM_HVALUE_L	0xc
+#define VS_ALARM_HVALUE_H	0xd
+#define VS_ALARM_LVALUE_L	0xe
+#define VS_ALARM_LVALUE_H	0xf
+#define VS_ALARM_TIMES		0x10
+#define PVT_ALARM_CLEAR		0x11
+#define PVT_ALARM_MASK		0x12
+#define PVT_DATA_OUT_L		0x13
+#define PVT_DATA_OUT_H		0x14
+#define PVT_STATE_INFO		0x15
+#define PVT_ALARM_INFO		0x16
+/* linear scale factors for converting a raw sample to a voltage */
+#define COEFFICIENT		71
+#define FIXEDVAL		45598
+
+/*
+ * Combine the upper 2 bits (m) and lower 8 bits (n) of the raw sample
+ * into a 10-bit value, then apply the linear calibration above.
+ */
+#define vol_algorithm(m, n)	(((((m >> 16) & 0x3) * 0x100) +\
+				((n >> 16) & 0xff)) * COEFFICIENT + FIXEDVAL)
+
+
+struct pvt_hwmon {
+	struct pvt *pvt;
+	void __iomem *base;
+};
+
+static const char * const input_names[] = {
+	[PVT_VSYS] = "voltage",
+};
+
+static inline void pvt_write_reg(struct pvt_hwmon *pvtvol, u64 a,
+				 u64 b, unsigned int offset)
+{
+	writel(a | b, pvtvol->base + offset);
+}
+
+static inline u64 pvt_read_reg(struct pvt_hwmon *pvtvol, unsigned int offset)
+{
+	u64 value;
+
+	value = readl(pvtvol->base + offset);
+	return value;
+}
+
+/* program a PVT register through the indirect PVT0_CTRL window */
+static void pvt_configure(struct pvt_hwmon *pvtvol, u64 value, u64 reg)
+{
+	pvt_write_reg(pvtvol, PVT_WDATA, value, PVT0_CTRL);
+	pvt_write_reg(pvtvol, PVT_WADDR, reg, PVT0_CTRL);
+}
+
+static inline u64 pvt_read_vol(struct pvt_hwmon *pvtvol, u64 data,
+			       u64 reg, unsigned int offset)
+{
+	unsigned int value;
+
+	pvt_write_reg(pvtvol, data, reg, offset);
+	msleep(100);
+	value = pvt_read_reg(pvtvol, offset);
+	return value;
+}
+
+static int pvt_get_vol(struct pvt_hwmon *pvtvol)
+{
+	unsigned long long data_h, data_l;
+
+	pvt_configure(pvtvol, 0x1, PVT_SAFECTRL);
+
+	/* configure PVT mode */
+	pvt_configure(pvtvol, 0x3, PVT_CONFIG);
+
+	/* PVT monitor enable */
+	pvt_configure(pvtvol, 0x1, PVT_RUN);
+
+	/* get the upper 2 bits of the PVT voltage */
+	data_h = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_H, PVT0_CTRL);
+	if ((data_h & PVT02SPBU_DATA_OUT) == 0) {
+		pr_err("failed to read the high bits of the PVT voltage\n");
+		return 0;
+	}
+
+	/* get the lower 8 bits of the PVT voltage */
+	data_l = pvt_read_vol(pvtvol, PVT_READ, PVT_DATA_OUT_L, PVT0_CTRL);
+	if ((data_l & PVT02SPBU_DATA_OUT) == 0) {
+		pr_err("failed to read the low bits of the PVT voltage\n");
+		return 0;
+	}
+
+	return vol_algorithm(data_h, data_l);
+}
+
+static ssize_t pvt_read(struct device *dev,
+			struct device_attribute *devattr, char *buf)
+{
+	struct pvt_hwmon *pvtvol = dev_get_drvdata(dev);
+	unsigned long long pvt_vol;
+
+	pvt_vol = pvt_get_vol(pvtvol);
+	/* in0_input is the calibrated value scaled down by 100 */
+	return sprintf(buf, "%lld\n", (pvt_vol / 100));
+}
+
+static ssize_t show_label(struct device *dev,
+			  struct device_attribute *devattr, char *buf)
+{
+	return sprintf(buf, "%s\n",
+		       input_names[to_sensor_dev_attr(devattr)->index]);
+}
+
+static SENSOR_DEVICE_ATTR(in0_input, 0444, pvt_read, NULL,
+			  PVT_VSYS);
+static SENSOR_DEVICE_ATTR(in0_label, 0444, show_label, NULL,
+			  PVT_VSYS);
+
+static struct attribute *pvt_attrs[] = {
+	&sensor_dev_attr_in0_input.dev_attr.attr,
+	&sensor_dev_attr_in0_label.dev_attr.attr,
+	NULL
+};
+
+ATTRIBUTE_GROUPS(pvt);
+
+static int pvt_vol_plat_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct pvt_hwmon *pvtvol;
+	struct device *hwmon_dev;
+	unsigned long long value;
+	struct device *dev = &pdev->dev;
+
+	pvtvol = devm_kzalloc(&pdev->dev, sizeof(*pvtvol), GFP_KERNEL);
+	if (!pvtvol)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		goto err;
+
+	pvtvol->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pvtvol->base))
+		return PTR_ERR(pvtvol->base);
+
+	platform_set_drvdata(pdev, pvtvol);
+	hwmon_dev = devm_hwmon_device_register_with_groups(dev, "pvt",
+							   pvtvol, pvt_groups);
+	if (IS_ERR(hwmon_dev))
+		return PTR_ERR(hwmon_dev);
+
+	value = pvt_get_vol(pvtvol);
+	if (!value) {
+		dev_info(&pdev->dev, "pvt_vol get failed\n");
+		return 0;
+	}
+
+	return 0;
+
+err:
+	dev_err(&pdev->dev, "no PVT resource\n");
+	return -ENXIO;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id pvt_vol_of_match[] = {
+	{ .compatible = "sw64,pvt-vol", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, pvt_vol_of_match);
+#endif
+
+static struct platform_driver pvt_vol_driver = {
+	.probe = pvt_vol_plat_probe,
+	.driver = {
+		.name = "pvt-sw64",
+		.of_match_table = of_match_ptr(pvt_vol_of_match),
+	},
+};
+
+static int __init pvt_vol_init_driver(void)
+{
+	return platform_driver_register(&pvt_vol_driver);
+}
+subsys_initcall(pvt_vol_init_driver);
+
+static void __exit pvt_vol_exit_driver(void)
+{
+	platform_driver_unregister(&pvt_vol_driver);
+}
+module_exit(pvt_vol_exit_driver);
+
+MODULE_AUTHOR("Wang Yingying");
+MODULE_DESCRIPTION("pvt controller");
+MODULE_LICENSE("GPL");
diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
index e5d18dac8ee7ba91682cc9c421baa6632169443b..0a5057dbe51a63fae6b5a92e22e3ee630a129927 100644
--- a/drivers/hwmon/via-cputemp.c
+++ b/drivers/hwmon/via-cputemp.c
@@ -273,7 +273,6 @@ static const struct x86_cpu_id __initconst cputemp_ids[] = {
 	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_A, NULL),
 	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_C7_D, NULL),
 	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 6, X86_CENTAUR_FAM6_NANO, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, X86_MODEL_ANY, NULL),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, cputemp_ids);
diff --git a/drivers/hwmon/zhaoxin-cputemp.c b/drivers/hwmon/zhaoxin-cputemp.c
new file mode 100644
index 0000000000000000000000000000000000000000..751d2c5a868ab615db076a18f4963a8972c6d50e
--- /dev/null
+++ b/drivers/hwmon/zhaoxin-cputemp.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * zhaoxin-cputemp.c - Driver for Zhaoxin CPU core temperature monitoring
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sysfs.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/platform_device.h>
+#include <linux/cpu.h>
+#include <asm/cpu_device_id.h>
+#include <asm/processor.h>
+#include <asm/msr.h>
+
+#define DRVNAME	"zhaoxin_cputemp"
+
+enum { SHOW_TEMP, SHOW_LABEL, SHOW_NAME, SHOW_CRIT, SHOW_MAX };
+
+struct zhaoxin_cputemp_data {
+	struct device *hwmon_dev;
+	const char *name;
+	u32 id;
+	u32 msr_temp;
+	u32 msr_crit;
+	u32 msr_max;
+};
+
+/* Sysfs stuff */
+
+static ssize_t name_show(struct device *dev, struct device_attribute *devattr, char *buf)
+{
+	int ret;
+	struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+	struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev);
+
+	if (attr->index == SHOW_NAME)
+		ret = sprintf(buf, "%s\n", data->name);
+	else /* show label */
+		ret = sprintf(buf, "Core %d\n", data->id);
+
return ret; +} + +static ssize_t temp_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xffffff) * 1000); +} + +static ssize_t crit_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_crit, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static ssize_t max_show(struct device *dev, struct device_attribute *devattr, char *buf) +{ + struct zhaoxin_cputemp_data *data = dev_get_drvdata(dev); + u32 eax, edx; + int err; + + err = rdmsr_safe_on_cpu(data->id, data->msr_max, &eax, &edx); + if (err) + return -EAGAIN; + + return sprintf(buf, "%lu\n", ((unsigned long)eax & 0xff) * 1000); +} + +static SENSOR_DEVICE_ATTR_RO(temp1_input, temp, SHOW_TEMP); +static SENSOR_DEVICE_ATTR_RO(temp1_label, name, SHOW_LABEL); +static SENSOR_DEVICE_ATTR_RO(name, name, SHOW_NAME); +static SENSOR_DEVICE_ATTR_RO(temp1_crit, crit, SHOW_CRIT); +static SENSOR_DEVICE_ATTR_RO(temp1_max, max, SHOW_MAX); + +static struct attribute *zhaoxin_cputemp_attributes[] = { + &sensor_dev_attr_name.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_crit.dev_attr.attr, + &sensor_dev_attr_temp1_max.dev_attr.attr, + NULL +}; + +static const struct attribute_group zhaoxin_cputemp_group = { + .attrs = zhaoxin_cputemp_attributes, +}; + +static int zhaoxin_cputemp_probe(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data; + int err; + u32 eax, edx; + struct cpuinfo_x86 *c = &cpu_data(pdev->id); + + data = devm_kzalloc(&pdev->dev, sizeof(struct zhaoxin_cputemp_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->id = pdev->id; + data->name = "zhaoxin_cputemp"; + data->msr_temp = 0x1423; + if (c->x86_model == 0x6b) { + data->msr_crit = 0x175b; + data->msr_max = 0x175a; + } else { + data->msr_crit = 0x1416; + data->msr_max = 0x1415; + } + + /* test if we can access the TEMPERATURE MSR */ + err = rdmsr_safe_on_cpu(data->id, data->msr_temp, &eax, &edx); + if (err) { + dev_err(&pdev->dev, "Unable to access TEMPERATURE MSR, giving up\n"); + return err; + } + + platform_set_drvdata(pdev, data); + + err = sysfs_create_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + if (err) + return err; + + data->hwmon_dev = hwmon_device_register_for_thermal(&pdev->dev, data->name, data); + if (IS_ERR(data->hwmon_dev)) { + err = PTR_ERR(data->hwmon_dev); + dev_err(&pdev->dev, "Class registration failed (%d)\n", err); + goto exit_remove; + } + + return 0; + +exit_remove: + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return err; +} + +static int zhaoxin_cputemp_remove(struct platform_device *pdev) +{ + struct zhaoxin_cputemp_data *data = platform_get_drvdata(pdev); + + hwmon_device_unregister(data->hwmon_dev); + sysfs_remove_group(&pdev->dev.kobj, &zhaoxin_cputemp_group); + return 0; +} + +static struct platform_driver zhaoxin_cputemp_driver = { + .driver = { + .name = DRVNAME, + }, + .probe = zhaoxin_cputemp_probe, + .remove = zhaoxin_cputemp_remove, +}; + +struct pdev_entry { + struct list_head list; + struct platform_device *pdev; + unsigned int cpu; +}; + +static 
LIST_HEAD(pdev_list); +static DEFINE_MUTEX(pdev_list_mutex); + +static int zhaoxin_cputemp_online(unsigned int cpu) +{ + int err; + struct platform_device *pdev; + struct pdev_entry *pdev_entry; + + pdev = platform_device_alloc(DRVNAME, cpu); + if (!pdev) { + err = -ENOMEM; + pr_err("Device allocation failed\n"); + goto exit; + } + + pdev_entry = kzalloc(sizeof(struct pdev_entry), GFP_KERNEL); + if (!pdev_entry) { + err = -ENOMEM; + goto exit_device_put; + } + + err = platform_device_add(pdev); + if (err) { + pr_err("Device addition failed (%d)\n", err); + goto exit_device_free; + } + + pdev_entry->pdev = pdev; + pdev_entry->cpu = cpu; + mutex_lock(&pdev_list_mutex); + list_add_tail(&pdev_entry->list, &pdev_list); + mutex_unlock(&pdev_list_mutex); + + return 0; + +exit_device_free: + kfree(pdev_entry); +exit_device_put: + platform_device_put(pdev); +exit: + return err; +} + +static int zhaoxin_cputemp_down_prep(unsigned int cpu) +{ + struct pdev_entry *p; + + mutex_lock(&pdev_list_mutex); + list_for_each_entry(p, &pdev_list, list) { + if (p->cpu == cpu) { + platform_device_unregister(p->pdev); + list_del(&p->list); + mutex_unlock(&pdev_list_mutex); + kfree(p); + return 0; + } + } + mutex_unlock(&pdev_list_mutex); + return 0; +} + +static const struct x86_cpu_id cputemp_ids[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x3b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x5b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(CENTAUR, 7, 0x6b, NULL), + X86_MATCH_VENDOR_FAM_MODEL(ZHAOXIN, 7, 0x6b, NULL), + {} +}; +MODULE_DEVICE_TABLE(x86cpu, cputemp_ids); + +static enum cpuhp_state zhaoxin_temp_online; + +static int __init zhaoxin_cputemp_init(void) +{ + int err; + + if (!x86_match_cpu(cputemp_ids)) + return -ENODEV; + + err = platform_driver_register(&zhaoxin_cputemp_driver); + if (err) + goto exit; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "hwmon/zhaoxin:online", + zhaoxin_cputemp_online, zhaoxin_cputemp_down_prep); + if (err < 0) + goto exit_driver_unreg; + + zhaoxin_temp_online = err; + +#ifndef CONFIG_HOTPLUG_CPU + if (list_empty(&pdev_list)) { + err = -ENODEV; + goto exit_hp_unreg; + } +#endif + return 0; + +#ifndef CONFIG_HOTPLUG_CPU +exit_hp_unreg: + cpuhp_remove_state_nocalls(zhaoxin_temp_online); +#endif +exit_driver_unreg: + platform_driver_unregister(&zhaoxin_cputemp_driver); +exit: + return err; +} + +static void __exit zhaoxin_cputemp_exit(void) +{ + cpuhp_remove_state(zhaoxin_temp_online); + platform_driver_unregister(&zhaoxin_cputemp_driver); +} + +MODULE_DESCRIPTION("Zhaoxin CPU temperature monitor"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(HWMON_THERMAL); + +module_init(zhaoxin_cputemp_init) +module_exit(zhaoxin_cputemp_exit) diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 97d27e01a6ee27dcbcfaacbe94d712aa26fab4fc..5cf07397c041001d6e18d12a638fb4c723541494 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -334,6 +334,28 @@ config I2C_VIAPRO This driver can also be built as a module. If so, the module will be called i2c-viapro. +config I2C_ZHAOXIN + tristate "Zhaoxin I2C controller driver" + depends on PCI + select I2C_ALGOBIT + help + If you say yes to this option, support will be included for the + Zhaoxin I2C interface + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin. 
+ +config I2C_SUNWAY + tristate "Sunway i2c lib" + depends on SW64 + help + If you say yes to this option, support will be included for the + Sunway Soc I2C interface on SW64 platform. + + This driver can also be built as a module. If so, the module + will be called i2c-sunway. + + if ACPI comment "ACPI drivers" @@ -348,6 +370,16 @@ config I2C_SCMI To compile this driver as a module, choose M here: the module will be called i2c-scmi. +config I2C_ZHAOXIN_SMBUS + tristate "Zhaoxin SMBus Interface" + depends on PCI || COMPILE_TEST + help + If you say yes to this option, support will be included for the + ZHAOXIN SMBus interface + + This driver can also be built as a module. If so, the module + will be called i2c-zhaoxin-smbus. + endif # ACPI comment "Mac SMBus host controller drivers" diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index af56fe2c75c09c791c13cbe78c7e206b7fe06f46..87cb4162fdda2760bec0dcfd4cf4b09e4f7383c2 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -29,6 +29,8 @@ obj-$(CONFIG_I2C_SIS630) += i2c-sis630.o obj-$(CONFIG_I2C_SIS96X) += i2c-sis96x.o obj-$(CONFIG_I2C_VIA) += i2c-via.o obj-$(CONFIG_I2C_VIAPRO) += i2c-viapro.o +obj-$(CONFIG_I2C_ZHAOXIN) += i2c-zhaoxin.o +obj-$(CONFIG_I2C_SUNWAY) += i2c-sunway.o # Mac SMBus host controller drivers obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o @@ -140,6 +142,7 @@ obj-$(CONFIG_I2C_ROBOTFUZZ_OSIF) += i2c-robotfuzz-osif.o obj-$(CONFIG_I2C_TAOS_EVM) += i2c-taos-evm.o obj-$(CONFIG_I2C_TINY_USB) += i2c-tiny-usb.o obj-$(CONFIG_I2C_VIPERBOARD) += i2c-viperboard.o +obj-$(CONFIG_I2C_ZHAOXIN_SMBUS) += i2c-zhaoxin-smbus.o # Other I2C/SMBus bus drivers obj-$(CONFIG_I2C_ACORN) += i2c-acorn.o diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index 35f762872b8a58c2f7e8fd4867bb0e139aea5cf0..83803a63e23ffe67f24f01b2ce031abb0da30e6a 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -63,6 +63,9 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + *val = readl(dev->base + reg); return 0; @@ -72,6 +75,9 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val) { struct dw_i2c_dev *dev = context; + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = reg << 7; + writel(val, dev->base + reg); return 0; @@ -149,6 +155,8 @@ int i2c_dw_init_regmap(struct dw_i2c_dev *dev) return ret; reg = readl(dev->base + DW_IC_COMP_TYPE); + if ((dev->flags & MODEL_MASK) == MODEL_SUNWAY) + reg = readl(dev->base + (DW_IC_COMP_TYPE << 7)); i2c_dw_release_lock(dev); if ((dev->flags & MODEL_MASK) == MODEL_AMD_NAVI_GPU) diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index a7f6f3eafad7dd72241f57a2a356219d36bfaf56..2c97fff25b07e0e340b5d68b7257154964f1180b 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -308,7 +308,8 @@ struct dw_i2c_dev { #define MODEL_BAIKAL_BT1 BIT(9) #define MODEL_AMD_NAVI_GPU BIT(10) #define MODEL_WANGXUN_SP BIT(11) -#define MODEL_MASK GENMASK(11, 8) +#define MODEL_SUNWAY BIT(12) +#define MODEL_MASK GENMASK(12, 8) /* * Enable UCSI interrupt by writing 0xd at register diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 855b698e99c08004df29a5da485722974de63335..c818e9d14b9acc670f7ec77334e647e82090ddca 
100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -150,9 +150,14 @@ static int dw_i2c_of_configure(struct platform_device *pdev) } static const struct of_device_id dw_i2c_of_match[] = { +#ifdef CONFIG_SW64 + { .compatible = "snps,designware-i2c", .data = (void *)MODEL_SUNWAY }, +#else { .compatible = "snps,designware-i2c", }, +#endif { .compatible = "mscc,ocelot-i2c", .data = (void *)MODEL_MSCC_OCELOT }, { .compatible = "baikal,bt1-sys-i2c", .data = (void *)MODEL_BAIKAL_BT1 }, + { .compatible = "sunway,suntai-i2c", .data = (void *)MODEL_SUNWAY }, {}, }; MODULE_DEVICE_TABLE(of, dw_i2c_of_match); diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 809fbd014cd6833749a677bba4b6845854459d3b..cc170c114e1090ddfd1a6600fca310961711ac6f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1043,8 +1043,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) bool notify_imc = false; is_sb800 = true; - if ((dev->vendor == PCI_VENDOR_ID_AMD || - dev->vendor == PCI_VENDOR_ID_HYGON) && + if (dev->vendor == PCI_VENDOR_ID_AMD && dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { u8 imc; diff --git a/drivers/i2c/busses/i2c-sunway.c b/drivers/i2c/busses/i2c-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..cc7268c6a2da81e0f302aa5bc7ac285d00214740 --- /dev/null +++ b/drivers/i2c/busses/i2c-sunway.c @@ -0,0 +1,405 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020 WXIAT Platform Software + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + * The drivers in this file are synchronous/blocking. In addition, + * use poll mode to read/write slave devices on the I2C bus instead + * of the interrupt mode. 
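+ *
+ * Typical use, mirroring cpld_write() at the end of this file
+ * (addr, reg and val are caller-supplied):
+ *
+ *	enable_i2c_controller(CPLD_BUSNR);
+ *	i2c_bus_rw(CPLD_BUSNR, addr, I2C_BUS_WRITE, sizeof(uint8_t), reg, &val);
+ *	disable_i2c_controller(CPLD_BUSNR);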
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include <asm/sw64io.h>
+
+#define CPLD_BUSNR 2
+
+#define IC_CLK_KHZ 25000
+
+/* I2C register definitions */
+#define DW_IC_CON 0x0
+#define DW_IC_STATUS 0x3800
+#define DW_IC_DATA_CMD 0x0800
+#define DW_IC_TAR 0x00200
+#define DW_IC_ENABLE 0x3600
+#define DW_IC_CMD 0x0100
+#define DW_IC_STOP 0x0200
+#define DW_IC_SDA_HOLD 0x3e00
+#define DW_IC_SDA_SETUP 0x4a00
+#define DW_IC_SS_SCL_HCNT 0x0a00
+#define DW_IC_SS_SCL_LCNT 0x0c00
+#define DW_IC_FS_SCL_HCNT 0x0e00
+#define DW_IC_FS_SCL_LCNT 0x1000
+#define DW_IC_TX_TL 0x1e00
+#define DW_IC_RX_TL 0x1c00
+#define DW_IC_INTR_MASK 0x1800
+
+#define MAX_RETRY 10000000
+
+#define DW_IC_STATUS_ACTIVITY 0x1
+#define DW_IC_STATUS_TFNF 0x2
+#define DW_IC_STATUS_TFE 0x4
+#define DW_IC_STATUS_RFNE 0x8
+#define DW_IC_STATUS_RFF 0x10
+
+#define DW_IC_CON_MASTER 0x1
+#define DW_IC_CON_SPEED_STD 0x2
+#define DW_IC_CON_SPEED_FAST 0x4
+#define DW_IC_CON_10BITADDR_MASTER 0x10
+#define DW_IC_CON_RESTART_EN 0x20
+#define DW_IC_CON_SLAVE_DISABLE 0x40
+
+#define INTEL_MID_STD_CFG (DW_IC_CON_MASTER | \
+			DW_IC_CON_SLAVE_DISABLE | \
+			DW_IC_CON_RESTART_EN)
+
+#define DW_IC_INTR_RX_UNDER 0x001
+#define DW_IC_INTR_RX_OVER 0x002
+#define DW_IC_INTR_RX_FULL 0x004
+#define DW_IC_INTR_TX_OVER 0x008
+#define DW_IC_INTR_TX_EMPTY 0x010
+#define DW_IC_INTR_RD_REQ 0x020
+#define DW_IC_INTR_TX_ABRT 0x040
+#define DW_IC_INTR_RX_DONE 0x080
+#define DW_IC_INTR_ACTIVITY 0x100
+#define DW_IC_INTR_STOP_DET 0x200
+#define DW_IC_INTR_START_DET 0x400
+#define DW_IC_INTR_GEN_CALL 0x800
+
+#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
+			DW_IC_INTR_TX_EMPTY | \
+			DW_IC_INTR_TX_ABRT | \
+			DW_IC_INTR_STOP_DET)
+
+enum i2c_bus_operation {
+	I2C_BUS_READ,
+	I2C_BUS_WRITE,
+};
+
+static void __iomem *m_i2c_base_address;
+
+/*
+ * This function gets the base address of the selected I2C controller.
+ *
+ * @param i2c_controller_index Bus number of the I2C controller.
+ * @return I2C BAR.
+ */
+void __iomem *get_i2c_bar_addr(uint8_t i2c_controller_index)
+{
+	switch (i2c_controller_index) {
+	case 0:
+		return __va(IO_BASE | IIC0_BASE);
+	case 1:
+		return __va(IO_BASE | IIC1_BASE);
+	case 2:
+		return __va(IO_BASE | IIC2_BASE);
+	default:
+		return NULL;
+	}
+}
+
+static inline void write_cpu_i2c_controller(uint64_t offset, uint32_t data)
+{
+	writel(data, m_i2c_base_address + offset);
+}
+
+static inline uint32_t read_cpu_i2c_controller(uint64_t offset)
+{
+	return readl(m_i2c_base_address + offset);
+}
+
+static int poll_for_status_set0(uint16_t status_bit)
+{
+	uint64_t retry = 0;
+
+	while (retry < MAX_RETRY) {
+		if (read_cpu_i2c_controller(DW_IC_STATUS) & status_bit)
+			break;
+		retry++;
+	}
+
+	if (retry == MAX_RETRY)
+		return -ETIME;
+
+	return 0;
+}
+
+static uint32_t i2c_dw_scl_lcnt(uint32_t ic_clk, uint32_t t_low,
+				uint32_t tf, uint32_t offset)
+{
+	/*
+	 * Conditional expression:
+	 *
+	 * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (t_low + tf)
+	 *
+	 * DW I2C core starts counting the SCL CNTs for the LOW period
+	 * of the SCL clock (t_low) as soon as it pulls the SCL line.
+	 * In order to meet the t_low timing spec, we need to take into
+	 * account the fall time of SCL signal (tf). Default tf value
+	 * should be 0.3 us, for safety.
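+	 *
+	 * For example, with ic_clk = 25000 kHz (25 MHz), t_low = 4700 ns and
+	 * tf = 300 ns (standard mode):
+	 *
+	 *	(25000 * (4700 + 300) + 500000) / 1000000 - 1 = 124
+	 *
+	 * so DW_IC_SS_SCL_LCNT is programmed with 124 when offset is 0.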
+	 */
+	return ((ic_clk * (t_low + tf) + 500000) / 1000000) - 1 + offset;
+}
+
+static uint32_t i2c_dw_scl_hcnt(uint32_t ic_clk, uint32_t t_symbol,
+				uint32_t tf, uint32_t cond, uint32_t offset)
+{
+	/*
+	 * DesignWare I2C core doesn't seem to have a solid strategy to meet
+	 * the tHD;STA timing spec. Configuring _HCNT based on the tHIGH spec
+	 * will result in violation of the tHD;STA spec.
+	 */
+	if (cond)
+		/*
+		 * Conditional expression:
+		 *
+		 * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
+		 *
+		 * This is based on the DW manuals, and represents an ideal
+		 * configuration. The resulting I2C bus speed will be faster
+		 * than any of the others.
+		 *
+		 * If your hardware is free from tHD;STA issue, try this one.
+		 */
+		return (ic_clk * t_symbol + 500000) / 1000000 - 8 + offset;
+	/*
+	 * Conditional expression:
+	 *
+	 * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
+	 *
+	 * This is just an experimental rule; the tHD;STA period turned
+	 * out to be proportional to (_HCNT + 3). With this setting,
+	 * we could meet both tHIGH and tHD;STA timing specs.
+	 *
+	 * If unsure, take this alternative.
+	 *
+	 * The reason why we need to take into account "tf" here,
+	 * is the same as described in i2c_dw_scl_lcnt().
+	 */
+	return (ic_clk * (t_symbol + tf) + 500000) / 1000000 - 3 + offset;
+}
+
+static int wait_for_cpu_i2c_bus_busy(void)
+{
+	uint64_t retry = 0;
+	uint32_t status = 0;
+
+	do {
+		retry++;
+		status = !!(read_cpu_i2c_controller(DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY);
+	} while ((retry < MAX_RETRY) && status);
+
+	if (retry == MAX_RETRY)
+		return -ETIME;
+
+	return 0;
+}
+
+static int i2c_read(uint8_t reg_offset, uint8_t *buffer, uint32_t length)
+{
+	int status;
+	uint32_t i;
+
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status)
+		return status;
+
+	write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset);
+
+	for (i = 0; i < length; i++) {
+		if (i == length - 1)
+			write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD | DW_IC_STOP);
+		else
+			write_cpu_i2c_controller(DW_IC_DATA_CMD, DW_IC_CMD);
+
+		if (poll_for_status_set0(DW_IC_STATUS_RFNE) == 0)
+			buffer[i] = readb(m_i2c_base_address + DW_IC_DATA_CMD);
+		else
+			pr_err("Read timeout, line %d.\n", __LINE__);
+	}
+
+	return 0;
+}
+
+static int i2c_write(uint8_t reg_offset, uint8_t *buffer, uint32_t length)
+{
+	int status;
+	uint32_t i;
+
+	/* Data transfer, poll till transmit ready bit is set */
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status) {
+		pr_err("In i2c-sunway.c, line %d.\n", __LINE__);
+		return status;
+	}
+
+	write_cpu_i2c_controller(DW_IC_DATA_CMD, reg_offset);
+
+	for (i = 0; i < length; i++) {
+		if (poll_for_status_set0(DW_IC_STATUS_TFNF) == 0) {
+			if (i == length - 1)
+				write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i] | DW_IC_STOP);
+			else
+				write_cpu_i2c_controller(DW_IC_DATA_CMD, buffer[i]);
+		} else {
+			pr_err("Write timeout, line %d.\n", __LINE__);
+		}
+	}
+
+	mdelay(200);
+	status = poll_for_status_set0(DW_IC_STATUS_TFE);
+	if (status) {
+		pr_err("In i2c-sunway.c, line %d.\n", __LINE__);
+		return status;
+	}
+
+	return 0;
+}
+
+/* Initialize the I2C controller */
+void init_cpu_i2c_controller(void)
+{
+	uint32_t h_cnt;
+	uint32_t l_cnt;
+	uint32_t input_ic_clk_rate = IC_CLK_KHZ;	/* in kHz, i.e. 25 MHz */
+	uint32_t sda_falling_time = 300;
+	uint32_t scl_falling_time = 300;
+
+	/*
+	 * The I2C protocol specification requires 300ns of hold time on the
+	 * SDA signal (tHD;DAT) in standard and fast speed modes, and a hold
+	 * time long enough to bridge the undefined part between logic 1 and
+	 * logic 0 of the falling edge of SCL in high speed mode.
+	 */
+	uint32_t sda_hold_time = 432;
+	uint32_t sda_hold = 0;
+
+	/* First, disable the controller. */
+	pr_debug("Initialize CPU I2C controller\n");
+
+	write_cpu_i2c_controller(DW_IC_ENABLE, 0);
+
+	sda_hold = (input_ic_clk_rate * sda_hold_time + 500000) / 1000000;
+	write_cpu_i2c_controller(DW_IC_SDA_HOLD, sda_hold);
+
+	/* Set standard and fast speed dividers for high/low periods. */
+	/* Standard-mode */
+	h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 4000, sda_falling_time, 0, 0);
+	l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 4700, scl_falling_time, 0);
+
+	write_cpu_i2c_controller(DW_IC_SS_SCL_HCNT, h_cnt);
+	write_cpu_i2c_controller(DW_IC_SS_SCL_LCNT, l_cnt);
+
+	pr_debug("Standard-mode HCNT=%x, LCNT=%x\n", h_cnt, l_cnt);
+
+	/* Fast-mode */
+	h_cnt = i2c_dw_scl_hcnt(input_ic_clk_rate, 600, sda_falling_time, 0, 0);
+	l_cnt = i2c_dw_scl_lcnt(input_ic_clk_rate, 1300, scl_falling_time, 0);
+
+	write_cpu_i2c_controller(DW_IC_FS_SCL_HCNT, h_cnt);
+	write_cpu_i2c_controller(DW_IC_FS_SCL_LCNT, l_cnt);
+
+	pr_debug("Fast-mode HCNT=%x, LCNT=%x\n", h_cnt, l_cnt);
+
+	/*
+	 * Configure Tx/Rx FIFO threshold levels. Since we will be working
+	 * in polling mode, set both thresholds to their minimum.
+	 */
+	write_cpu_i2c_controller(DW_IC_TX_TL, 0);
+	write_cpu_i2c_controller(DW_IC_RX_TL, 0);
+	write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK);
+
+	/* Configure the I2C master */
+	write_cpu_i2c_controller(DW_IC_CON,
+				 INTEL_MID_STD_CFG | DW_IC_CON_SPEED_STD);
+}
+
+/*
+ * This function enables I2C controllers.
+ *
+ * @param i2c_controller_index Bus number of the I2C controller.
+ */
+void enable_i2c_controller(uint8_t i2c_controller_index)
+{
+	m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index);
+	init_cpu_i2c_controller();
+}
+
+/*
+ * Write/Read data from I2C device.
+ *
+ * @i2c_controller_index: i2c bus number
+ * @slave_address: slave address
+ * @operation: to read or write
+ * @length: number of bytes
+ * @reg_offset: register offset
+ * @buffer: in/out buffer
+ */
+int i2c_bus_rw(uint8_t i2c_controller_index, uint8_t slave_address,
+	       enum i2c_bus_operation operation, uint32_t length,
+	       uint8_t reg_offset, void *buffer)
+{
+	uint8_t *byte_buffer = buffer;
+	int status = 0;
+	uint32_t databuffer, temp;
+
+	m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index);
+	status = wait_for_cpu_i2c_bus_busy();
+	if (status) {
+		pr_err("I2C bus busy, line %d\n", __LINE__);
+		return status;
+	}
+
+	mdelay(1000);
+
+	/* Set the slave address. */
+	write_cpu_i2c_controller(DW_IC_ENABLE, 0x0);	/* Disable controller */
+	databuffer = read_cpu_i2c_controller(DW_IC_CON);
+	databuffer &= ~DW_IC_CON_10BITADDR_MASTER;
+	write_cpu_i2c_controller(DW_IC_CON, databuffer);
+
+	/* Fill the target addr. */
+	write_cpu_i2c_controller(DW_IC_TAR, slave_address);
+
+	temp = read_cpu_i2c_controller(DW_IC_TAR);
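+
+	/*
+	 * The DesignWare core only latches DW_IC_TAR while the controller is
+	 * disabled, which is why the target address is programmed with
+	 * DW_IC_ENABLE cleared; the readback above appears to serve only as
+	 * a posted-write flush.
+	 */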
+	write_cpu_i2c_controller(DW_IC_ENABLE, 0x1);	/* Enable the adapter */
+	write_cpu_i2c_controller(DW_IC_INTR_MASK, DW_IC_INTR_DEFAULT_MASK);
+
+	if (operation == I2C_BUS_READ)
+		status = i2c_read(reg_offset, byte_buffer, length);
+	else if (operation == I2C_BUS_WRITE)
+		status = i2c_write(reg_offset, byte_buffer, length);
+
+	/* Disable controller */
+	write_cpu_i2c_controller(DW_IC_ENABLE, 0x0);
+
+	return status;
+}
+
+void disable_i2c_controller(uint8_t i2c_controller_index)
+{
+	m_i2c_base_address = get_i2c_bar_addr(i2c_controller_index);
+
+	/* Disable controller */
+	write_cpu_i2c_controller(DW_IC_ENABLE, 0x0);
+	m_i2c_base_address = NULL;
+}
+
+void cpld_write(uint8_t slave_addr, uint8_t reg, uint8_t data)
+{
+	enable_i2c_controller(CPLD_BUSNR);
+	i2c_bus_rw(CPLD_BUSNR, slave_addr, I2C_BUS_WRITE, sizeof(uint8_t), reg, &data);
+	disable_i2c_controller(CPLD_BUSNR);
+}
diff --git a/drivers/i2c/busses/i2c-zhaoxin-smbus.c b/drivers/i2c/busses/i2c-zhaoxin-smbus.c
new file mode 100644
index 0000000000000000000000000000000000000000..52c689e928afff5782fd97ab5079fdc61c85d953
--- /dev/null
+++ b/drivers/i2c/busses/i2c-zhaoxin-smbus.c
@@ -0,0 +1,385 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Zhaoxin SMBus controller driver
+ *
+ * Copyright(c) 2023 Shanghai Zhaoxin Semiconductor Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_VERSION "3.1.0"
+
+#define ZXSMB_NAME "smbus_zhaoxin"
+
+/*
+ * registers
+ */
+/* SMBus MMIO address offsets */
+#define ZXSMB_STS 0x00
+#define ZXSMB_BUSY BIT(0)
+#define ZXSMB_CMD_CMPLET BIT(1)
+#define ZXSMB_DEV_ERR BIT(2)
+#define ZXSMB_BUS_CLSI BIT(3)
+#define ZXSMB_FAIL_TRANS BIT(4)
+#define ZXSMB_STS_MASK GENMASK(4, 0)
+#define ZXSMB_NSMBSRST BIT(5)
+#define ZXSMB_CTL 0x02
+#define ZXSMB_CMPLT_EN BIT(0)
+#define ZXSMB_KILL_PRG BIT(1)
+#define ZXSMB_START BIT(6)
+#define ZXSMB_PEC_EN BIT(7)
+#define ZXSMB_CMD 0x03
+#define ZXSMB_ADD 0x04
+#define ZXSMB_DAT0 0x05
+#define ZXSMB_DAT1 0x06
+#define ZXSMB_BLKDAT 0x07
+
+/*
+ * platform related information
+ */
+/* protocol cmd constants */
+#define ZXSMB_QUICK 0x00
+#define ZXSMB_BYTE 0x04
+#define ZXSMB_BYTE_DATA 0x08
+#define ZXSMB_WORD_DATA 0x0C
+#define ZXSMB_PROC_CALL 0x10
+#define ZXSMB_BLOCK_DATA 0x14
+#define ZXSMB_I2C_10_BIT_ADDR 0x18
+#define ZXSMB_I2C_PROC_CALL 0x30
+#define ZXSMB_I2C_BLOCK_DATA 0x34
+#define ZXSMB_I2C_7_BIT_ADDR 0x38
+#define ZXSMB_UNIVERSAL 0x3C
+
+#define ZXSMB_TIMEOUT 500
+
+struct zxsmb {
+	struct device *dev;
+	struct i2c_adapter adap;
+	struct completion complete;
+	u16 base;
+	int irq;
+	u8 status;
+	int size;
+	u8 pec;
+};
+
+static irqreturn_t zxsmb_irq_handle(int irq, void *dev_id)
+{
+	struct zxsmb *smb = (struct zxsmb *)dev_id;
+
+	smb->status = inb(smb->base + ZXSMB_STS);
+	if ((smb->status & ZXSMB_STS_MASK) == 0)
+		return IRQ_NONE;
+
+	/* clear status */
+	outb(smb->status, smb->base + ZXSMB_STS);
+	complete(&smb->complete);
+
+	return IRQ_HANDLED;
+}
+
+static int zxsmb_status_check(struct zxsmb *smb)
+{
+	if (smb->status & ZXSMB_CMD_CMPLET)
+		return 0;
+
+	if (smb->status & ZXSMB_BUS_CLSI) {
+		dev_err(smb->dev, "Lost arbitration\n");
+		outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
+		return -EAGAIN;
+	}
+
+	dev_dbg(smb->dev, "Trans failed, status = 0x%X\n", smb->status);
+
+	return -EIO;
+}
+
+static int zxsmb_wait_interrupt_finish(struct zxsmb *smb)
+{
+	unsigned long time_left;
+
+	time_left = wait_for_completion_timeout(&smb->complete, msecs_to_jiffies(ZXSMB_TIMEOUT));
+	if (time_left == 0) {
+		u8 status = inb(smb->base + ZXSMB_STS);
+
+		/* on some hosts the IRQ configuration does not work well */
+		if (status & ZXSMB_STS_MASK) {
+			outb(status, smb->base + ZXSMB_STS);
+			outb(ZXSMB_KILL_PRG, smb->base + ZXSMB_CTL);
+			devm_free_irq(smb->dev, smb->irq, smb);
+			smb->irq = 0;
+			dev_warn(smb->dev, "falling back to polling mode\n");
+
+			return -EAGAIN;
+		}
+		dev_dbg(smb->dev, "interrupt timeout\n");
+		return -EIO;
+	}
+
+	return zxsmb_status_check(smb);
+}
+
+static int zxsmb_wait_polling_finish(struct zxsmb *smb)
+{
+	int status;
+	int time_left = ZXSMB_TIMEOUT * 10;
+
+	do {
+		usleep_range(100, 200);
+		status = inb(smb->base + ZXSMB_STS);
+	} while ((status & ZXSMB_BUSY) && (--time_left));
+
+	if (time_left == 0) {
+		dev_dbg(smb->dev, "polling timeout\n");
+		return -EIO;
+	}
+
+	/* clear status */
+	outb(status, smb->base + ZXSMB_STS);
+	smb->status = status;
+
+	return zxsmb_status_check(smb);
+}
+
+static int zxsmb_trans_start(struct zxsmb *smb)
+{
+	u16 base = smb->base;
+	int tmp;
+
+	/* Make sure the SMBus host is ready to start transmitting */
+	tmp = inb(base + ZXSMB_STS);
+	if (tmp & ZXSMB_BUSY) {
+		outb(tmp, base + ZXSMB_STS);
+		usleep_range(1000, 5000);
+		tmp = inb(base + ZXSMB_STS);
+		if (tmp & ZXSMB_BUSY) {
+			dev_err(smb->dev, "SMBus reset failed! (0x%02x)\n", tmp);
+			return -EIO;
+		}
+	}
+
+	tmp = ZXSMB_START | smb->size;
+
+	if (smb->pec)
+		tmp |= ZXSMB_PEC_EN;
+	else
+		tmp &= (~ZXSMB_PEC_EN);
+
+	if (smb->irq)
+		tmp |= ZXSMB_CMPLT_EN;
+
+	reinit_completion(&smb->complete);
+	smb->status = 0;
+	outb(tmp, base + ZXSMB_CTL);
+	return 0;
+}
+
+static int zxsmb_transaction(struct zxsmb *smb)
+{
+	int err;
+
+	err = zxsmb_trans_start(smb);
+	if (err)
+		return err;
+
+	if (smb->irq)
+		err = zxsmb_wait_interrupt_finish(smb);
+	else
+		err = zxsmb_wait_polling_finish(smb);
+
+	outb(0, smb->base + ZXSMB_CTL);
+	return err;
+}
+
+static int zxsmb_smbus_xfer(struct i2c_adapter *adap, u16 addr, u16 flags, char read, u8 command,
+			    int size, union i2c_smbus_data *data)
+{
+	int i;
+	int err;
+	u8 len;
+	struct zxsmb *smb = (struct zxsmb *)i2c_get_adapdata(adap);
+	u16 base = smb->base;
+
+	switch (size) {
+	case I2C_SMBUS_QUICK:
+		size = ZXSMB_QUICK;
+		break;
+	case I2C_SMBUS_BYTE:
+		size = ZXSMB_BYTE;
+		if (!read)
+			outb(command, base + ZXSMB_CMD);
+		break;
+	case I2C_SMBUS_BYTE_DATA:
+		outb(command, base + ZXSMB_CMD);
+		if (!read)
+			outb(data->byte, base + ZXSMB_DAT0);
+		size = ZXSMB_BYTE_DATA;
+		break;
+	case I2C_SMBUS_PROC_CALL:
+	case I2C_SMBUS_WORD_DATA:
+		if (read && size == I2C_SMBUS_PROC_CALL)
+			goto exit_unsupported;
+		outb(command, base + ZXSMB_CMD);
+		if (!read) {
+			outb(data->word & 0xff, base + ZXSMB_DAT0);
+			outb((data->word & 0xff00) >> 8, base + ZXSMB_DAT1);
+		}
+		size = (size == I2C_SMBUS_PROC_CALL) ?
+			ZXSMB_PROC_CALL : ZXSMB_WORD_DATA;
+		break;
+	case I2C_SMBUS_I2C_BLOCK_DATA:
+	case I2C_SMBUS_BLOCK_DATA:
+		len = data->block[0];
+		if (read && size == I2C_SMBUS_I2C_BLOCK_DATA)
+			outb(len, base + ZXSMB_DAT1);
+		outb(command, base + ZXSMB_CMD);
+		/* Reset ZXSMB_BLKDAT */
+		inb(base + ZXSMB_CTL);
+		if (!read) {
+			outb(len, base + ZXSMB_DAT0);
+			outb(0, base + ZXSMB_DAT1);
+			for (i = 1; i <= len; i++)
+				outb(data->block[i], base + ZXSMB_BLKDAT);
+		}
+		size = (size == I2C_SMBUS_I2C_BLOCK_DATA) ?
+			ZXSMB_I2C_BLOCK_DATA : ZXSMB_BLOCK_DATA;
+		break;
+	default:
+		goto exit_unsupported;
+	}
+
+	outb(((addr & 0x7f) << 1) | read, base + ZXSMB_ADD);
+	smb->size = size;
+	smb->pec = flags & I2C_CLIENT_PEC;
+	err = zxsmb_transaction(smb);
+	if (err)
+		return err;
+
+	if ((read == I2C_SMBUS_WRITE) || (size == ZXSMB_QUICK)) {
+		if (unlikely(size == ZXSMB_PROC_CALL))
+			goto prepare_read;
+		return 0;
+	}
+
+prepare_read:
+	switch (size) {
+	case ZXSMB_BYTE:
+	case ZXSMB_BYTE_DATA:
+		data->byte = inb(base + ZXSMB_DAT0);
+		break;
+	case ZXSMB_PROC_CALL:
+	case ZXSMB_WORD_DATA:
+		data->word = inb(base + ZXSMB_DAT0) + (inb(base + ZXSMB_DAT1) << 8);
+		break;
+	case ZXSMB_I2C_BLOCK_DATA:
+	case ZXSMB_BLOCK_DATA:
+		data->block[0] = inb(base + ZXSMB_DAT0);
+		if (data->block[0] > I2C_SMBUS_BLOCK_MAX)
+			data->block[0] = I2C_SMBUS_BLOCK_MAX;
+		/* Reset ZXSMB_BLKDAT */
+		inb(base + ZXSMB_CTL);
+		for (i = 1; i <= data->block[0]; i++)
+			data->block[i] = inb(base + ZXSMB_BLKDAT);
+		break;
+	}
+
+	return 0;
+
+exit_unsupported:
+	dev_err(smb->dev, "unsupported access, size:%x, dir:%s\n", size, read ? "read" : "write");
+	return -EOPNOTSUPP;
+}
+
+static u32 zxsmb_func(struct i2c_adapter *adapter)
+{
+	return I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smbus_algorithm = {
+	.smbus_xfer = zxsmb_smbus_xfer,
+	.functionality = zxsmb_func,
+};
+
+static int zxsmb_probe(struct platform_device *pdev)
+{
+	struct zxsmb *smb;
+	struct resource *res;
+	struct i2c_adapter *adap;
+
+	smb = devm_kzalloc(&pdev->dev, sizeof(*smb), GFP_KERNEL);
+	if (!smb)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+	if (!res)
+		return -ENODEV;
+	smb->base = res->start;
+	if (!devm_request_region(&pdev->dev, res->start, resource_size(res), pdev->name)) {
+		dev_err(&pdev->dev, "Can't get I/O resource\n");
+		return -EBUSY;
+	}
+
+	init_completion(&smb->complete);
+	smb->irq = platform_get_irq(pdev, 0);
+	if (smb->irq < 0 || devm_request_irq(&pdev->dev, smb->irq, zxsmb_irq_handle, IRQF_SHARED,
+					     pdev->name, smb)) {
+		dev_warn(&pdev->dev, "failed to request irq %d\n", smb->irq);
+		smb->irq = 0;
+	}
+
+	smb->dev = &pdev->dev;
+	platform_set_drvdata(pdev, (void *)smb);
+
+	adap = &smb->adap;
+	adap->algo = &smbus_algorithm;
+	adap->retries = 2;
+	adap->owner = THIS_MODULE;
+	adap->dev.parent = &pdev->dev;
+	ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev));
+	snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent),
+		 dev_name(smb->dev));
+	i2c_set_adapdata(&smb->adap, smb);
+
+	return i2c_add_adapter(&smb->adap);
+}
+
+static int zxsmb_remove(struct platform_device *pdev)
+{
+	struct zxsmb *smb = platform_get_drvdata(pdev);
+
+	i2c_del_adapter(&(smb->adap));
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, smb);
+
+	return 0;
+}
+
+static const struct acpi_device_id zxsmb_acpi_match[] = {
+	{"SMB3324", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(acpi, zxsmb_acpi_match);
+
+static struct platform_driver zxsmb_driver = {
+	.probe = zxsmb_probe,
+	.remove = zxsmb_remove,
+	.driver = {
+		.name = ZXSMB_NAME,
+		.acpi_match_table = ACPI_PTR(zxsmb_acpi_match),
+	},
+};
+
+module_platform_driver(zxsmb_driver);
+
+MODULE_AUTHOR("hanshu@zhaoxin.com");
+MODULE_DESCRIPTION("Zhaoxin SMBus driver");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-zhaoxin.c b/drivers/i2c/busses/i2c-zhaoxin.c
new file mode 100644
index 0000000000000000000000000000000000000000..3d4cb36c1f1700993e12c815111f4df53002dec4
--- /dev/null
+++ b/drivers/i2c/busses/i2c-zhaoxin.c
@@ -0,0 +1,561 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright(c) 2021 Shanghai Zhaoxin Semiconductor Corporation.
+ * All rights reserved.
+ */
+
+#include <linux/acpi.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#define DRIVER_VERSION "1.5.1"
+
+#define ZX_I2C_NAME "i2c_zhaoxin"
+
+/* REG_CR Bit fields */
+#define ZXI2C_REG_CR 0x00
+#define ZXI2C_CR_ENABLE BIT(0)
+#define ZXI2C_CR_RX_END BIT(1)
+#define ZXI2C_CR_TX_END BIT(2)
+#define ZXI2C_CR_END_MASK GENMASK(2, 1)
+#define ZXI2C_CR_CPU_RDY BIT(3)
+#define ZXI2C_CR_MST_RST BIT(7)
+#define ZXI2C_CR_FIFO_MODE BIT(14)
+
+/* REG_TCR Bit fields */
+#define ZXI2C_REG_TCR 0x02
+#define ZXI2C_TCR_HS_MODE BIT(13)
+#define ZXI2C_TCR_MASTER_READ BIT(14)
+#define ZXI2C_TCR_FAST BIT(15)
+
+/* REG_CSR Bit fields */
+#define ZXI2C_REG_CSR 0x04
+#define ZXI2C_CSR_RCV_NOT_ACK BIT(0)
+#define ZXI2C_CSR_READY_MASK BIT(1)
+
+/* REG_ISR Bit fields */
+#define ZXI2C_REG_ISR 0x06
+#define ZXI2C_ISR_NACK_ADDR BIT(0)
+#define ZXI2C_ISR_BYTE_END BIT(1)
+#define ZXI2C_ISR_SCL_TIMEOUT BIT(2)
+#define ZXI2C_ISR_MASK_ALL GENMASK(2, 0)
+#define ZXI2C_IRQ_FIFOEND BIT(3)
+#define ZXI2C_IRQ_FIFONACK BIT(4)
+#define ZXI2C_IRQ_MASK (ZXI2C_ISR_MASK_ALL | ZXI2C_IRQ_FIFOEND | ZXI2C_IRQ_FIFONACK)
+
+/* REG_IMR Bit fields */
+#define ZXI2C_REG_IMR 0x08
+#define ZXI2C_IMR_ADDRNACK BIT(0)
+#define ZXI2C_IMR_BYTE BIT(1)
+#define ZXI2C_IMR_SCL_TIMEOUT BIT(2)
+#define ZXI2C_IMR_ENABLE_ALL GENMASK(2, 0)
+
+#define ZXI2C_REG_CLK 0x10
+#define ZXI2C_CLK_50M BIT(0)
+#define ZXI2C_REG_REV 0x11
+#define ZXI2C_REG_HCR 0x12
+#define ZXI2C_HCR_RST_FIFO GENMASK(1, 0)
+#define ZXI2C_REG_HTDR 0x13
+#define ZXI2C_REG_HRDR 0x14
+#define ZXI2C_REG_HTLR 0x15
+#define ZXI2C_REG_HRLR 0x16
+#define ZXI2C_REG_HWCNTR 0x18
+#define ZXI2C_REG_HRCNTR 0x19
+
+#define ZXI2C_REG_CDR 0x0A
+#define ZXI2C_REG_TR 0x0C
+#define ZXI2C_REG_MCR 0x0E
+
+struct zxi2c {
+	struct i2c_adapter adapter;
+	struct completion complete;
+	struct device *dev;
+	void __iomem *base;
+	struct clk *clk;
+	u16 tcr;
+	int irq;
+	u16 cmd_status;
+	u16 tr;
+	u16 mcr;
+	u16 csr;
+	u8 fstp;
+	u8 hrv;
+};
+
+/* parameter constants */
+#define ZXI2C_GOLD_FSTP_100K 0xF3
+#define ZXI2C_GOLD_FSTP_400K 0x38
+#define ZXI2C_GOLD_FSTP_1M 0x13
+#define ZXI2C_GOLD_FSTP_3400K 0x37
+
+#define ZXI2C_HS_MASTER_CODE (0x08 << 8)
+#define ZXI2C_FIFO_SIZE 32
+
+static int zxi2c_wait_bus_ready(struct zxi2c *i2c)
+{
+	unsigned long timeout;
+	void __iomem *base = i2c->base;
+	u16 tmp;
+
+	timeout = jiffies + msecs_to_jiffies(200);
+	while (!(readw(base + ZXI2C_REG_CSR) & ZXI2C_CSR_READY_MASK)) {
+		if (time_after(jiffies, timeout)) {
+			dev_warn(i2c->dev, "timeout waiting for bus ready\n");
+			return -EBUSY;
+		}
+		tmp = ioread16(i2c->base + ZXI2C_REG_CR);
+		iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR);
+
+		msleep(20);
+	}
+
+	return 0;
+}
+
+static int zxi2c_wait_status(struct zxi2c *i2c, u8 status)
+{
+	unsigned long time_left;
+
+	time_left = wait_for_completion_timeout(&i2c->complete, msecs_to_jiffies(500));
+	if (time_left <= 1)
+		return -ETIMEDOUT;
+
+	if (i2c->cmd_status & status)
+		return 0;
+
+	return -EIO;
+}
+
+static irqreturn_t zxi2c_isr(int irq, void *data)
+{
+	struct zxi2c *i2c = data;
+
+	/* save the status and write-clear it */
+	i2c->cmd_status = readw(i2c->base + ZXI2C_REG_ISR);
+	if (!i2c->cmd_status)
+		return IRQ_NONE;
+
+	writew(i2c->cmd_status, i2c->base + ZXI2C_REG_ISR);
+
+	complete(&i2c->complete);
+
+	return IRQ_HANDLED;
+}
+
+static int
zxi2c_write(struct zxi2c *i2c, struct i2c_msg *msg, bool last) +{ + u16 val, tcr_val = i2c->tcr; + int xfer_len = 0; + void __iomem *base = i2c->base; + + writew(msg->buf[0] & 0xFF, base + ZXI2C_REG_CDR); + reinit_completion(&i2c->complete); + writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR); + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + xfer_len++; + + val = readw(base + ZXI2C_REG_CSR); + if (val & ZXI2C_CSR_RCV_NOT_ACK) { + dev_dbg(i2c->dev, "write RCV NACK error\n"); + return -EIO; + } + + if (msg->len == 0) { + val = ZXI2C_CR_TX_END | ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE; + writew(val, base + ZXI2C_REG_CR); + break; + } + + if (xfer_len == msg->len) { + if (last) + writeb(ZXI2C_CR_TX_END, base + ZXI2C_REG_CR); + } else { + writew(msg->buf[xfer_len] & 0xFF, base + ZXI2C_REG_CDR); + writew(ZXI2C_CR_CPU_RDY | ZXI2C_CR_ENABLE, base + ZXI2C_REG_CR); + } + } + + return 0; +} + +static int zxi2c_read(struct zxi2c *i2c, struct i2c_msg *msg, bool first) +{ + u16 val, tcr_val = i2c->tcr; + u32 xfer_len = 0; + void __iomem *base = i2c->base; + + val = readw(base + ZXI2C_REG_CR); + val &= ~(ZXI2C_CR_TX_END | ZXI2C_CR_RX_END); + + if (msg->len == 1) + val |= ZXI2C_CR_RX_END; + + writew(val, base + ZXI2C_REG_CR); + + reinit_completion(&i2c->complete); + + tcr_val |= ZXI2C_TCR_MASTER_READ | msg->addr; + + writew(tcr_val, base + ZXI2C_REG_TCR); + + if (!first) { + val = readw(base + ZXI2C_REG_CR); + val |= ZXI2C_CR_CPU_RDY; + writew(val, base + ZXI2C_REG_CR); + } + + while (xfer_len < msg->len) { + int err; + + err = zxi2c_wait_status(i2c, ZXI2C_ISR_BYTE_END); + if (err) + return err; + + msg->buf[xfer_len] = readw(base + ZXI2C_REG_CDR) >> 8; + xfer_len++; + + val = readw(base + ZXI2C_REG_CR) | ZXI2C_CR_CPU_RDY; + if (xfer_len == msg->len - 1) + val |= ZXI2C_CR_RX_END; + writew(val, base + ZXI2C_REG_CR); + } + + return 0; +} + +static int zxi2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) +{ + struct i2c_msg *msg; + int i; + int ret = 0; + struct zxi2c *i2c = i2c_get_adapdata(adap); + + for (i = 0; ret >= 0 && i < num; i++) { + msg = &msgs[i]; + if (msg->len == 0) { + dev_dbg(i2c->dev, "zero len unsupported\n"); + return -ENODEV; + } + if (msg->flags & I2C_M_RD) + ret = zxi2c_read(i2c, msg, i == 0); + else + ret = zxi2c_write(i2c, msg, i == (num - 1)); + } + + return (ret < 0) ? ret : i; +} + +static int zxi2c_fifo_xfer(struct zxi2c *i2c, struct i2c_msg *msg) +{ + u16 xfered_len = 0; + u16 byte_left = msg->len; + u16 tcr_val = i2c->tcr; + void __iomem *base = i2c->base; + bool read = !!(msg->flags & I2C_M_RD); + + while (byte_left) { + u16 i; + u8 tmp; + int error; + u16 xfer_len = min_t(u16, byte_left, ZXI2C_FIFO_SIZE); + + byte_left -= xfer_len; + + /* reset fifo buffer */ + tmp = ioread8(base + ZXI2C_REG_HCR); + iowrite8(tmp | ZXI2C_HCR_RST_FIFO, base + ZXI2C_REG_HCR); + + /* set xfer len */ + if (read) { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HRLR); + } else { + iowrite8(xfer_len - 1, base + ZXI2C_REG_HTLR); + /* set write data */ + for (i = 0; i < xfer_len; i++) + iowrite8(msg->buf[xfered_len + i], base + ZXI2C_REG_HTDR); + } + + /* prepare to stop transmission */ + if (i2c->hrv && !byte_left) { + tmp = ioread8(i2c->base + ZXI2C_REG_CR); + tmp |= read ? 
ZXI2C_CR_RX_END : ZXI2C_CR_TX_END;
+			iowrite8(tmp, base + ZXI2C_REG_CR);
+		}
+
+		reinit_completion(&i2c->complete);
+
+		if (xfered_len) {
+			/* continue transmission */
+			tmp = ioread8(i2c->base + ZXI2C_REG_CR);
+			tmp |= ZXI2C_CR_CPU_RDY;
+			iowrite8(tmp, i2c->base + ZXI2C_REG_CR);
+		} else {
+			/* start transmission */
+			tcr_val |= (read ? ZXI2C_TCR_MASTER_READ : 0);
+			writew(tcr_val | msg->addr, base + ZXI2C_REG_TCR);
+		}
+
+		error = zxi2c_wait_status(i2c, ZXI2C_IRQ_FIFOEND);
+		if (error)
+			return error;
+
+		/* get the received data */
+		if (read)
+			for (i = 0; i < xfer_len; i++)
+				msg->buf[xfered_len + i] = ioread8(base + ZXI2C_REG_HRDR);
+
+		xfered_len += xfer_len;
+	}
+
+	return 1;
+}
+
+static int zxi2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
+{
+	u8 tmp;
+	int ret;
+	struct zxi2c *i2c = (struct zxi2c *)i2c_get_adapdata(adap);
+
+	ret = zxi2c_wait_bus_ready(i2c);
+	if (ret)
+		return ret;
+
+	tmp = ioread8(i2c->base + ZXI2C_REG_CR);
+	tmp &= ~(ZXI2C_CR_RX_END | ZXI2C_CR_TX_END);
+
+	if (num == 1 && msgs->len >= 2 && (i2c->hrv || msgs->len <= ZXI2C_FIFO_SIZE)) {
+		/* enable fifo mode */
+		iowrite16(ZXI2C_CR_FIFO_MODE | tmp, i2c->base + ZXI2C_REG_CR);
+		/* clear irq status */
+		iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR);
+		/* enable fifo irq */
+		iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IRQ_FIFOEND, i2c->base + ZXI2C_REG_IMR);
+		ret = zxi2c_fifo_xfer(i2c, msgs);
+	} else {
+		/* enable byte mode */
+		iowrite16(tmp, i2c->base + ZXI2C_REG_CR);
+		/* clear irq status */
+		iowrite8(ZXI2C_IRQ_MASK, i2c->base + ZXI2C_REG_ISR);
+		/* enable byte irq */
+		iowrite8(ZXI2C_ISR_NACK_ADDR | ZXI2C_IMR_BYTE, i2c->base + ZXI2C_REG_IMR);
+		ret = zxi2c_xfer(adap, msgs, num);
+		if (ret < 0)
+			iowrite16(tmp | ZXI2C_CR_END_MASK, i2c->base + ZXI2C_REG_CR);
+		/* make sure the state machine is stopped */
+		usleep_range(1, 2);
+	}
+	/* disable interrupts */
+	iowrite8(0, i2c->base + ZXI2C_REG_IMR);
+
+	/* a timeout may be caused by another high-priority process; try again */
+	if (ret == -ETIMEDOUT)
+		ret = -EAGAIN;
+
+	return ret;
+}
+
+static u32 zxi2c_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm zxi2c_algorithm = {
+	.master_xfer = zxi2c_master_xfer,
+	.functionality = zxi2c_func,
+};
+
+static const struct i2c_adapter_quirks zxi2c_quirks = {
+	.flags = I2C_AQ_NO_ZERO_LEN | I2C_AQ_COMB_WRITE_THEN_READ,
+};
+
+static const u32 zxi2c_speed_params_table[][3] = {
+	/* speed, ZXI2C_TCR, ZXI2C_FSTP */
+	{ I2C_MAX_STANDARD_MODE_FREQ, 0, ZXI2C_GOLD_FSTP_100K },
+	{ I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_400K },
+	{ I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, ZXI2C_GOLD_FSTP_1M },
+	{ I2C_MAX_HIGH_SPEED_MODE_FREQ, ZXI2C_TCR_HS_MODE | ZXI2C_TCR_FAST,
+	  ZXI2C_GOLD_FSTP_3400K },
+	/* never reached, keep for debug.
freq src is 27M mode */ + { I2C_MAX_STANDARD_MODE_FREQ, 0, 0x83 }, + { I2C_MAX_FAST_MODE_FREQ, ZXI2C_TCR_FAST, 0x1e }, + { I2C_MAX_FAST_MODE_PLUS_FREQ, ZXI2C_TCR_FAST, 10 } +}; + +static void zxi2c_set_bus_speed(struct zxi2c *i2c) +{ + iowrite16(i2c->tr, i2c->base + ZXI2C_REG_TR); + iowrite8(ZXI2C_CLK_50M, i2c->base + ZXI2C_REG_CLK); + iowrite16(i2c->mcr, i2c->base + ZXI2C_REG_MCR); +} + +static void zxi2c_get_bus_speed(struct zxi2c *i2c) +{ + u8 i, count; + u8 fstp; + const u32 *params; + + u32 acpi_speed = i2c_acpi_find_bus_speed(i2c->dev); + + count = ARRAY_SIZE(zxi2c_speed_params_table); + for (i = 0; i < count; i++) + if (acpi_speed == zxi2c_speed_params_table[i][0]) + break; + /* if not found, use 400k as default */ + i = i < count ? i : 1; + + params = zxi2c_speed_params_table[i]; + fstp = ioread8(i2c->base + ZXI2C_REG_TR); + if (abs(fstp - params[2]) > 0x10) { + /* + * if BIOS setting value far from golden value, + * use golden value and warn user + */ + dev_warn(i2c->dev, "speed:%d, fstp:0x%x, golden:0x%x\n", + params[0], fstp, params[2]); + i2c->tr = params[2] | 0xff00; + } else + i2c->tr = fstp | 0xff00; + + i2c->tcr = params[1]; + i2c->mcr = ioread16(i2c->base + ZXI2C_REG_MCR); + /* for Hs-mode, use 0000 1000 as master code */ + if (params[0] == I2C_MAX_HIGH_SPEED_MODE_FREQ) + i2c->mcr |= ZXI2C_HS_MASTER_CODE; + + dev_info(i2c->dev, "speed mode is %s\n", i2c_freq_mode_string(params[0])); +} + +static int zxi2c_init(struct platform_device *pdev, struct zxi2c **pi2c) +{ + int err; + struct zxi2c *i2c; + struct resource *res; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (IS_ERR(res)) { + dev_err(&pdev->dev, "IORESOURCE_MEM failed\n"); + return -ENODEV; + } + i2c->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->base)) + return PTR_ERR(i2c->base); + + i2c->irq = platform_get_irq(pdev, 0); + if (i2c->irq < 0) + return i2c->irq; + + err = devm_request_irq(&pdev->dev, i2c->irq, zxi2c_isr, IRQF_SHARED, pdev->name, i2c); + if (err) + return dev_err_probe(&pdev->dev, err, "failed to request irq %i\n", i2c->irq); + + i2c->dev = &pdev->dev; + init_completion(&i2c->complete); + platform_set_drvdata(pdev, i2c); + + *pi2c = i2c; + return 0; +} + +static int zxi2c_probe(struct platform_device *pdev) +{ + int error; + struct zxi2c *i2c; + struct i2c_adapter *adap; + + error = zxi2c_init(pdev, &i2c); + if (error) + return error; + + zxi2c_get_bus_speed(i2c); + zxi2c_set_bus_speed(i2c); + i2c->hrv = ioread8(i2c->base + ZXI2C_REG_REV); + + adap = &i2c->adapter; + adap->owner = THIS_MODULE; + adap->algo = &zxi2c_algorithm; + adap->retries = 2; + adap->quirks = &zxi2c_quirks; + + adap->dev.parent = &pdev->dev; + ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); + snprintf(adap->name, sizeof(adap->name), "zhaoxin-%s-%s", dev_name(pdev->dev.parent), + dev_name(i2c->dev)); + i2c_set_adapdata(adap, i2c); + + error = i2c_add_adapter(adap); + if (error) + return error; + + dev_info(i2c->dev, "adapter /dev/i2c-%d registered. 
version %s\n", + adap->nr, DRIVER_VERSION); + + return 0; +} + +static int zxi2c_remove(struct platform_device *pdev) +{ + struct zxi2c *i2c = platform_get_drvdata(pdev); + + devm_free_irq(&pdev->dev, i2c->irq, i2c); + + i2c_del_adapter(&i2c->adapter); + + platform_set_drvdata(pdev, NULL); + + devm_kfree(&pdev->dev, i2c); + + return 0; +} + +static int zxi2c_resume(struct device *dev) +{ + struct zxi2c *i2c = dev_get_drvdata(dev); + + iowrite8(ZXI2C_CR_MST_RST, i2c->base + ZXI2C_REG_CR); + zxi2c_set_bus_speed(i2c); + + return 0; +} + +static const struct dev_pm_ops zxi2c_pm = { + SET_SYSTEM_SLEEP_PM_OPS(NULL, zxi2c_resume) +}; + +static const struct acpi_device_id zxi2c_acpi_match[] = { + {"IIC1D17", 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, zxi2c_acpi_match); + +static struct platform_driver zxi2c_driver = { + .probe = zxi2c_probe, + .remove = zxi2c_remove, + .driver = { + .name = ZX_I2C_NAME, + .acpi_match_table = ACPI_PTR(zxi2c_acpi_match), + .pm = &zxi2c_pm, + }, +}; + +module_platform_driver(zxi2c_driver); + +MODULE_VERSION(DRIVER_VERSION); +MODULE_AUTHOR("HansHu@zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin IIC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index ea5a6a14c5537ad73aa8c477f8d328cdfa9bf45e..e7d40379fc3c1bc2f3ac5104ef88a6728e2e8601 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -1242,6 +1242,43 @@ static struct cpuidle_state snr_cstates[] __initdata = { .enter = NULL } }; +static struct cpuidle_state srf_cstates[] __initdata = { + { + .name = "C1", + .desc = "MWAIT 0x00", + .flags = MWAIT2flg(0x00) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 1, + .target_residency = 1, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C1E", + .desc = "MWAIT 0x01", + .flags = MWAIT2flg(0x01) | CPUIDLE_FLAG_ALWAYS_ENABLE, + .exit_latency = 2, + .target_residency = 10, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6S", + .desc = "MWAIT 0x22", + .flags = MWAIT2flg(0x22) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 270, + .target_residency = 700, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .name = "C6SP", + .desc = "MWAIT 0x23", + .flags = MWAIT2flg(0x23) | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 310, + .target_residency = 900, + .enter = &intel_idle, + .enter_s2idle = intel_idle_s2idle, }, + { + .enter = NULL } +}; + static const struct idle_cpu idle_cpu_nehalem __initconst = { .state_table = nehalem_cstates, .auto_demotion_disable_flags = NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE, @@ -1387,6 +1424,12 @@ static const struct idle_cpu idle_cpu_snr __initconst = { .use_acpi = true, }; +static const struct idle_cpu idle_cpu_srf __initconst = { + .state_table = srf_cstates, + .disable_promotion_to_c1e = true, + .use_acpi = true, +}; + static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(NEHALEM_EP, &idle_cpu_nhx), X86_MATCH_INTEL_FAM6_MODEL(NEHALEM, &idle_cpu_nehalem), @@ -1432,6 +1475,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_PLUS, &idle_cpu_bxt), X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, &idle_cpu_dnv), X86_MATCH_INTEL_FAM6_MODEL(ATOM_TREMONT_D, &idle_cpu_snr), + X86_MATCH_INTEL_FAM6_MODEL(ATOM_CRESTMONT_X, &idle_cpu_srf), {} }; diff --git a/drivers/infiniband/hw/erdma/erdma_hw.h b/drivers/infiniband/hw/erdma/erdma_hw.h index 9d316fdc6f9a55a5055f793197117424800ccb01..a155519a862f83e6e4bec2dea2dec9fbc27eb141 
100644 --- a/drivers/infiniband/hw/erdma/erdma_hw.h +++ b/drivers/infiniband/hw/erdma/erdma_hw.h @@ -11,8 +11,6 @@ #include /* PCIe device related definition. */ -#define PCI_VENDOR_ID_ALIBABA 0x1ded - #define ERDMA_PCI_WIDTH 64 #define ERDMA_FUNC_BAR 0 #define ERDMA_MISX_BAR 2 diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 2b12b583ef4b1edb8c296f59e7e6d4dcc4ca095e..559c4e9fba05f348d1e64fc7e1a4bc6f0cd5057f 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -187,6 +187,7 @@ config MSM_IOMMU source "drivers/iommu/amd/Kconfig" source "drivers/iommu/intel/Kconfig" source "drivers/iommu/iommufd/Kconfig" +source "drivers/iommu/sw64/Kconfig" config IRQ_REMAP bool "Support for Interrupt Remapping" diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile index 769e43d780ce89810033064bfd3baa8420889bed..f74b08c2fb006368a7b7a284f57a645f8beeb3ba 100644 --- a/drivers/iommu/Makefile +++ b/drivers/iommu/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += amd/ intel/ arm/ iommufd/ +obj-y += amd/ intel/ arm/ iommufd/ sw64/ obj-$(CONFIG_IOMMU_API) += iommu.o obj-$(CONFIG_IOMMU_API) += iommu-traces.o obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 45efb7e5d725460b39de534c67b5fb5be0d31d1d..9d24ddfc2d7e7a2abab341d7b86ddbfa4be9855f 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -3001,6 +3001,9 @@ static void __init free_iommu_resources(void) /* SB IOAPIC is always on this device in AMD systems */ #define IOAPIC_SB_DEVID ((0x00 << 8) | PCI_DEVFN(0x14, 0)) +/* SB IOAPIC for Hygon family 18h model 4h is on the device 0xb */ +#define IOAPIC_SB_DEVID_FAM18H_M4H ((0x00 << 8) | PCI_DEVFN(0xb, 0)) + static bool __init check_ioapic_information(void) { const char *fw_bug = FW_BUG; @@ -3026,7 +3029,12 @@ static bool __init check_ioapic_information(void) pr_err("%s: IOAPIC[%d] not in IVRS table\n", fw_bug, id); ret = false; - } else if (devid == IOAPIC_SB_DEVID) { + } else if (devid == IOAPIC_SB_DEVID || + (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && + boot_cpu_data.x86 == 0x18 && + boot_cpu_data.x86_model >= 0x4 && + boot_cpu_data.x86_model <= 0xf && + devid == IOAPIC_SB_DEVID_FAM18H_M4H)) { has_sb_ioapic = true; ret = true; } diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index d6d1a2a55cc0692fb02f0f58b901ac438c71604c..8ce9d33e7840c1eac90b197536e480a08ae6ad2b 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -35,6 +35,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include #include "arm-smmu.h" @@ -51,6 +55,7 @@ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +#define SMR_MASK_SHIFT 16 static int force_stage; module_param(force_stage, int, S_IRUGO); @@ -1363,6 +1368,19 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) return ERR_PTR(-ENODEV); } +#ifdef CONFIG_ARCH_PHYTIUM +#define FWID_READ(id) (((u16)(id) >> 3) | (((id) >> SMR_MASK_SHIFT | 0x7000) << SMR_MASK_SHIFT)) + if (typeof_ft2000plus()) { + int num = fwspec->num_ids; + + for (i = 0; i < num; i++) { + u32 fwid = FWID_READ(fwspec->ids[i]); + + iommu_fwspec_add_ids(dev, &fwid, 1); + } + } +#endif + ret = -EINVAL; for (i = 0; i < fwspec->num_ids; i++) { u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); @@ -1458,7 +1476,12 @@ static struct iommu_group *arm_smmu_device_group(struct device *dev) mutex_unlock(&smmu->stream_map_mutex); return ERR_PTR(-EINVAL); } - 
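+		/*
+		 * Phytium vendor quirk: on S2500 stop walking the stream map
+		 * after the first entry, and on FT-2000plus skip S2CRs that
+		 * have not been assigned a group yet.
+		 */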
+#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_s2500()) + break; + if (typeof_ft2000plus() && !smmu->s2crs[idx].group) + continue; +#endif group = smmu->s2crs[idx].group; } diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 4c3707384bd92bef9d8c9a3a05a6d3aff4b3002f..ea0fbb78649b0d1924364021252e302f4ca2d053 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4428,6 +4428,9 @@ static void intel_iommu_probe_finalize(struct device *dev) { set_dma_ops(dev, NULL); iommu_setup_dma_ops(dev, 0, U64_MAX); + + if (is_zhaoxin_kh40000) + kh40000_set_iommu_dma_ops(dev); } static void intel_iommu_get_resv_regions(struct device *device, diff --git a/drivers/iommu/sw64/Kconfig b/drivers/iommu/sw64/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..3a6a1e994f315761cd086dd020c4d966fa80ade1 --- /dev/null +++ b/drivers/iommu/sw64/Kconfig @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0-only +# SW64 IOMMU SUPPORT +config SUNWAY_IOMMU + bool "Sunway IOMMU Support" + select IOMMU_API + select IOMMU_IOVA + select IOMMU_DMA + depends on SW64 && PCI && SUBARCH_C3B + help + Support for IOMMU on SW64 platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". + +# SW64 IOMMU V2 SUPPORT +config SUNWAY_IOMMU_V2 + bool "Sunway IOMMU V2 Support" + select IOMMU_API + select IOMMU_IOVA + depends on SW64 && PCI && SUBARCH_C4 + help + Support for IOMMU V2 on SW64 platform. It can enable or bypass specific device by + adding boot param "iommu_enable" and "iommu.passthrough". diff --git a/drivers/iommu/sw64/Makefile b/drivers/iommu/sw64/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e61b343490aa82963ec6da62fddafd7c6a8526bf --- /dev/null +++ b/drivers/iommu/sw64/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_SUNWAY_IOMMU) += iommu.o +obj-$(CONFIG_SUNWAY_IOMMU_V2) += iommu_v2.o diff --git a/drivers/iommu/sw64/iommu.c b/drivers/iommu/sw64/iommu.c new file mode 100644 index 0000000000000000000000000000000000000000..32b18f726fd919dca67ba722cc133f1ed26567cf --- /dev/null +++ b/drivers/iommu/sw64/iommu.c @@ -0,0 +1,1277 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_DMA_LIMIT (0xe0000000 - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff + +#define SW64_IOMMU_GRN_8K ((0UL) << 4) /* page size as 8KB */ +#define SW64_IOMMU_GRN_8M ((0x2UL) << 4) /* page size as 8MB */ +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) | ((1ULL) << PAGE_8M_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; + +static int iommu_identity_mapping; + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0); +} + +void dev_flush_dtlb(struct sunway_iommu_domain *sdomain, + struct sunway_iommu_dev *sdev_data) +{ + struct pci_controller *hose; + int devid; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev_data->pdev->bus); + devid = sdev_data->devid; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, devid); + } +} + +void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + hose = pci_bus_to_pci_controller(sdev_data->pdev->bus); + + flush_addr = __pa(flush_addr); + write_piu_ior0(hose->node, hose->index, + PCACHE_FLUSHPADDR, flush_addr); + } +} + +void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, + unsigned long flush_addr) +{ + struct pci_controller *hose; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev_data; + + list_for_each_entry(sdev_data, &sdomain->dev_list, list) { + pdev = sdev_data->pdev; + hose = pci_bus_to_pci_controller(pdev->bus); + + flush_addr = (pdev->bus->number << 8) + | pdev->devfn | (flush_addr << 16); + write_piu_ior0(hose->node, hose->index, + PTLB_FLUSHVADDR, flush_addr); + } +} + +/* domain helpers */ +static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom) +{ + return container_of(dom, struct sunway_iommu_domain, domain); +} + +static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain) +{ + return container_of(sdomain, struct dma_domain, sdomain); +} + +static void add_domain_to_list(struct sunway_iommu_domain *sdomain) +{ + unsigned long flags; + + spin_lock_irqsave(&sunway_domain_lock, flags); + list_add(&sdomain->list, &sunway_domain_list); + 
spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void del_domain_from_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_del(&sdomain->list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void free_pagetable(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long pde;
+	unsigned long *pde_ptr;
+	int i, pdes_one_page;
+
+	pde_ptr = sdomain->pt_root;
+	if (!pde_ptr)
+		return;
+
+	pdes_one_page = PAGE_SIZE/sizeof(pde);
+	for (i = 0; i < pdes_one_page; i++, pde_ptr++) {
+		pde = *pde_ptr;
+		if ((pde & SW64_IOMMU_ENTRY_VALID) == 0)
+			continue;
+
+		pde &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+		pde |= PAGE_OFFSET;
+		free_page(pde);
+	}
+
+	free_page((unsigned long)sdomain->pt_root);
+}
+
+static void domain_id_free(int id)
+{
+	spin_lock(&domain_bitmap_lock);
+	if (id > 0)
+		__clear_bit(id, sunway_iommu_domain_bitmap);
+	spin_unlock(&domain_bitmap_lock);
+}
+
+static void dma_domain_free(struct dma_domain *dma_dom)
+{
+	if (!dma_dom)
+		return;
+
+	del_domain_from_list(&dma_dom->sdomain);
+	put_iova_domain(&dma_dom->iovad);
+	free_pagetable(&dma_dom->sdomain);
+	if (dma_dom->sdomain.id)
+		domain_id_free(dma_dom->sdomain.id);
+
+	kfree(dma_dom);
+}
+
+static void sunway_domain_free(struct sunway_iommu_domain *sdomain)
+{
+	if (!sdomain)
+		return;
+
+	del_domain_from_list(sdomain);
+	if (sdomain->id)
+		domain_id_free(sdomain->id);
+
+	kfree(sdomain);
+}
+
+static u16 sunway_domain_id_alloc(void)
+{
+	int id;
+
+	spin_lock(&domain_bitmap_lock);
+	id = find_first_zero_bit(sunway_iommu_domain_bitmap, MAX_DOMAIN_NUM);
+	if (id > 0 && id < MAX_DOMAIN_NUM)
+		__set_bit(id, sunway_iommu_domain_bitmap);
+	else
+		id = 0;
+	spin_unlock(&domain_bitmap_lock);
+
+	return id;
+}
+
+static int sunway_domain_init(struct sunway_iommu_domain *sdomain)
+{
+	spin_lock_init(&sdomain->lock);
+	mutex_init(&sdomain->api_lock);
+	sdomain->id = sunway_domain_id_alloc();
+	if (!sdomain->id)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&sdomain->dev_list);
+
+	return 0;
+}
+
+static struct sunway_iommu_domain *sunway_domain_alloc(void)
+{
+	struct sunway_iommu_domain *sdomain;
+
+	sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL);
+	if (!sdomain)
+		return NULL;
+
+	if (sunway_domain_init(sdomain)) {
+		kfree(sdomain);
+		return NULL;
+	}
+
+	add_domain_to_list(sdomain);
+	return sdomain;
+}
+
+static struct dma_domain *dma_domain_alloc(void)
+{
+	struct dma_domain *dma_dom;
+
+	dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL);
+	if (!dma_dom)
+		return NULL;
+
+	if (sunway_domain_init(&dma_dom->sdomain)) {
+		kfree(dma_dom);
+		return NULL;
+	}
+	dma_dom->sdomain.type = IOMMU_DOMAIN_DMA;
+
+	dma_dom->sdomain.pt_root = (unsigned long *)get_zeroed_page(GFP_KERNEL);
+	if (dma_dom->sdomain.pt_root == NULL) {
+		pr_err("Allocating a new sdomain pt_root failed!\n");
+		dma_domain_free(dma_dom);
+		return NULL;
+	}
+
+	add_domain_to_list(&dma_dom->sdomain);
+
+	return dma_dom;
+}
+
+static void device_flush_all(struct sunway_iommu_dev *sdata)
+{
+	struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus);
+
+	if (hose == NULL)
+		return;
+
+	write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid);
+	write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid);
+	write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid);
+}
+
+/* iommu_ops device attach/unattach helpers */
+static void
+set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain)
+{
+	struct sunway_iommu
*iommu; + struct pci_dev *pdev; + struct page *page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + page = alloc_pages_node(iommu->node, __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (iommu_identity_mapping) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev_data; + unsigned long flags; + + sdev_data = dev_iommu_priv_get(dev); + sunway_domain = sdev_data->domain; + + if (WARN_ON(!sdev_data->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev_data); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + 
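The device-table walk programmed by set_dte_entry() above is two-level: the PCI bus number indexes the root table at iommu_dtbr, and devfn indexes the level-2 page. A minimal sketch of the corresponding lookup, assuming the entry layout used throughout this file (sunway_dte_lookup() is a hypothetical helper, not part of the patch)::

	static unsigned long *sunway_dte_lookup(struct sunway_iommu *iommu,
						struct pci_dev *pdev)
	{
		unsigned long *dte_l1 = iommu->iommu_dtbr + pdev->bus->number;
		unsigned long dte_l1_val = *dte_l1;

		if (!(dte_l1_val & SW64_IOMMU_ENTRY_VALID))
			return NULL;	/* no level-2 table allocated yet */

		/* strip the valid bit and convert back to a virtual address */
		dte_l1_val &= ~SW64_IOMMU_ENTRY_VALID & PAGE_MASK;
		return (unsigned long *)__va(dte_l1_val) + pdev->devfn;
	}

fetch_dte() below performs the same walk from a 16-bit devid, with devid >> 8 as the bus number and devid & 0xff as the devfn.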
+/**********************************************************************
+ *
+ * Following functions describe IOMMU init ops
+ *
+ **********************************************************************/
+
+static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose)
+{
+	struct sunway_iommu *iommu;
+	struct page *page;
+	unsigned long base;
+
+	hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL);
+	if (!hose->pci_iommu)
+		return NULL;
+
+	iommu = hose->pci_iommu;
+	spin_lock_init(&iommu->dt_lock);
+
+	iommu->node = hose->node;
+	if (!node_online(hose->node))
+		iommu->node = -1;
+
+	page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE));
+	if (!page) {
+		pr_err("Allocating a new iommu_dtbr page failed.\n");
+		kfree(hose->pci_iommu);
+		return NULL;
+	}
+
+	iommu->iommu_dtbr = page_address(page);
+
+	iommu->hose_pt = hose;
+	iommu->index = hose->index;
+
+	iommu->enabled = true;
+
+	base = __pa(iommu->iommu_dtbr) & PAGE_MASK;
+	write_piu_ior0(hose->node, hose->index, DTBASEADDR, base);
+
+	return iommu;
+}
+
+unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid,
+			enum exceptype type)
+{
+	unsigned long *dte_l1, *dte_l2;
+	unsigned long dte_l1_val;
+
+	if (!iommu)
+		return 0;
+	dte_l1 = iommu->iommu_dtbr + (devid >> 8);
+	if (type == DTE_LEVEL1)
+		return (unsigned long)dte_l1;
+
+	dte_l1_val = *dte_l1;
+	if (type == DTE_LEVEL1_VAL)
+		return dte_l1_val;
+
+	dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK);
+	dte_l1_val |= PAGE_OFFSET;
+	dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3));
+	if (type == DTE_LEVEL2)
+		return (unsigned long)dte_l2;
+
+	return *dte_l2;
+}
+
+unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova,
+			enum exceptype type)
+{
+	unsigned long iova_pfn, pte_l1_val;
+	unsigned long *pte_l1, *pte_l2;
+	unsigned long pte_root;
+	unsigned long offset;
+
+	if (!sdomain)
+		return -EINVAL;
+
+	pte_root = __pa(sdomain->pt_root) & PAGE_MASK;
+	iova_pfn = iova >> PAGE_SHIFT;
+	pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK));
+	pte_root |= PAGE_OFFSET;
+	offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3;
+	pte_l1 = (unsigned long *)(pte_root + offset);
+	if (type == PTE_LEVEL1)
+		return (unsigned long)pte_l1;
+
+	pte_l1_val = *pte_l1;
+	if (type == PTE_LEVEL1_VAL)
+		return pte_l1_val;
+
+	pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK);
+	pte_l1_val |= PAGE_OFFSET;
+	offset = (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3;
+	pte_l2 = (unsigned long *)(pte_l1_val + offset);
+
+	if (type == PTE_LEVEL2)
+		return (unsigned long)pte_l2;
+
+	return *pte_l2;
+}
+
+/* IOMMU interrupt handler */
+irqreturn_t iommu_interrupt(int irq, void *dev)
+{
+	struct pci_controller *hose = (struct pci_controller *)dev;
+	struct sunway_iommu_domain *sdomain;
+	struct sunway_iommu_dev *sdev;
+	unsigned long iommu_status;
+	unsigned long type;
+	unsigned long devid, dva;
+
+	iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS);
+	if (!(iommu_status >> 63))
+		return IRQ_NONE;
+
+	type = (iommu_status >> 59) & 0x7;
+	devid = (iommu_status >> 37) & 0xffff;
+	dva = iommu_status & 0xffffffff;
+	pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx\n",
+		__func__, iommu_status, devid, dva);
+
+	sdev = search_dev_data(devid);
+	if (sdev == NULL) {
+		pr_info("no such device!\n");
dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case UNAUTHORIZED_ACCESS: + pr_info("unauthorized access\n"); + break; + case ILLEGAL_RESPONSE: + pr_info("illegal response\n"); + break; + default: + pr_info("unknown error\n"); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, 
NULL, NULL, "%d", + iommu_index); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + + iommu_device_register(&iommu->iommu, &sunway_iommu_ops, NULL); + } + + ret = iova_cache_get(); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +device_initcall(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long *pte_l2, unmapped; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + unmapped = 0; + while (unmapped < page_size) { + pte_l2 = (unsigned long *)fetch_pte(sunway_domain, iova, PTE_LEVEL2); + *pte_l2 = 0; + + flush_pcache_by_addr(sunway_domain, (unsigned long)pte_l2); + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + + iova += PAGE_SIZE; + unmapped += PAGE_SIZE; + } + + return unmapped; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + /* + * pde: page table entry + * pte: level 2 page table entry + * pte_root: page table root + */ + struct page *page; + struct sunway_iommu *iommu; + unsigned long pde, pte, iova_pfn; + unsigned long pdebaseaddr; + u64 *ptebasecond, ptebaseaddr; + u64 pte_root = (__pa(sunway_domain->pt_root) & PAGE_MASK); + + iova_pfn = (unsigned long)(bus_addr >> PAGE_SHIFT); + + pdebaseaddr = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pdebaseaddr += ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)) + + PAGE_OFFSET; + + pde = *(unsigned long *)pdebaseaddr; + if (pde) { + ptebaseaddr = (pde & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + + goto direct_map; + } + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating pages failed.\n"); + return -1; + } + + ptebasecond = page_address(page); + pde = (__pa(ptebasecond) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + + /* + * If pde exists, no need to allocate a new page. + * Atomic compare and exchange, compare the value the pointer points to + * with 0UL. If identical, store pde where the pointer points to, return + * 0UL. Otherwise, return the value the pointer points to. 
+ */ + if (cmpxchg64((volatile u64 *)pdebaseaddr, 0ULL, pde)) { + ptebaseaddr = ((*(volatile u64 *)pdebaseaddr) + & (~SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + PAGE_OFFSET; + ptebaseaddr += (iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3; + free_page((unsigned long)ptebasecond); + } else { + flush_pcache_by_addr(sunway_domain, pdebaseaddr); + ptebaseaddr = (unsigned long)ptebasecond + + ((iova_pfn & SW64_IOMMU_LEVEL2_OFFSET) << 3); + } + +direct_map: + /* case 8K */ + if (page_size == (1UL << PAGE_SHIFT)) { + if (*(volatile u64 *)ptebaseaddr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8K | SW64_IOMMU_ENABLE; + *(volatile u64 *)ptebaseaddr = pte; + flush_pcache_by_addr(sunway_domain, ptebaseaddr); + /* case 8M */ + } else if (page_size == (1UL << PAGE_8M_SHIFT)) { + unsigned long *ptr; + int i, ptes_one_page, ptes_one_cache; + + ptr = (unsigned long *)ptebaseaddr; + ptes_one_page = PAGE_SIZE/sizeof(pte); + ptes_one_cache = L1_CACHE_BYTES/sizeof(pte); + + pte = (paddr & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID + | SW64_IOMMU_GRN_8M | SW64_IOMMU_ENABLE; + + for (i = 0; i < ptes_one_page; i++) { + if (*ptr) { + pr_err("IOVA 4G overlap. IOVA is %#lx.\n", bus_addr); + return -EFAULT; + } + + *ptr = pte; + + /* just do once flush per cache line */ + if (i % ptes_one_cache == (ptes_one_cache - 1)) + flush_pcache_by_addr(sunway_domain, (unsigned long)ptr); + ptr++; + } + } +#ifdef CONFIG_SW64_GUEST + flush_ptlb_by_addr(sunway_domain, pfn | SW64_IOMMU_MAP_FLAG); +#endif + return 0; +} + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->domain.geometry.aperture_start = 0ULL; + sdomain->domain.geometry.aperture_end = (~0ULL); + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); + if (!sdomain->pt_root) { + pr_err("Allocating pt_root failed!\n"); + sunway_domain_free(sdomain); + return NULL; + } + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + iommu_identity_mapping = 1; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct 
iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev_data; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + if (!dev_is_pci(dev)) + return -ENODEV; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev_data = dev_iommu_priv_get(dev); + if (!sdev_data) + return -EINVAL; + + if (sdev_data->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + + if (iova >= SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + paddr &= ~SW64_IOMMU_ENTRY_VALID; + grn = paddr & SW64_PTE_GRN_MASK; /* get page granularity */ + paddr &= PAGE_MASK; + + switch (grn) { + case SW64_IOMMU_GRN_8M: + paddr += (iova & ~HPAGE_MASK); + break; + case SW64_IOMMU_GRN_8K: + default: + paddr += (iova & ~PAGE_MASK); + break; + } + + return paddr; +} + +static int +sunway_iommu_map_pages(struct iommu_domain *dom, unsigned long iova, + phys_addr_t paddr, size_t page_size, size_t pgcount, + int iommu_prot, gfp_t gfp, size_t *mapped) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + size_t size = pgcount << PAGE_SHIFT; + int ret; + + /* + * As VFIO cannot distinguish between normal DMA request + * and pci device BAR, check should be introduced manually + * to avoid VFIO trying to map pci config space. 
+ */
+	if (iova >= SW64_BAR_ADDRESS)
+		return 0;
+
+	ret = 0;
+	mutex_lock(&sdomain->api_lock);
+	while (pgcount--) {
+		ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size);
+		if (ret) {
+			pr_info("Failed to map page from IOVA %lx.\n", iova);
+			break;
+		}
+		iova += page_size;
+		paddr += page_size;
+	}
+	mutex_unlock(&sdomain->api_lock);
+
+	if (!ret && mapped)
+		*mapped = size;
+
+	return ret;
+}
+
+static size_t
+sunway_iommu_unmap_pages(struct iommu_domain *dom, unsigned long iova,
+			 size_t page_size, size_t pgcount,
+			 struct iommu_iotlb_gather *gather)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	size_t unmap_size;
+	size_t total_unmap = 0;
+
+	if (iova >= SW64_BAR_ADDRESS)
+		return page_size;
+
+	mutex_lock(&sdomain->api_lock);
+	while (pgcount--) {
+		unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size);
+		iova += page_size;
+		total_unmap += unmap_size;
+	}
+	mutex_unlock(&sdomain->api_lock);
+
+	return total_unmap;
+}
+
+static struct iommu_group *sunway_iommu_device_group(struct device *dev)
+{
+	return generic_device_group(dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	if (dev_iommu_priv_get(dev))
+		return 0;
+
+	sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	pdev = to_pci_dev(dev);
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	iommu = hose->pci_iommu;
+	llist_add(&sdev->dev_data_list, &dev_data_list);
+	sdev->pdev = pdev;
+	sdev->iommu = iommu;
+
+	dev_iommu_priv_set(dev, sdev);
+
+	return 0;
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev)
+		return;
+
+	if (sdev->domain)
+		detach_device(dev);
+
+	dev_iommu_priv_set(dev, NULL);
+}
+
+static void sunway_iommu_release_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose->iommu_enable)
+		return;
+
+	iommu_uninit_device(dev);
+}
+
+static struct iommu_device *sunway_iommu_probe_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	struct sunway_iommu *iommu;
+	int ret;
+
+	if (!dev_is_pci(dev))
+		return ERR_PTR(-ENODEV);
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return ERR_PTR(-ENODEV);
+
+	if (!hose->iommu_enable)
+		return ERR_PTR(-ENODEV);
+
+	iommu = hose->pci_iommu;
+	if (dev_iommu_priv_get(dev))
+		return &iommu->iommu;
+
+	ret = iommu_init_device(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return &iommu->iommu;
+}
+
+static int sunway_iommu_def_domain_type(struct device *dev)
+{
+	if (dev_is_pci(dev)) {
+		if (iommu_identity_mapping)
+			return IOMMU_DOMAIN_IDENTITY;
+	}
+
+	return 0;
+}
+
+static bool sunway_iommu_capable(struct device *dev, enum iommu_cap cap)
+{
+	return false;
+}
+
+static void sunway_iommu_probe_finalize(struct device *dev)
+{
+	set_dma_ops(dev, NULL);
+	iommu_setup_dma_ops(dev, 0, SW64_DMA_LIMIT);
+}
+
+const struct iommu_ops sunway_iommu_ops = {
+	.capable = sunway_iommu_capable,
+	.domain_alloc = sunway_iommu_domain_alloc,
+	.probe_device = sunway_iommu_probe_device,
+	.probe_finalize = sunway_iommu_probe_finalize,
+	.release_device = sunway_iommu_release_device,
+	.device_group = sunway_iommu_device_group,
+	.pgsize_bitmap =
SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, + .default_domain_ops = &(const struct iommu_domain_ops) { + .attach_dev = sunway_iommu_attach_device, + .map_pages = sunway_iommu_map_pages, + .unmap_pages = sunway_iommu_unmap_pages, + .iova_to_phys = sunway_iommu_iova_to_phys, + .free = sunway_iommu_domain_free, + } +}; + +/***************************************************************************** + * + * Boot param handle + * Each bit of iommu_enable bitmap represents an rc enable, and every 8 bits + * represents one cpu node. For example, iommu_enable=0x0100 means enabling + * rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +early_param("iommu_enable", iommu_enable_setup); diff --git a/drivers/iommu/sw64/iommu_v2.c b/drivers/iommu/sw64/iommu_v2.c new file mode 100644 index 0000000000000000000000000000000000000000..f3e19e524210d5da2e362ee48b7dd658742dc29d --- /dev/null +++ b/drivers/iommu/sw64/iommu_v2.c @@ -0,0 +1,1780 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * iommu.c: Generic sw64 IOMMU support + * + * This is designed and tested for 3231. If there are no changes in hardware + * in later chips, then it should work just as well. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sunway_iommu.h" + +#define MAX_DOMAIN_NUM 65536 +#define IOVA_PFN(addr) ((addr) >> PAGE_SHIFT) +#define SW64_32BIT_DMA_LIMIT (0xe0000000 - 1) +#define SW64_64BIT_DMA_LIMIT ((1UL << 41) - 1) +#define SW64_BAR_ADDRESS (IO_BASE | PCI_BASE) + +#define SW64_IOMMU_PGSIZES (((1ULL) << PAGE_SHIFT) \ + | ((1ULL) << PAGE_8M_SHIFT) \ + | ((1ULL) << PAGE_512M_SHIFT) \ + | ((1ULL) << PAGE_8G_SHIFT)) + +#define IDENTMAP_ALL ((1U) << 0) +#define DMA_MASK64 ((1U) << 1) + +#define PTE_VALID 0x8000000000000000UL +#define LAST_STAGE 0x100UL +#define PTE_GRN_8M 0x10UL +#define PTE_GRN_512M 0x20UL +#define PTE_GRN_8G 0x30UL +#define PTE_WRITEE 0x2UL +#define PTE_READE 0x1UL +#define PTE_RWE 0x3UL +#define PTE_FLAGS_MASK 0x8000000000000133UL +#define PAGE_8G_OFFSET_MASK ((1UL << PAGE_8G_SHIFT) - 1) +#define PAGE_512M_OFFSET_MASK ((1UL << PAGE_512M_SHIFT) - 1) +#define PAGE_8M_OFFSET_MASK ((1UL << PAGE_8M_SHIFT) - 1) + +/* IOMMU Exceptional Status */ +enum exceptype { + DTE_LEVEL1 = 0x0, + DTE_LEVEL2, + PTE_LEVEL1, + PTE_LEVEL2, + PTE_LEVEL3, + UNAUTHORIZED_ACCESS, + ILLEGAL_RESPONSE, + DTE_LEVEL1_VAL, + DTE_LEVEL2_VAL, + PTE_LEVEL1_VAL, + PTE_LEVEL2_VAL, + PTE_LEVEL3_VAL, +}; + +u64 iommu_enable_cmd; /* default IOMMU boot param: 0 */ + +unsigned long *sunway_iommu_domain_bitmap; + +static DEFINE_SPINLOCK(domain_bitmap_lock); +static DEFINE_SPINLOCK(sunway_iommu_device_table_lock); +spinlock_t sunway_domain_lock; + +static LLIST_HEAD(dev_data_list); +LIST_HEAD(sunway_domain_list); + +struct dma_domain { + struct sunway_iommu_domain sdomain; + struct iova_domain iovad; +}; +const struct iommu_ops sunway_iommu_ops; +static const struct dma_map_ops sunway_dma_ops; + + +/* flush helpers */ +static void piu_flush_all(struct pci_controller *hose) +{ + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHALL, 0); + write_piu_ior0(hose->node, hose->index, 
PTLB_FLUSHALL, 0);
+	write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHALL, 0);
+}
+
+void flush_pcache_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct sunway_iommu_dev *sdev;
+
+	/* Translate to a physical address once, outside the loop, so that
+	 * domains with more than one device do not apply __pa() twice.
+	 */
+	flush_addr = __pa(flush_addr);
+
+	list_for_each_entry(sdev, &sdomain->dev_list, list) {
+		hose = pci_bus_to_pci_controller(sdev->pdev->bus);
+
+		/* Memory barrier: make PTE updates visible before the flush */
+		mb();
+		write_piu_ior0(hose->node, hose->index,
+			       PCACHE_FLUSHPADDR, flush_addr);
+	}
+}
+
+void flush_ptlb_by_addr(struct sunway_iommu_domain *sdomain, unsigned long flush_addr)
+{
+	struct pci_controller *hose;
+	struct sunway_iommu_dev *sdev;
+	struct pci_dev *pdev;
+	unsigned long flush_val;
+
+	list_for_each_entry(sdev, &sdomain->dev_list, list) {
+		pdev = sdev->pdev;
+		hose = pci_bus_to_pci_controller(pdev->bus);
+
+		/* Combine bus/devfn with the address in a local so the
+		 * input is not clobbered for the next device in the list.
+		 */
+		flush_val = (pdev->bus->number << 8)
+				| pdev->devfn | (flush_addr << 16);
+		write_piu_ior0(hose->node, hose->index,
+			       PTLB_FLUSHVADDR, flush_val);
+	}
+}
+
+/* domain helpers */
+static struct sunway_iommu_domain *to_sunway_domain(struct iommu_domain *dom)
+{
+	return container_of(dom, struct sunway_iommu_domain, domain);
+}
+
+static struct dma_domain *to_dma_domain(struct sunway_iommu_domain *sdomain)
+{
+	return container_of(sdomain, struct dma_domain, sdomain);
+}
+
+static void add_domain_to_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_add(&sdomain->list, &sunway_domain_list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void del_domain_from_list(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&sunway_domain_lock, flags);
+	list_del(&sdomain->list);
+	spin_unlock_irqrestore(&sunway_domain_lock, flags);
+}
+
+static void free_pagetable(struct sunway_iommu_domain *sdomain)
+{
+	unsigned long *l2_pte, *l3_pte;
+	unsigned long l2_pte_val, l3_pte_val;
+	int l2_index, l3_index, ptes_one_page;
+
+	l2_pte = sdomain->pt_root;
+	if (!l2_pte)
+		return;
+
+	ptes_one_page = PAGE_SIZE/sizeof(unsigned long);
+	for (l2_index = 0; l2_index < ptes_one_page; l2_index++, l2_pte++) {
+		l2_pte_val = *l2_pte;
+		if ((l2_pte_val & SW64_IOMMU_ENTRY_VALID) == 0)
+			continue;
+
+		l2_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+		l2_pte_val |= PAGE_OFFSET;
+		l3_pte = (unsigned long *)l2_pte_val;
+		for (l3_index = 0; l3_index < ptes_one_page; l3_index++, l3_pte++) {
+			l3_pte_val = *l3_pte;
+			if ((l3_pte_val & SW64_IOMMU_ENTRY_VALID) == 0)
+				continue;
+
+			l3_pte_val &= ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK;
+			l3_pte_val |= PAGE_OFFSET;
+			free_page(l3_pte_val);
+		}
+		free_page(l2_pte_val);
+	}
+
+	free_page((unsigned long)sdomain->pt_root);
+}
+
+static void domain_id_free(int id)
+{
+	spin_lock(&domain_bitmap_lock);
+	if (id > 0)
+		__clear_bit(id, sunway_iommu_domain_bitmap);
+	spin_unlock(&domain_bitmap_lock);
+}
+
+static void dma_domain_free(struct dma_domain *dma_dom)
+{
+	if (!dma_dom)
+		return;
+
+	del_domain_from_list(&dma_dom->sdomain);
+	put_iova_domain(&dma_dom->iovad);
+	free_pagetable(&dma_dom->sdomain);
+	if (dma_dom->sdomain.id)
+		domain_id_free(dma_dom->sdomain.id);
+
+	kfree(dma_dom);
+}
+
+static void sunway_domain_free(struct sunway_iommu_domain *sdomain)
+{
+	if (!sdomain)
+		return;
+
+	del_domain_from_list(sdomain);
+	if (sdomain->id)
+		domain_id_free(sdomain->id);
+
+	kfree(sdomain);
+}
+
+static u16 sunway_domain_id_alloc(void)
+{
+	int id;
+
+	spin_lock(&domain_bitmap_lock);
+	id = find_first_zero_bit(sunway_iommu_domain_bitmap,
MAX_DOMAIN_NUM); + if (id > 0 && id < MAX_DOMAIN_NUM) + __set_bit(id, sunway_iommu_domain_bitmap); + else + id = 0; + spin_unlock(&domain_bitmap_lock); + + return id; +} + +static int sunway_domain_init(struct sunway_iommu_domain *sdomain) +{ + spin_lock_init(&sdomain->lock); + mutex_init(&sdomain->api_lock); + sdomain->id = sunway_domain_id_alloc(); + if (!sdomain->id) + return -ENOMEM; + INIT_LIST_HEAD(&sdomain->dev_list); + + return 1; +} + +static struct sunway_iommu_domain *sunway_domain_alloc(void) +{ + struct sunway_iommu_domain *sdomain; + + sdomain = kzalloc(sizeof(struct sunway_iommu_domain), GFP_KERNEL); + if (!sdomain) + return NULL; + + if (!sunway_domain_init(sdomain)) { + kfree(sdomain); + return NULL; + } + + add_domain_to_list(sdomain); + return sdomain; +} + +static struct dma_domain *dma_domain_alloc(void) +{ + struct dma_domain *dma_dom; + struct page; + + dma_dom = kzalloc(sizeof(struct dma_domain), GFP_KERNEL); + if (!dma_dom) + return NULL; + + sunway_domain_init(&dma_dom->sdomain); + dma_dom->sdomain.type = IOMMU_DOMAIN_DMA; + init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_PFN(SW64_DMA_START)); + reserve_iova(&dma_dom->iovad, (0xe0000000UL >> PAGE_SHIFT), (0x100000000UL >> PAGE_SHIFT)); + + add_domain_to_list(&dma_dom->sdomain); + + return dma_dom; +} + +static void device_flush_all(struct sunway_iommu_dev *sdata) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(sdata->pdev->bus); + + if (hose == NULL) + return; + + write_piu_ior0(hose->node, hose->index, DTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PTLB_FLUSHDEV, sdata->devid); + write_piu_ior0(hose->node, hose->index, PCACHE_FLUSHDEV, sdata->devid); +} + +/* iommu_ops device attach/unattach helpers */ +static void +set_dte_entry(struct sunway_iommu_dev *sdev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct pci_dev *pdev; + struct page *dt_page, *pt_page; + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_base, dte_l2_val; + + pdev = sdev->pdev; + if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) + return; + + sdev->devid = PCI_DEVID(pdev->bus->number, pdev->devfn); + iommu = sdev->iommu; + dte_l1 = iommu->iommu_dtbr + (pdev->bus->number); + dte_l1_val = *dte_l1; + + if (!dte_l1_val) { + /* Alloc a new level-2 device table page */ + dt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, + get_order(PAGE_SIZE)); + if (!dt_page) { + pr_err("Allocating a new level-2 device table page failed.\n"); + return; + } + + dte_l2_base = (unsigned long)page_address(dt_page); + dte_l1_val = (__pa(dte_l2_base) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + *dte_l1 = dte_l1_val; + } + + if (!sdomain->pt_root) { + pt_page = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, 0); + if (!pt_page) { + pr_err("Allocating pt_root failed!\n"); + return; + } + + sdomain->pt_root = page_address(pt_page); + } + + dte_l2 = __va(dte_l1_val & ~(SW64_IOMMU_ENTRY_VALID) & PAGE_MASK) + (pdev->devfn << 3); + dte_l2_val = (__pa(sdomain->pt_root) & PAGE_MASK) | SW64_IOMMU_ENTRY_VALID; + if (sdomain->type == IOMMU_DOMAIN_IDENTITY) { + dte_l2_val |= 0x1; + sdev->passthrough = IDENTMAP_ALL; + } + *dte_l2 = dte_l2_val; + device_flush_all(sdev); +} + +static void +do_attach(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + sdev_data->domain = sdomain; + list_add(&sdev_data->list, &sdomain->dev_list); + + sdomain->dev_cnt++; + set_dte_entry(sdev_data, sdomain); + + pr_debug("iommu: device %d add to domain: %d\n", + sdev_data->devid, 
sdomain->id); +} + +static void do_detach(struct sunway_iommu_dev *sdev_data) +{ + struct sunway_iommu_domain *sdomain = sdev_data->domain; + + sdev_data->domain = NULL; + list_del(&sdev_data->list); + device_flush_all(sdev_data); + + sdomain->dev_cnt--; + pr_debug("iommu: device %d detached from domain %d\n", + sdev_data->devid, sdomain->id); +} + +static int +__attach_device(struct sunway_iommu_dev *sdev_data, struct sunway_iommu_domain *sdomain) +{ + int ret; + + spin_lock(&sdomain->lock); + ret = -EBUSY; + if (sdev_data->domain != NULL) + goto out_unlock; + + do_attach(sdev_data, sdomain); + ret = 0; + +out_unlock: + spin_unlock(&sdomain->lock); + return ret; +} + +static void __detach_device(struct sunway_iommu_dev *sunway_dev_data) +{ + struct sunway_iommu_domain *domain; + + domain = sunway_dev_data->domain; + + spin_lock(&domain->lock); + do_detach(sunway_dev_data); + spin_unlock(&domain->lock); +} + +static int attach_device(struct device *dev, struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *sdev; + unsigned long flags; + int ret; + + sdev = dev_iommu_priv_get(dev); + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + ret = __attach_device(sdev, sdomain); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + return ret; +} + +static void detach_device(struct device *dev) +{ + struct sunway_iommu_domain *sunway_domain; + struct sunway_iommu_dev *sdev; + unsigned long flags; + + sdev = dev_iommu_priv_get(dev); + sunway_domain = sdev->domain; + + if (WARN_ON(!sdev->domain)) + return; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + __detach_device(sdev); + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); + + if (!dev_is_pci(dev)) + return; +} + +static struct sunway_iommu_dev *search_dev_data(u16 devid) +{ + struct sunway_iommu_dev *sdev_data; + struct llist_node *node; + + if (llist_empty(&dev_data_list)) + return NULL; + + node = dev_data_list.first; + llist_for_each_entry(sdev_data, node, dev_data_list) { + if (sdev_data->devid == devid) + return sdev_data; + } + + return NULL; +} + +/* dma_ops helpers*/ +static struct sunway_iommu_domain *get_sunway_domain(struct device *dev) +{ + struct sunway_iommu_domain *sdomain; + struct iommu_domain *domain; + struct pci_dev *pdev; + struct sunway_iommu_dev *sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return ERR_PTR(-ENODEV); + + sdev = dev_iommu_priv_get(dev); + sdomain = sdev->domain; + if (sdomain == NULL) { + domain = iommu_get_domain_for_dev(dev); + sdomain = to_sunway_domain(domain); + attach_device(dev, sdomain); + } + + if (sdomain == NULL) + return ERR_PTR(-EBUSY); + + return sdomain; +} + +/********************************************************************** + * + * Following functions describe IOMMU init ops + * + **********************************************************************/ + +static struct sunway_iommu *sunway_iommu_early_init(struct pci_controller *hose) +{ + struct sunway_iommu *iommu; + struct page *page; + unsigned long base; + + hose->pci_iommu = kzalloc(sizeof(struct sunway_iommu), GFP_KERNEL); + if (!hose->pci_iommu) + return 0; + + iommu = hose->pci_iommu; + spin_lock_init(&iommu->dt_lock); + + iommu->node = hose->node; + if (!node_online(hose->node)) + iommu->node = -1; + + page = alloc_pages_node(iommu->node, __GFP_ZERO, get_order(PAGE_SIZE)); + if (!page) { + pr_err("Allocating a new iommu_dtbr page failed.\n"); + kfree(hose->pci_iommu); + return NULL; + } + iommu->iommu_dtbr = page_address(page); + + iommu->hose_pt = hose; 
+ iommu->index = hose->index; + + iommu->enabled = true; + + base = __pa(iommu->iommu_dtbr) & PAGE_MASK; + write_piu_ior0(hose->node, hose->index, DTBASEADDR, base); + + return iommu; +} + +unsigned long fetch_dte(struct sunway_iommu *iommu, unsigned long devid, + enum exceptype type) +{ + unsigned long *dte_l1, *dte_l2; + unsigned long dte_l1_val, dte_l2_val; + + if (!iommu) + return 0; + dte_l1 = iommu->iommu_dtbr + (devid >> 8); + if (type == DTE_LEVEL1) + return (unsigned long)dte_l1; + + dte_l1_val = *dte_l1; + if (type == DTE_LEVEL1_VAL) + return dte_l1_val; + + dte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + dte_l1_val |= PAGE_OFFSET; + dte_l2 = (unsigned long *)(dte_l1_val + ((devid & 0xff) << 3)); + if (type == DTE_LEVEL2) + return (unsigned long)dte_l2; + + dte_l2_val = *dte_l2; + if (type == DTE_LEVEL2_VAL) + return dte_l2_val; + + return dte_l2_val; +} + +unsigned long fetch_pte(struct sunway_iommu_domain *sdomain, dma_addr_t iova, + enum exceptype type) +{ + unsigned long iova_pfn; + unsigned long pte_l1_val, pte_l2_val, pte_l3_val; + unsigned long *pte_l1, *pte_l2, *pte_l3; + unsigned long pte_root; + unsigned long offset; + + if (!sdomain) + return -EINVAL; + + pte_root = __pa(sdomain->pt_root) & PAGE_MASK; + iova_pfn = iova >> PAGE_SHIFT; + pte_root = ((pte_root) & (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK)); + pte_root |= PAGE_OFFSET; + offset = ((iova_pfn >> 20) & SW64_IOMMU_LEVEL1_OFFSET) << 3; + pte_l1 = (unsigned long *)(pte_root + offset); + if (type == PTE_LEVEL1) + return (unsigned long)pte_l1; + + pte_l1_val = *pte_l1; + if (type == PTE_LEVEL1_VAL) + return pte_l1_val; + + pte_l1_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l1_val |= PAGE_OFFSET; + offset = ((iova_pfn >> 10) & SW64_IOMMU_LEVEL2_OFFSET) << 3; + pte_l2 = (unsigned long *)(pte_l1_val + offset); + + if (type == PTE_LEVEL2) + return (unsigned long)pte_l2; + + pte_l2_val = *pte_l2; + if (type == PTE_LEVEL2_VAL) + return pte_l2_val; + + pte_l2_val &= (~(SW64_IOMMU_ENTRY_VALID)) & (PAGE_MASK); + pte_l2_val |= PAGE_OFFSET; + offset = (iova_pfn & SW64_IOMMU_LEVEL3_OFFSET) << 3; + pte_l3 = (unsigned long *)(pte_l2_val + offset); + if (type == PTE_LEVEL3) + return (unsigned long)pte_l3; + + pte_l3_val = *pte_l3; + if (type == PTE_LEVEL3_VAL) + return pte_l3_val; + + return pte_l3_val; +} + +/* IOMMU Interrupt handle */ +irqreturn_t iommu_interrupt(int irq, void *dev) +{ + struct pci_controller *hose = (struct pci_controller *)dev; + struct sunway_iommu_domain *sdomain; + struct sunway_iommu_dev *sdev; + unsigned long iommu_status; + unsigned long type; + unsigned long devid, dva; + + iommu_status = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (!(iommu_status >> 63)) + return IRQ_NONE; + + type = (iommu_status >> 58) & 0xf; + devid = (iommu_status >> 36) & 0xffff; + dva = ((iommu_status & 0xffffffff) >> 3) << 13; + pr_info("%s, iommu_status = %#lx, devid %#lx, dva %#lx, ", + __func__, iommu_status, devid, dva); + + sdev = search_dev_data(devid); + if (sdev == NULL) { + pr_info("no such dev!!!\n"); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + + return IRQ_HANDLED; + } + + sdomain = sdev->domain; + switch (type) { + case DTE_LEVEL1: + pr_info("invalid level1 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL1_VAL)); + break; + case DTE_LEVEL2: + pr_info("invalid level2 dte, addr:%#lx, val:%#lx\n", + fetch_dte(hose->pci_iommu, devid, 
DTE_LEVEL2), + fetch_dte(hose->pci_iommu, devid, DTE_LEVEL2_VAL)); + break; + case PTE_LEVEL1: + pr_info("invalid level1 pte, addr: %#lx, val:%#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL1), + fetch_pte(sdomain, dva, PTE_LEVEL1_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + case PTE_LEVEL2: + pr_info("invalid level2 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL2), + fetch_pte(sdomain, dva, PTE_LEVEL2_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + + case PTE_LEVEL3: + pr_info("invalid level3 pte, addr: %#lx, val: %#lx\n", + fetch_pte(sdomain, dva, PTE_LEVEL3), + fetch_pte(sdomain, dva, PTE_LEVEL3_VAL)); + + iommu_status &= ~(1UL << 62); + write_piu_ior0(hose->node, hose->index, + IOMMUEXCPT_STATUS, iommu_status); + break; + default: + pr_info("iommu exception type %ld\n", type); + break; + } + + return IRQ_HANDLED; +} + +struct irqaction iommu_irqaction = { + .handler = iommu_interrupt, + .flags = IRQF_SHARED | IRQF_NO_THREAD, + .name = "sunway_iommu", +}; + +void sunway_enable_iommu_func(struct pci_controller *hose) +{ + unsigned int iommu_irq, err; + unsigned long iommu_conf, iommu_ctrl; + + iommu_irq = hose->int_irq; + pr_debug("%s node %ld rc %ld iommu_irq %d\n", + __func__, hose->node, hose->index, iommu_irq); + err = request_irq(iommu_irq, iommu_interrupt, + IRQF_SHARED, "sunway_iommu", hose); + if (err < 0) + pr_info("sw iommu request irq failed!\n"); + + iommu_ctrl = (1UL << 63) | (0x100UL << 10); + write_piu_ior0(hose->node, hose->index, IOMMUEXCPT_CTRL, iommu_ctrl); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + iommu_conf = iommu_conf | (0x3 << 7); + write_piu_ior0(hose->node, hose->index, PIUCONFIG0, iommu_conf); + write_piu_ior0(hose->node, hose->index, TIMEOUT_CONFIG, 0xf); + iommu_conf = read_piu_ior0(hose->node, hose->index, PIUCONFIG0); + pr_debug("SW arch configure node %ld hose-%ld iommu_conf = %#lx\n", + hose->node, hose->index, iommu_conf); +} + +static bool is_iommu_enable(struct pci_controller *hose) +{ + u64 rc_mask = 0x1; + + rc_mask <<= (8 * hose->node + hose->index); + if (iommu_enable_cmd & rc_mask) + return true; + + return false; +} + +/* iommu cpu syscore ops */ +static int iommu_cpu_suspend(void) +{ + return 0; +} + +static void iommu_cpu_resume(void) +{ + +} + +struct syscore_ops iommu_cpu_syscore_ops = { + .suspend = iommu_cpu_suspend, + .resume = iommu_cpu_resume, +}; + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type); + +static int sunway_iommu_init(void) +{ + struct pci_controller *hose; + struct sunway_iommu *iommu; + int ret; + int iommu_index = 0; + + sunway_iommu_domain_bitmap = + (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, + get_order(MAX_DOMAIN_NUM / 8)); + if (sunway_iommu_domain_bitmap == NULL) + return 0; + __set_bit(0, sunway_iommu_domain_bitmap); + + /* Do the loop */ + for (hose = hose_head; hose; hose = hose->next) { + if (!is_iommu_enable(hose)) { + hose->iommu_enable = false; + continue; + } + + iommu = sunway_iommu_early_init(hose); + if (!iommu) { + pr_err("Allocating sunway_iommu failed\n"); + hose->iommu_enable = false; + continue; + } + + iommu_device_sysfs_add(&iommu->iommu, NULL, NULL, "%d", + iommu_index); + iommu_device_set_ops(&iommu->iommu, &sunway_iommu_ops); + iommu_device_register(&iommu->iommu); + iommu_index++; + sunway_enable_iommu_func(hose); + hose->iommu_enable = true; + } + + ret = 
iova_cache_get(); + if (ret) + return ret; + + ret = bus_set_iommu(&pci_bus_type, &sunway_iommu_ops); + if (ret) + return ret; + + for (hose = hose_head; hose; hose = hose->next) + if (hose->iommu_enable) + piu_flush_all(hose); + + register_syscore_ops(&iommu_cpu_syscore_ops); + + return 1; +} +subsys_initcall_sync(sunway_iommu_init); + +/******************************************************************************* + * + * DMA OPS Functions + * + ******************************************************************************/ + +struct sunway_iommu *get_first_iommu_from_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu *iommu; + struct sunway_iommu_dev *entry; + + entry = list_first_entry(&sdomain->dev_list, struct sunway_iommu_dev, list); + iommu = entry->iommu; + + return iommu; +} + +static unsigned long +sunway_iommu_unmap_page(struct sunway_iommu_domain *sunway_domain, + unsigned long iova, unsigned long page_size) +{ + unsigned long offset, iova_pfn; + unsigned long *pte_base, *pte; + unsigned long grn; + int level, current_level; + int tmp = 1; + + pr_debug("%s iova %#lx, page_size %#lx\n", __func__, iova, page_size); + BUG_ON(!is_power_of_2(page_size)); + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + level = 2; + grn = PTE_GRN_8M; + break; + default: + level = 3; + break; + } + + pte_base = sunway_domain->pt_root; + iova_pfn = iova >> PAGE_SHIFT; + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + if (current_level == level) { + if (grn == PTE_GRN_512M) { + int i; + + for (i = 0; i < 64; i++) { + *(pte + i) = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + + } else { + *pte = 0; + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + flush_ptlb_by_addr(sunway_domain, (iova >> PAGE_SHIFT)); + break; + } + + pte_base = (unsigned long *)((*pte & (~PTE_FLAGS_MASK)) | PAGE_OFFSET); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return page_size; +} + +int sunway_iommu_map_page(struct sunway_iommu_domain *sunway_domain, + unsigned long bus_addr, unsigned long paddr, + size_t page_size) +{ + struct page *page; + struct sunway_iommu *iommu; + unsigned long iova_pfn, pte_val; + unsigned long *pte_base, *pte; + unsigned long offset, grn = 0; + int level = 0, current_level; + int tmp = 1; + + iommu = get_first_iommu_from_domain(sunway_domain); + if (!iommu) + return -1; + iova_pfn = bus_addr >> PAGE_SHIFT; + pte_base = sunway_domain->pt_root; + + switch (page_size) { + case (1UL << 33): + level = 1; + grn = PTE_GRN_8G; + break; + case (1UL << 29): + level = 2; + grn = PTE_GRN_512M; + break; + case (1UL << 23): + grn = PTE_GRN_8M; + level = 2; + break; + default: + level = 3; + break; + } + + offset = (iova_pfn >> 20) & 0x1ff; + current_level = 1; + while (current_level <= level) { + pte = &pte_base[offset]; + + if (!(*pte) || (current_level == level)) { + pte_val = PTE_VALID | PTE_RWE | grn; + if (current_level == level) { + *(volatile u64 *)(pte) = 0; + pte_val |= ((paddr & PAGE_MASK) | LAST_STAGE); + } else { + page = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 0); + if (!page) { + pr_err("Allocating level%d page table pages failed.\n", (level + 1)); + return -ENOMEM; + } + + pte_val |= (page_to_phys(page) & PAGE_MASK); + } + + if ((grn == PTE_GRN_512M) && (current_level == 2)) { + int i; + + for (i = 0; i < 64; 
i++) { + cmpxchg64((volatile u64 *)(pte + i), 0UL, pte_val); + flush_pcache_by_addr(sunway_domain, (unsigned long)(pte + i)); + } + } else { + if (cmpxchg64((volatile u64 *)pte, 0UL, pte_val)) + free_page((unsigned long)page_address(page)); + else + flush_pcache_by_addr(sunway_domain, (unsigned long)pte); + } + } + + pte_base = (unsigned long *)__va((*pte) & (~PTE_FLAGS_MASK)); + offset = (iova_pfn >> (tmp--) * 10) & 0x3ff; + current_level++; + } + + return 0; +} + +static unsigned long +sunway_alloc_iova(struct dma_domain *dma_dom, unsigned long pages, struct pci_dev *pdev) +{ + struct device *dev; + unsigned long pfn = 0; + + pages = __roundup_pow_of_two(pages); + dev = &(pdev->dev); + if (min(dev->coherent_dma_mask, *dev->dma_mask) == DMA_BIT_MASK(32)) { + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_32BIT_DMA_LIMIT), true); + } else { + /* IOVA boundary should be 16M ~ 3.5G */ + pfn = alloc_iova_fast(&dma_dom->iovad, pages, + IOVA_PFN(SW64_64BIT_DMA_LIMIT), true); + } + + return (pfn << PAGE_SHIFT); +} + +static void sunway_free_iova(struct dma_domain *dma_dom, + unsigned long address, unsigned long pages) +{ + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + free_iova_fast(&dma_dom->iovad, address, pages); +} + +static dma_addr_t +__sunway_map_single(struct dma_domain *dma_dom, + struct pci_dev *pdev, phys_addr_t paddr, size_t size) +{ + dma_addr_t ret, address, start; + unsigned long npages, i; + + npages = iommu_num_pages(paddr, size, PAGE_SIZE); + + address = sunway_alloc_iova(dma_dom, npages, pdev); + if (!address) + return 0; + + start = address; + for (i = 0; i < npages; ++i) { + ret = sunway_iommu_map_page(&dma_dom->sdomain, start, + paddr, PAGE_SIZE); + if (ret) { + pr_info("error when map page.\n"); + goto out_unmap; + } + + start += PAGE_SIZE; + paddr += PAGE_SIZE; + } + + address += paddr & ~PAGE_MASK; + return address; + +out_unmap: + for (--i; i >= 0; --i) { + start -= PAGE_SIZE; + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + } + + sunway_free_iova(dma_dom, address, npages); + return 0; +} + +static dma_addr_t +pci_iommu_map_single(struct pci_dev *pdev, + struct dma_domain *dma_dom, void *cpu_addr, size_t size) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(pdev->bus); + unsigned long paddr; + + if (hose == NULL) { + pr_err("%s: hose does not exist!\n", __func__); + return 0; + } + + paddr = __sunway_map_single(dma_dom, pdev, __pa(cpu_addr), size); + + pr_debug("pci_alloc_consistent: %zx -> [%px,%lx] from %ps\n", + size, cpu_addr, paddr, __builtin_return_address(0)); + + return paddr; +} + +static void *sunway_alloc_coherent(struct device *dev, + size_t size, + dma_addr_t *dma_addr, gfp_t gfp, + unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct sunway_iommu_dev *sdev; + struct page *page; + void *cpu_addr; + + if (!pdev) + return NULL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return NULL; + + gfp &= ~GFP_DMA; + +try_again: + page = alloc_pages_node(dev_to_node(dev), gfp | __GFP_ZERO, get_order(size)); + if (!page) { + pr_err("Allocating pages failed.\n"); + return NULL; + } + + cpu_addr = page_address(page); + if (!cpu_addr) { + pr_info + ("pci_alloc_consistent: get_free_pages failed from %ps\n", + __builtin_return_address(0)); + + return NULL; + } + + *dma_addr = __pa(cpu_addr); + if (!(hose->iommu_enable)) + return cpu_addr; + + sdev = 
dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return cpu_addr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return cpu_addr; + } + + __free_pages(page, get_order(size)); + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->alloc(dev, size, dma_addr, gfp, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + *dma_addr = pci_iommu_map_single(pdev, dma_dom, cpu_addr, size); + if (*dma_addr == 0) { + free_pages((unsigned long)cpu_addr, get_order(size)); + if (gfp & GFP_DMA) + return NULL; + + gfp |= GFP_DMA; + goto try_again; + } + + return cpu_addr; +} + +static void +__sunway_unmap_single(struct dma_domain *dma_dom, dma_addr_t dma_addr, size_t size) +{ + dma_addr_t start; + unsigned long npages; + int i; + + npages = iommu_num_pages(dma_addr, size, PAGE_SIZE); + dma_addr &= PAGE_MASK; + start = dma_addr; + + for (i = 0; i < npages; i++) { + sunway_iommu_unmap_page(&dma_dom->sdomain, start, PAGE_SIZE); + start += PAGE_SIZE; + } + + sunway_free_iova(dma_dom, dma_addr, npages); + pr_debug("pci_free_consistent: %zx -> [%llx] from %ps\n", + size, dma_addr, __builtin_return_address(0)); + +} + +static void +sunway_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_addr, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + + if (!pdev) + goto out_unmap; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + goto out_unmap; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + goto out_unmap; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); + goto out_free; + +out_unmap: + pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL); + +out_free: + pr_debug("sunway_free_consistent: [%llx,%zx] from %ps\n", + dma_addr, size, __builtin_return_address(0)); + + free_pages((unsigned long)vaddr, get_order(size)); +} + +static dma_addr_t +sunway_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, unsigned long attrs) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + phys_addr_t paddr = page_to_phys(page) + offset; + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose || !(hose->iommu_enable)) + return paddr; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + return paddr; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + return paddr; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_page(dev, page, offset, size, dir, attrs); + } + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + return pci_iommu_map_single(pdev, dma_dom, + (char *)page_address(page) + offset, size); +} + +static void +sunway_unmap_page(struct device *dev, dma_addr_t dma_addr, + size_t size, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev 
*sdev; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (hose == NULL) + return; + + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + __sunway_unmap_single(dma_dom, dma_addr, size); +} + +#define SG_ENT_VIRT_ADDRESS(SG) (sg_virt((SG))) +static int +sunway_map_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom = NULL; + struct scatterlist *sg; + struct pci_dev *pdev = to_pci_dev(dev); + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + int i, out_nents = 0; + + if (dir == PCI_DMA_NONE) + BUG(); + + if (!pdev) + return 0; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return 0; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, i) { + BUG_ON(!sg_page(sg)); + + sg_dma_address(sg) = __pa(SG_ENT_VIRT_ADDRESS(sg)); + if (!(hose->iommu_enable)) + goto check; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough & DMA_MASK64) + goto check; + else if (sdev->passthrough) { + if (min(dev->coherent_dma_mask, *dev->dma_mask) > DMA_BIT_MASK(32)) { + sdev->passthrough |= DMA_MASK64; + goto check; + } + + set_dma_ops(dev, get_arch_dma_ops(dev->bus)); + return dev->dma_ops->map_sg(dev, sgl, nents, dir, attrs); + } + + sg_dma_address(sg) = + pci_iommu_map_single(pdev, dma_dom, + SG_ENT_VIRT_ADDRESS(sg), sg->length); +check: + if (sg_dma_address(sg) == 0) + goto error; + + sg_dma_len(sg) = sg->length; + out_nents++; + } + + return nents; + +error: + pr_warn("pci_map_sg failed:"); + pr_warn("could not allocate dma page tables\n"); + + if (out_nents) + pci_unmap_sg(pdev, sgl, out_nents, dir); + return 0; +} + +static void +sunway_unmap_sg(struct device *dev, struct scatterlist *sgl, + int nents, enum dma_data_direction dir, unsigned long attrs) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + struct scatterlist *sg; + struct pci_dev *pdev; + struct pci_controller *hose; + struct sunway_iommu_dev *sdev; + dma_addr_t dma_addr; + long size; + int j; + + pdev = to_pci_dev(dev); + if (!pdev) + return; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose->iommu_enable) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->passthrough) + return; + + sdomain = get_sunway_domain(dev); + dma_dom = to_dma_domain(sdomain); + + for_each_sg(sgl, sg, nents, j) { + dma_addr = sg->dma_address; + size = sg->dma_length; + if (!size) + break; + + __sunway_unmap_single(dma_dom, dma_addr, size); + } +} + +static const struct dma_map_ops sunway_dma_ops = { + .alloc = sunway_alloc_coherent, + .free = sunway_free_coherent, + .map_sg = sunway_map_sg, + .unmap_sg = sunway_unmap_sg, + .map_page = sunway_map_page, + .unmap_page = sunway_unmap_page, + .dma_supported = dma_direct_supported, +}; + +/********************************************************************** + * + * IOMMU OPS Functions + * + **********************************************************************/ + +static struct iommu_domain *sunway_iommu_domain_alloc(unsigned int type) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + sdomain = sunway_domain_alloc(); + if (!sdomain) { + pr_err("Allocating sunway_domain failed!\n"); + return NULL; + } + + 
sdomain->domain.geometry.aperture_start = 0UL; + sdomain->domain.geometry.aperture_end = ~0ULL; + sdomain->domain.geometry.force_aperture = true; + sdomain->type = IOMMU_DOMAIN_UNMANAGED; + break; + + case IOMMU_DOMAIN_DMA: + dma_dom = dma_domain_alloc(); + if (!dma_dom) { + pr_err("Failed to alloc dma domain!\n"); + return NULL; + } + + sdomain = &dma_dom->sdomain; + break; + + case IOMMU_DOMAIN_IDENTITY: + sdomain = sunway_domain_alloc(); + if (!sdomain) + return NULL; + + sdomain->type = IOMMU_DOMAIN_IDENTITY; + break; + + default: + return NULL; + } + + return &sdomain->domain; +} + +static void clean_domain(struct sunway_iommu_domain *sdomain) +{ + struct sunway_iommu_dev *entry; + unsigned long flags; + + spin_lock_irqsave(&sunway_iommu_device_table_lock, flags); + + while (!list_empty(&sdomain->dev_list)) { + entry = list_first_entry(&sdomain->dev_list, + struct sunway_iommu_dev, list); + + BUG_ON(!entry->domain); + __detach_device(entry); + } + + spin_unlock_irqrestore(&sunway_iommu_device_table_lock, flags); +} + +static void sunway_iommu_domain_free(struct iommu_domain *dom) +{ + struct sunway_iommu_domain *sdomain; + struct dma_domain *dma_dom; + + sdomain = to_sunway_domain(dom); + + if (sdomain->dev_cnt > 0) + clean_domain(sdomain); + + BUG_ON(sdomain->dev_cnt != 0); + + if (!dom) + return; + + switch (dom->type) { + case IOMMU_DOMAIN_DMA: + dma_dom = to_dma_domain(sdomain); + dma_domain_free(dma_dom); + break; + + default: + free_pagetable(sdomain); + sunway_domain_free(sdomain); + break; + } + +} + +static int sunway_iommu_attach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev; + struct pci_controller *hose; + int ret; + + pdev = to_pci_dev(dev); + if (!pdev) + return -EINVAL; + + hose = pci_bus_to_pci_controller(pdev->bus); + if (!hose) + return -EINVAL; + + if (!hose->iommu_enable) + return -EINVAL; + + sdev = dev_iommu_priv_get(dev); + if (!sdev) + return -EINVAL; + + if (sdev->domain) + detach_device(dev); + + ret = attach_device(dev, sdomain); + + return ret; +} + +static void sunway_iommu_detach_device(struct iommu_domain *dom, struct device *dev) +{ + struct sunway_iommu_dev *sdev; + struct pci_dev *pdev = to_pci_dev(dev); + + if (!pdev) + return; + + sdev = dev_iommu_priv_get(dev); + if (sdev->domain != NULL) + detach_device(dev); +} + +static phys_addr_t +sunway_iommu_iova_to_phys(struct iommu_domain *dom, dma_addr_t iova) +{ + struct sunway_iommu_domain *sdomain = to_sunway_domain(dom); + unsigned long paddr, grn; + unsigned long is_last; + + if (iova > SW64_BAR_ADDRESS) + return iova; + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL1_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_8G) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8G_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL2_VAL); + if ((paddr & SW64_IOMMU_ENTRY_VALID) == 0) + return 0; + + is_last = paddr & SW64_PTE_LAST_MASK; + grn = paddr & SW64_PTE_GRN_MASK; + if (is_last) { + if (grn == PTE_GRN_512M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_512M_OFFSET_MASK; + return paddr; + } + + if (grn == PTE_GRN_8M) { + paddr &= ~PTE_FLAGS_MASK; + paddr += iova & PAGE_8M_OFFSET_MASK; + return paddr; + } + + return 0; + } + + paddr = fetch_pte(sdomain, iova, PTE_LEVEL3_VAL); + if ((paddr & 
SW64_IOMMU_ENTRY_VALID) == 0)
+		return 0;
+
+	grn = paddr & SW64_PTE_GRN_MASK;
+	if (grn != 0)
+		return 0;
+
+	paddr &= ~PTE_FLAGS_MASK;
+	paddr += iova & ~PAGE_MASK;
+	return paddr;
+}
+
+static int
+sunway_iommu_map(struct iommu_domain *dom, unsigned long iova,
+		 phys_addr_t paddr, size_t page_size, int iommu_prot, gfp_t gfp)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	int ret;
+
+	/*
+	 * As VFIO cannot distinguish between normal DMA request
+	 * and pci device BAR, check should be introduced manually
+	 * to avoid VFIO trying to map pci config space.
+	 */
+	if (iova > SW64_BAR_ADDRESS)
+		return 0;
+
+	mutex_lock(&sdomain->api_lock);
+	ret = sunway_iommu_map_page(sdomain, iova, paddr, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return ret;
+}
+
+static size_t
+sunway_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
+		   size_t page_size,
+		   struct iommu_iotlb_gather *gather)
+{
+	struct sunway_iommu_domain *sdomain = to_sunway_domain(dom);
+	size_t unmap_size;
+
+	if (iova > SW64_BAR_ADDRESS)
+		return page_size;
+
+	mutex_lock(&sdomain->api_lock);
+	unmap_size = sunway_iommu_unmap_page(sdomain, iova, page_size);
+	mutex_unlock(&sdomain->api_lock);
+
+	return unmap_size;
+}
+
+static struct iommu_group *sunway_iommu_device_group(struct device *dev)
+{
+	return pci_device_group(dev);
+}
+
+static void iommu_uninit_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev)
+		return;
+
+	if (sdev->domain)
+		detach_device(dev);
+
+	dev_iommu_priv_set(dev, NULL);
+}
+
+static void sunway_iommu_release_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return;
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose->iommu_enable)
+		return;
+
+	iommu_uninit_device(dev);
+}
+
+static int iommu_init_device(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+	struct sunway_iommu *iommu;
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+
+	if (dev_iommu_priv_get(dev))
+		return 0;
+
+	sdev = kzalloc(sizeof(struct sunway_iommu_dev), GFP_KERNEL);
+	if (!sdev)
+		return -ENOMEM;
+
+	pdev = to_pci_dev(dev);
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	iommu = hose->pci_iommu;
+	llist_add(&sdev->dev_data_list, &dev_data_list);
+	sdev->pdev = pdev;
+	sdev->iommu = iommu;
+
+	dev_iommu_priv_set(dev, sdev);
+
+	return 0;
+}
+
+static struct iommu_device *sunway_iommu_probe_device(struct device *dev)
+{
+	struct pci_dev *pdev;
+	struct pci_controller *hose;
+	struct sunway_iommu *iommu;
+	int ret;
+
+	pdev = to_pci_dev(dev);
+	if (!pdev)
+		return ERR_PTR(-ENODEV);
+
+	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+		return ERR_PTR(-ENODEV);
+
+	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)
+		return ERR_PTR(-ENODEV);
+
+	hose = pci_bus_to_pci_controller(pdev->bus);
+	if (!hose)
+		return ERR_PTR(-ENODEV);
+
+	if (!hose->iommu_enable)
+		return ERR_PTR(-ENODEV);
+
+	if (dev_iommu_priv_get(dev)) {
+		iommu = hose->pci_iommu;
+		return &iommu->iommu;
+	}
+
+	ret = iommu_init_device(dev);
+	if (ret)
+		return ERR_PTR(ret);
+
+	iommu = hose->pci_iommu;
+
+	return &iommu->iommu;
+}
+
+static int sunway_iommu_def_domain_type(struct device *dev)
+{
+	struct sunway_iommu_dev *sdev;
+
+	sdev = dev_iommu_priv_get(dev);
+	if (!sdev->domain)
+		return 0;
+
+	return sdev->domain->type;
+}
+
+static bool sunway_iommu_capable(enum iommu_cap cap)
+{
+	switch (cap) {
+	case IOMMU_CAP_INTR_REMAP:
+		return true;
+	default:
+		return false;
+	}
+}
+
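The iova_to_phys() implementation above resolves a translation by checking, at each level, whether the entry is last-stage and which granule it encodes: PTE_GRN_8G leaves terminate the walk at level 1, PTE_GRN_512M and PTE_GRN_8M at level 2, and a zero granule at level 3 maps a base 8K page. A condensed sketch of just the leaf decode, reusing the driver's masks; leaf_to_phys() is a hypothetical helper and assumes the PTE was already checked for validity and the last-stage bit:

	/* Illustrative sketch of the last-stage PTE decode. */
	static phys_addr_t leaf_to_phys(unsigned long pte, dma_addr_t iova)
	{
		phys_addr_t base = pte & ~PTE_FLAGS_MASK; /* strip valid/grn/rwe */

		switch (pte & SW64_PTE_GRN_MASK) {
		case PTE_GRN_8G:
			return base + (iova & PAGE_8G_OFFSET_MASK);
		case PTE_GRN_512M:
			return base + (iova & PAGE_512M_OFFSET_MASK);
		case PTE_GRN_8M:
			return base + (iova & PAGE_8M_OFFSET_MASK);
		default:
			return base + (iova & ~PAGE_MASK); /* base 8K page */
		}
	}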
+static void sunway_iommu_probe_finalize(struct device *dev) +{ + struct iommu_domain *domain; + + domain = iommu_get_domain_for_dev(dev); + if (domain) + set_dma_ops(dev, &sunway_dma_ops); +} + +const struct iommu_ops sunway_iommu_ops = { + .capable = sunway_iommu_capable, + .domain_alloc = sunway_iommu_domain_alloc, + .domain_free = sunway_iommu_domain_free, + .attach_dev = sunway_iommu_attach_device, + .detach_dev = sunway_iommu_detach_device, + .probe_device = sunway_iommu_probe_device, + .probe_finalize = sunway_iommu_probe_finalize, + .release_device = sunway_iommu_release_device, + .map = sunway_iommu_map, + .unmap = sunway_iommu_unmap, + .iova_to_phys = sunway_iommu_iova_to_phys, + .device_group = sunway_iommu_device_group, + .pgsize_bitmap = SW64_IOMMU_PGSIZES, + .def_domain_type = sunway_iommu_def_domain_type, +}; + +/***************************************************************************** + * + * Boot parameter handling + * Each bit of the iommu_enable bitmap enables one root complex (RC), and + * every 8 bits represent one CPU node. For example, iommu_enable=0x0100 + * means enabling rc0 for cpu node 1. + * + *****************************************************************************/ +static int __init iommu_enable_setup(char *str) +{ + int ret; + unsigned long rc_bitmap = 0xffffffffUL; + + ret = kstrtoul(str, 16, &rc_bitmap); + iommu_enable_cmd = rc_bitmap; + + return ret; +} +__setup("iommu_enable=", iommu_enable_setup); diff --git a/drivers/iommu/sw64/sunway_iommu.h b/drivers/iommu/sw64/sunway_iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..94a155001d1b192e99bdbd814ef996550a31e27f --- /dev/null +++ b/drivers/iommu/sw64/sunway_iommu.h @@ -0,0 +1,79 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains declarations and inline functions for interfacing + * with the PCI initialization routines. 
+ */ +#include +#include +#include +#include + +struct sunway_iommu_bypass_id { + unsigned int vendor; + unsigned int device; +}; + +struct sunway_iommu { + int index; + bool enabled; + unsigned long *iommu_dtbr; + spinlock_t dt_lock; /* Device Table Lock */ + int node; /* NUMA node */ + + struct pci_controller *hose_pt; + struct iommu_device iommu; /* IOMMU core code handle */ +}; + +struct sunway_iommu_dev { + struct list_head list; /* For domain->dev_list */ + struct llist_node dev_data_list; /* Global device list */ + u16 devid; + int alias; + unsigned int passthrough; + struct sunway_iommu *iommu; + struct pci_dev *pdev; + + spinlock_t lock; /* Lock the page table mainly */ + struct sunway_iommu_domain *domain; /* Domain device is bound to */ +}; + +struct sunway_iommu_domain { + unsigned int type; + spinlock_t lock; + struct mutex api_lock; + u16 id; /* Domain ID */ + struct list_head list; /* For list of all SW domains */ + struct list_head dev_list; /* List of devices in this domain */ + struct iommu_domain domain; /* IOMMU domain handle */ + unsigned long *pt_root; /* Page Table root */ + unsigned int dev_cnt; /* Number of devices in this domain */ +}; + +struct sw64dev_table_entry { + u64 data; +}; + +struct sunway_iommu_group { + struct pci_dev *dev; + struct iommu_group *group; +}; + +#define SW64_IOMMU_ENTRY_VALID ((1UL) << 63) +#define SW64_PTE_LAST_MASK ((1UL) << 8) /* last stage valid */ +#define SW64_DMA_START 0x1000000 +#define SW64_PTE_GRN_MASK ((0x3UL) << 4) +#define PAGE_8M_SHIFT 23 +#define PAGE_512M_SHIFT 29 +#define PAGE_8G_SHIFT 33 +#define SW64_IOMMU_ENABLE 3 +#define SW64_IOMMU_DISABLE 0 +#define SW64_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW64_IOMMU_LEVEL2_OFFSET 0x3ff +#define SW64_IOMMU_LEVEL3_OFFSET 0x3ff +#define SW64_IOMMU_BYPASS 0x1 +#define SW64_IOMMU_MAP_FLAG ((0x1UL) << 20) + +#define PAGE_SHIFT_IOMMU 18 +#define PAGE_SIZE_IOMMU (_AC(1, UL) << PAGE_SHIFT_IOMMU) + +#define PCACHE_FLUSHPADDR_MASK 0xffffffffff80UL diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index f7149d0f3d45ca2358e220a5f229491d86271a7e..9944776010a0ac20e0e8ad98ebf2e06417f5482e 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -11,6 +11,39 @@ config ARM_GIC select IRQ_DOMAIN_HIERARCHY select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP +config SW64_INTC_V2 + bool "SW64 Interrupt Controller V2" + depends on UNCORE_XUELANG + default y + select GENERIC_IRQ_CHIP + select IRQ_DOMAIN + help + This enables support for the INTC chip found in SW CHIP3 systems. + The INTC controls device interrupts and connects them to each + core's local interrupt controller. + +config SW64_LPC_INTC + bool "SW64 CPU built-in LPC Interrupt Controller" + depends on SW64_INTC_V2 + help + Say yes here to add support for the SW64 CPU built-in LPC + IRQ controller. 
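As a concrete example, on an SW64 Xuelang platform these entries, together with the hidden SW64_IRQ_* options that follow, would typically resolve to the fragment below. This is an illustrative .config excerpt, not part of the patch, and assumes CONFIG_UNCORE_XUELANG=y and CONFIG_PCI_MSI=y::

    CONFIG_SW64_INTC_V2=y
    CONFIG_SW64_LPC_INTC=y
    CONFIG_SW64_IRQ_CPU=y
    CONFIG_SW64_IRQ_MSI=y       # builds irq-sunway-msi.o on UNCORE_XUELANG
    CONFIG_SW64_IRQ_MSI_VT=y

With CONFIG_UNCORE_XUELANG unset, the Makefile change below selects irq-sunway-msi-v2.o instead of irq-sunway-msi.o for the same CONFIG_SW64_IRQ_MSI option.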
+ +config SW64_IRQ_CPU + bool + depends on SW64 + default y + +config SW64_IRQ_MSI + bool + depends on SW64 && PCI_MSI + default y + +config SW64_IRQ_MSI_VT + bool + depends on SW64_IRQ_MSI + default y + config ARM_GIC_PM bool depends on PM @@ -56,6 +89,14 @@ config ARM_GIC_V3_ITS_FSL_MC depends on FSL_MC_BUS default ARM_GIC_V3_ITS +config ARM_GIC_PHYTIUM_2500 + bool + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + select PARTITION_PERCPU + select GENERIC_IRQ_EFFECTIVE_AFF_MASK + select GENERIC_MSI_IRQ_DOMAIN + config ARM_NVIC bool select IRQ_DOMAIN_HIERARCHY diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index ffd945fe71aa2ce7e97d1c7e86509886b2fecb23..246aa0603d6ec0155b37ad7a017c9d66cb20111b 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -27,6 +27,17 @@ obj-$(CONFIG_SUN6I_R_INTC) += irq-sun6i-r.o obj-$(CONFIG_SUNXI_NMI_INTC) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_SW64_INTC_V2) += irq-sw64-intc-v2.o +obj-$(CONFIG_SW64_LPC_INTC) += irq-sw64-lpc-intc.o +obj-$(CONFIG_SW64_IRQ_CPU) += irq-sunway-cpu.o + +ifeq ($(CONFIG_UNCORE_XUELANG),y) +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi.o +else +obj-$(CONFIG_SW64_IRQ_MSI) += irq-sunway-msi-v2.o +endif + +obj-$(CONFIG_SW64_IRQ_MSI_VT) += irq-sunway-msi-vt.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o @@ -34,6 +45,7 @@ obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC) += irq-gic-v3-its-fsl-mc-msi.o +obj-$(CONFIG_ARM_GIC_PHYTIUM_2500) += irq-gic-phytium-2500.o irq-gic-phytium-2500-its.o obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o diff --git a/drivers/irqchip/irq-gic-phytium-2500-its.c b/drivers/irqchip/irq-gic-phytium-2500-its.c new file mode 100644 index 0000000000000000000000000000000000000000..5685f5f901a1c8f771db25eb43a4b84a38deb88a --- /dev/null +++ b/drivers/irqchip/irq-gic-phytium-2500-its.c @@ -0,0 +1,5766 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2022 Phytium Corporation. + * Author: + * Wang Yinfeng + * Chen Baozi + * Chen Siyu + * Cui Fulong + * Li Yuting + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include + +#include "irq-gic-common.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) +#define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) +#define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) +#define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) + +#define RD_LOCAL_LPI_ENABLED BIT(0) +#define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) +#define RD_LOCAL_MEMRESERVE_DONE BIT(2) + +static u32 lpi_id_bits; + +/* + * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_NRBITS lpi_id_bits +#define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) +#define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) + +#define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS_BASER structure - contains memory information, cached + * value of BASER register configuration and ITS page size. + */ +struct its_baser { + void *base; + u64 val; + u32 order; + u32 psz; +}; + +struct its_device; + +/* + * The ITS structure - contains most of the infrastructure, with the + * top-level MSI domain, the command queue, the collections, and the + * list of devices writing to it. + * + * dev_alloc_lock has to be taken for device allocations, while the + * spinlock must be taken to parse data structures such as the device + * list. 
+ */ +struct its_node { + raw_spinlock_t lock; + struct mutex dev_alloc_lock; + struct list_head entry; + void __iomem *base; + void __iomem *sgir_base; + phys_addr_t phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + struct its_baser tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); + u64 typer; + u64 cbaser_save; + u32 ctlr_save; + u32 mpidr; + struct list_head its_device_list; + u64 flags; + unsigned long list_nr; + int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ + int vlpi_redist_offset; +}; + +#define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) +#define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) +#define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) + +#define ITS_ITT_ALIGN SZ_256 + +/* The maximum number of VPEID bits supported by VLPI commands */ +#define ITS_MAX_VPEID_BITS \ + ({ \ + int nvpeid = 16; \ + if (gic_rdists->has_rvpeid && \ + gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ + nvpeid = 1 + (gic_rdists->gicd_typer2 & \ + GICD_TYPER2_VID); \ + \ + nvpeid; \ + }) +#define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) + +/* Convert page order to size in bytes */ +#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) + +struct event_lpi_map { + unsigned long *lpi_map; + u16 *col_map; + irq_hw_number_t lpi_base; + int nr_lpis; + raw_spinlock_t vlpi_lock; + struct its_vm *vm; + struct its_vlpi_map *vlpi_maps; + int nr_vlpis; +}; + +/* + * The ITS view of a device - belongs to an ITS, owns an interrupt + * translation table, and a list of interrupts. If some of its + * LPIs are injected into a guest (GICv4), the event_map.vm field + * indicates which one. + */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct event_lpi_map event_map; + void *itt; + u32 nr_ites; + u32 device_id; + bool shared; +}; + +static struct { + raw_spinlock_t lock; + struct its_device *dev; + struct its_vpe **vpes; + int next_victim; +} vpe_proxy; + +struct cpu_lpi_count { + atomic_t managed; + atomic_t unmanaged; +}; + +static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count_ft2500); + +static LIST_HEAD(its_nodes); +static DEFINE_RAW_SPINLOCK(its_lock); +static struct rdists *gic_rdists; +static struct irq_domain *its_parent; + +static unsigned long its_list_map; +static u16 vmovp_seq_num; +static DEFINE_RAW_SPINLOCK(vmovp_lock); + +static DEFINE_IDA(its_vpeid_ida); + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) +#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) + +/* + * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we + * always have vSGIs mapped. 
+ */ +static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) +{ + return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); +} + +static u16 get_its_list(struct its_vm *vm) +{ + struct its_node *its; + unsigned long its_list = 0; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (require_its_list_vmovp(vm, its)) + __set_bit(its->list_nr, &its_list); + } + + return (u16)its_list; +} + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + return d->hwirq - its_dev->event_map.lpi_base; +} + +static struct its_collection *dev_event_to_col(struct its_device *its_dev, + u32 event) +{ + struct its_node *its = its_dev->its; + + return its->collections + its_dev->event_map.col_map[event]; +} + +static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, + u32 event) +{ + if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) + return NULL; + + return &its_dev->event_map.vlpi_maps[event]; +} + +static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + return dev_event_to_vlpi_map(its_dev, event); + } + + return NULL; +} + +static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) +{ + raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); + return vpe->col_idx; +} + +static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) +{ + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); +} + +static struct irq_chip its_vpe_irq_chip; + +static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) +{ + struct its_vpe *vpe = NULL; + int cpu; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); + } else { + /* Physical LPIs are already locked via the irq_desc lock */ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + cpu = its_dev->event_map.col_map[its_get_event_id(d)]; + /* Keep GCC quiet... */ + *flags = 0; + } + + return cpu; +} + +static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) +{ + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + + if (map) + vpe = map->vpe; + } + + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); +} + +static struct its_collection *valid_col(struct its_collection *col) +{ + if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) + return NULL; + + return col; +} + +static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) +{ + if (valid_col(its->collections + vpe->col_idx)) + return vpe; + + return NULL; +} + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. 
+ */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_clear_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapti_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 event_id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + + struct { + struct its_vpe *vpe; + } its_vinvall_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + bool valid; + } its_vmapp_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 virt_id; + u32 event_id; + bool db_enabled; + } its_vmapti_cmd; + + struct { + struct its_vpe *vpe; + struct its_device *dev; + u32 event_id; + bool db_enabled; + } its_vmovi_cmd; + + struct { + struct its_vpe *vpe; + struct its_collection *col; + u16 seq_num; + u16 its_list; + } its_vmovp_cmd; + + struct { + struct its_vpe *vpe; + } its_invdb_cmd; + + struct { + struct its_vpe *vpe; + u8 sgi; + u8 priority; + bool enable; + bool group; + bool clear; + } its_vsgi_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. + */ +struct its_cmd_block { + union { + u64 raw_cmd[4]; + __le64 raw_cmd_le[4]; + }; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) +{ + u64 mask = GENMASK_ULL(h, l); + *raw_cmd &= ~mask; + *raw_cmd |= (val << l) & mask; +} + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0); +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32); +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + its_mask_encode(&cmd->raw_cmd[1], id, 31, 0); +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32); +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + its_mask_encode(&cmd->raw_cmd[1], size, 4, 0); +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63); +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + its_mask_encode(&cmd->raw_cmd[2], col, 15, 0); +} + +static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid) +{ + its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32); +} + +static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id) +{ + its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0); 
+} + +static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id) +{ + its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32); +} + +static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid) +{ + its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0); +} + +static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num) +{ + its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32); +} + +static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) +{ + its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0); +} + +static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); +} + +static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) +{ + its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0); +} + +static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa) +{ + its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16); +} + +static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc) +{ + its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8); +} + +static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz) +{ + its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9); +} + +static void its_encode_vmapp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0); +} + +static void its_encode_vmovp_default_db(struct its_cmd_block *cmd, + u32 vpe_db_lpi) +{ + its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0); +} + +static void its_encode_db(struct its_cmd_block *cmd, bool db) +{ + its_mask_encode(&cmd->raw_cmd[2], db, 63, 63); +} + +static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi) +{ + its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32); +} + +static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio) +{ + its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20); +} + +static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp) +{ + its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10); +} + +static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr) +{ + its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9); +} + +static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en) +{ + its_mask_encode(&cmd->raw_cmd[0], en, 8, 8); +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return NULL; +} + +static struct its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + 
return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_mapti_cmd.dev, + desc->its_mapti_cmd.event_id); + col->col_id = col->col_id % 64; + + its_encode_cmd(cmd, GITS_CMD_MAPTI); + its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapti_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id); + its_encode_collection(cmd, col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_movi_cmd.dev, + desc->its_movi_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_MOVI); + its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.event_id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_discard_cmd.dev, + desc->its_discard_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_collection *col; + + col = dev_event_to_col(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_col(col); +} + +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_invall_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_invall_cmd.col; +} + +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_VINVALL); + its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id); + + 
its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vinvall_cmd.vpe); +} + +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long vpt_addr, vconf_addr; + u64 target; + bool alloc; + + its_encode_cmd(cmd, GITS_CMD_VMAPP); + its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id); + its_encode_valid(cmd, desc->its_vmapp_cmd.valid); + + if (!desc->its_vmapp_cmd.valid) { + if (is_v4_1(its)) { + alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count); + its_encode_alloc(cmd, alloc); + } + + goto out; + } + + vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; + + its_encode_target(cmd, target); + its_encode_vpt_addr(cmd, vpt_addr); + its_encode_vpt_size(cmd, LPI_NRBITS - 1); + + if (!is_v4_1(its)) + goto out; + + vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); + + alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count); + + its_encode_alloc(cmd, alloc); + + /* + * GICv4.1 provides a way to get the VLPI state, which needs the vPE + * to be unmapped first, and in this case, we may remap the vPE + * back while the VPT is not empty. So we can't assume that the + * VPT is empty on map. This is why we never advertise PTZ. + */ + its_encode_ptz(cmd, false); + its_encode_vconf_addr(cmd, vconf_addr); + its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi); + +out: + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapp_cmd.vpe); +} + +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) + db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMAPTI); + its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmapti_cmd.vpe); +} + +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u32 db; + + if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) + db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; + else + db = 1023; + + its_encode_cmd(cmd, GITS_CMD_VMOVI); + its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id); + its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id); + its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id); + its_encode_db_phys_id(cmd, db); + its_encode_db_valid(cmd, true); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vmovi_cmd.vpe); +} + +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; + its_encode_cmd(cmd, GITS_CMD_VMOVP); + its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); + its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); + its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); + its_encode_target(cmd, target); + + if (is_v4_1(its)) { + its_encode_db(cmd, true); + its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi); + } + + its_fixup_cmd(cmd); + + return valid_vpe(its, 
desc->its_vmovp_cmd.vpe); +} + +static struct its_vpe *its_build_vinv_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev, + desc->its_inv_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vint_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_int_cmd.dev, + desc->its_int_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_INT); + its_encode_devid(cmd, desc->its_int_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_int_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_vclear_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + struct its_vlpi_map *map; + + map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev, + desc->its_clear_cmd.event_id); + + its_encode_cmd(cmd, GITS_CMD_CLEAR); + its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_clear_cmd.event_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, map->vpe); +} + +static struct its_vpe *its_build_invdb_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_INVDB); + its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_invdb_cmd.vpe); +} + +static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, + struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + if (WARN_ON(!is_v4_1(its))) + return NULL; + + its_encode_cmd(cmd, GITS_CMD_VSGI); + its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id); + its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi); + its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority); + its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group); + its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear); + its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable); + + its_fixup_cmd(cmd); + + return valid_vpe(its, desc->its_vsgi_cmd.vpe); +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! 
*/ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + /* Clear command */ + cmd->raw_cmd[0] = 0; + cmd->raw_cmd[1] = 0; + cmd->raw_cmd[2] = 0; + cmd->raw_cmd[3] = 0; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static int its_wait_for_range_completion(struct its_node *its, + u64 prev_idx, + struct its_cmd_block *to) +{ + u64 rd_idx, to_idx, linear_idx; + u32 count = 1000000; /* 1s! */ + + /* Linearize to_idx if the command set has wrapped around */ + to_idx = its_cmd_ptr_to_offset(its, to); + if (to_idx < prev_idx) + to_idx += ITS_CMD_QUEUE_SZ; + + linear_idx = prev_idx; + + while (1) { + s64 delta; + + rd_idx = readl_relaxed(its->base + GITS_CREADR); + + /* + * Compute the read pointer progress, taking the + * potential wrap-around into account. + */ + delta = rd_idx - prev_idx; + if (rd_idx < prev_idx) + delta += ITS_CMD_QUEUE_SZ; + + linear_idx += delta; + if (linear_idx >= to_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout (%llu %llu)\n", + to_idx, linear_idx); + return -1; + } + prev_idx = rd_idx; + cpu_relax(); + udelay(1); + } + + return 0; +} + +/* Warning, macro hell follows */ +#define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ +void name(struct its_node *its, \ + buildtype builder, \ + struct its_cmd_desc *desc) \ +{ \ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ + synctype *sync_obj; \ + unsigned long flags; \ + u64 rd_idx; \ + \ + raw_spin_lock_irqsave(&its->lock, flags); \ + \ + cmd = its_allocate_entry(its); \ + if (!cmd) { /* We're soooooo screewed... 
*/ \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + return; \ + } \ + sync_obj = builder(its, cmd, desc); \ + its_flush_cmd(its, cmd); \ + \ + if (sync_obj) { \ + sync_cmd = its_allocate_entry(its); \ + if (!sync_cmd) \ + goto post; \ + \ + buildfn(its, sync_cmd, sync_obj); \ + its_flush_cmd(its, sync_cmd); \ + } \ + \ +post: \ + rd_idx = readl_relaxed(its->base + GITS_CREADR); \ + next_cmd = its_post_commands(its); \ + raw_spin_unlock_irqrestore(&its->lock, flags); \ + \ + if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ +} + +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_collection *sync_col) +{ + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, + struct its_collection, its_build_sync_cmd) + +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, + struct its_vpe *sync_vpe) +{ + its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); + its_encode_vpeid(sync_cmd, sync_vpe->vpe_id); + + its_fixup_cmd(sync_cmd); +} + +static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, + struct its_vpe, its_build_vsync_cmd) + +static void its_send_int(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_int_cmd, &desc); +} + +static void its_send_clear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_clear_cmd, &desc); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapti_cmd.dev = dev; + desc.its_mapti_cmd.phys_id = irq_id; + desc.its_mapti_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapti_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, 
its_build_invall_cmd, &desc); +} + +static void its_send_vmapti(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmapti_cmd.vpe = map->vpe; + desc.its_vmapti_cmd.dev = dev; + desc.its_vmapti_cmd.virt_id = map->vintid; + desc.its_vmapti_cmd.event_id = id; + desc.its_vmapti_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc); +} + +static void its_send_vmovi(struct its_device *dev, u32 id) +{ + struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id); + struct its_cmd_desc desc; + + desc.its_vmovi_cmd.vpe = map->vpe; + desc.its_vmovi_cmd.dev = dev; + desc.its_vmovi_cmd.event_id = id; + desc.its_vmovi_cmd.db_enabled = map->db_enabled; + + its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); +} + +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) +{ + struct its_cmd_desc desc; + + desc.its_vmapp_cmd.vpe = vpe; + desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; + + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); +} + +static void its_send_vmovp(struct its_vpe *vpe) +{ + struct its_cmd_desc desc = {}; + struct its_node *its; + unsigned long flags; + int col_id = vpe->col_idx; + + desc.its_vmovp_cmd.vpe = vpe; + + if (!its_list_map) { + its = list_first_entry(&its_nodes, struct its_node, entry); + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + return; + } + + /* + * Yet another marvel of the architecture. If using the + * its_list "feature", we need to make sure that all ITSs + * receive all VMOVP commands in the same order. The only way + * to guarantee this is to make vmovp a serialization point. + * + * Wall <-- Head. + */ + raw_spin_lock_irqsave(&vmovp_lock, flags); + + desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; + desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm); + + /* Emit VMOVPs */ + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (!require_its_list_vmovp(vpe->its_vm, its)) + continue; + + desc.its_vmovp_cmd.col = &its->collections[col_id]; + its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_vinvall_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); +} + +static void its_send_vinv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINV command. This is just a normal INV, + * with a VSYNC instead of a SYNC. + */ + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc); +} + +static void its_send_vint(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VINT command. This is just a normal INT, + * with a VSYNC instead of a SYNC. + */ + desc.its_int_cmd.dev = dev; + desc.its_int_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc); +} + +static void its_send_vclear(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + /* + * There is no real VCLEAR command. This is just a normal CLEAR, + * with a VSYNC instead of a SYNC. 
+ */ + desc.its_clear_cmd.dev = dev; + desc.its_clear_cmd.event_id = event_id; + + its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc); +} + +static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) +{ + struct its_cmd_desc desc; + + desc.its_invdb_cmd.vpe = vpe; + its_send_single_vcommand(its, its_build_invdb_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ +static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + irq_hw_number_t hwirq; + void *va; + u8 *cfg; + + if (map) { + va = page_address(map->vm->vprop_page); + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; + } else { + va = gic_rdists->prop_table_va; + hwirq = d->hwirq; + } + + cfg = va + hwirq - 8192; + *cfg &= ~clr; + *cfg |= set | LPI_PROP_GROUP1; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... + */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); + else + dsb(ishst); +} + +static void wait_for_syncr(void __iomem *rdbase) +{ + while (readl_relaxed(rdbase + GICR_SYNCR) & 1) + cpu_relax(); +} + +static void __direct_lpi_inv(struct irq_data *d, u64 val) +{ + void __iomem *rdbase; + unsigned long flags; + int cpu; + + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + + if (map) { + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + WARN_ON(!is_v4_1(its_dev->its)); + + val = GICR_INVLPIR_V; + val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); + val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); + } else { + val = d->hwirq; + } + + __direct_lpi_inv(d, val); +} + +static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + + lpi_write_config(d, clr, set); + if (gic_rdists->has_direct_lpi && + (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) + direct_lpi_inv(d); + else if (!irqd_is_forwarded_to_vcpu(d)) + its_send_inv(its_dev, its_get_event_id(d)); + else + its_send_vinv(its_dev, its_get_event_id(d)); +} + +static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + struct its_vlpi_map *map; + + /* + * GICv4.1 does away with the per-LPI nonsense, nothing to do + * here. + */ + if (is_v4_1(its_dev->its)) + return; + + map = dev_event_to_vlpi_map(its_dev, event); + + if (map->db_enabled == enable) + return; + + map->db_enabled = enable; + + /* + * More fun with the architecture: + * + * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI + * value or to 1023, depending on the enable bit. But that + * would be issuing a mapping for an /existing/ DevID+EventID + * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI + * to the /same/ vPE, using this opportunity to adjust the + * doorbell. Mouahahahaha. We loves it, Precious. 
+ */ + its_send_vmovi(its_dev, event); +} + +static void its_mask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, false); + + lpi_update_config(d, LPI_PROP_ENABLED, 0); +} + +static void its_unmask_irq(struct irq_data *d) +{ + if (irqd_is_forwarded_to_vcpu(d)) + its_vlpi_set_doorbell(d, true); + + lpi_update_config(d, 0, LPI_PROP_ENABLED); +} + +static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + + return atomic_read(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_inc_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_inc(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static void its_dec_lpi_count(struct irq_data *d, int cpu) +{ + if (irqd_affinity_is_managed(d)) + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->managed); + else + atomic_dec(&per_cpu_ptr(&cpu_lpi_count_ft2500, cpu)->unmanaged); +} + +static unsigned int cpumask_pick_least_loaded(struct irq_data *d, + const struct cpumask *cpu_mask) +{ + unsigned int cpu = nr_cpu_ids, tmp; + int count = S32_MAX; + + for_each_cpu(tmp, cpu_mask) { + int this_count = its_read_lpi_count(d, tmp); + + if (this_count < count) { + cpu = tmp; + count = this_count; + } + } + + return cpu; +} + +/* + * As suggested by Thomas Gleixner in: + * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de + */ +static int its_select_cpu(struct irq_data *d, + const struct cpumask *aff_mask) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + static DEFINE_RAW_SPINLOCK(tmpmask_lock); + static struct cpumask __tmpmask; + struct cpumask *tmpmask; + unsigned long flags; + int cpu, node; + + node = its_dev->its->numa_node; + tmpmask = &__tmpmask; + + raw_spin_lock_irqsave(&tmpmask_lock, flags); + + if (!irqd_affinity_is_managed(d)) { + /* First try the NUMA node */ + if (node != NUMA_NO_NODE) { + /* + * Try the intersection of the affinity mask and the + * node mask (and the online mask, just to be safe). + */ + cpumask_and(tmpmask, cpumask_of_node(node), aff_mask); + cpumask_and(tmpmask, tmpmask, cpu_online_mask); + + /* + * Ideally, we would check if the mask is empty, and + * try again on the full node here. + * + * But it turns out that the way ACPI describes the + * affinity for ITSs only deals about memory, and + * not target CPUs, so it cannot describe a single + * ITS placed next to two NUMA nodes. + * + * Instead, just fallback on the online mask. This + * diverges from Thomas' suggestion above. 
+ */ + cpu = cpumask_pick_least_loaded(d, tmpmask); + if (cpu < nr_cpu_ids) + goto out; + + /* If we can't cross sockets, give up */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) + goto out; + + /* If the above failed, expand the search */ + } + + /* Try the intersection of the affinity and online masks */ + cpumask_and(tmpmask, aff_mask, cpu_online_mask); + + /* If that doesn't fly, the online mask is the last resort */ + if (cpumask_empty(tmpmask)) + cpumask_copy(tmpmask, cpu_online_mask); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } else { + cpumask_copy(tmpmask, aff_mask); + + /* If we cannot cross sockets, limit the search to that node */ + if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && + node != NUMA_NO_NODE) + cpumask_and(tmpmask, tmpmask, cpumask_of_node(node)); + + cpu = cpumask_pick_least_loaded(d, tmpmask); + } +out: + raw_spin_unlock_irqrestore(&tmpmask_lock, flags); + + pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu); + return cpu; +} + +#define MAX_MARS3_SKT_COUNT 8 + +static int its_cpumask_select(struct its_device *its_dev, + const struct cpumask *mask_val, + const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (skt_id != 0) { + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + } + + cpu = cpumask_any_and(mask_val, cpu_mask); + cpus = cpus + cpu % skt_cpu_cnt[skt_id]; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) + return cpu; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } + + return cpus; +} + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu; + const struct cpumask *cpu_mask = cpu_online_mask; + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + int prev_cpu; + unsigned int skt_t1, skt_t2, cpu_idx; + + /* A forwarded interrupt should use irq_set_vcpu_affinity */ + if (irqd_is_forwarded_to_vcpu(d)) + return -EINVAL; + + prev_cpu = its_dev->event_map.col_map[id]; + its_dec_lpi_count(d, prev_cpu); + + cpu_idx = its_cpumask_select(its_dev, mask_val, cpu_mask); + skt_t1 = (cpu_logical_map(cpu_idx) >> 16) & 0xff; + if (!force) + cpu = its_select_cpu(d, mask_val); + else + cpu = cpumask_pick_least_loaded(d, mask_val); + skt_t2 = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_t1 != skt_t2) + cpu = cpu_idx; + + if (cpu < 0 || cpu >= nr_cpu_ids) + goto err; + + /* don't set the affinity when the target cpu is same as current one */ + if (cpu != prev_cpu) { + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->event_map.col_map[id] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + } + + its_inc_lpi_count(d, cpu); + + return IRQ_SET_MASK_OK_DONE; + +err: + its_inc_lpi_count(d, prev_cpu); + return 
-EINVAL; +} + +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->get_msi_base(its_dev); + + msg->address_lo = lower_32_bits(addr); + msg->address_hi = upper_32_bits(addr); + msg->data = its_get_event_id(d); +} + +static int its_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (irqd_is_forwarded_to_vcpu(d)) { + if (state) + its_send_vint(its_dev, event); + else + its_send_vclear(its_dev, event); + } else { + if (state) + its_send_int(its_dev, event); + else + its_send_clear(its_dev, event); + } + + return 0; +} + +static int its_irq_retrigger(struct irq_data *d) +{ + return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +/* + * Two favourable cases: + * + * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times + * for vSGI delivery + * + * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough + * and we're better off mapping all VPEs always + * + * If neither (a) nor (b) is true, then we map vPEs on demand. + * + */ +static bool gic_requires_eager_mapping(void) +{ + if (!its_list_map || gic_rdists->has_rvpeid) + return true; + + return false; +} + +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. 
*/ + if (gic_requires_eager_mapping()) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + if (!info->map) + return -EINVAL; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm) { + struct its_vlpi_map *maps; + + maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps), + GFP_ATOMIC); + if (!maps) { + ret = -ENOMEM; + goto out; + } + + its_dev->event_map.vm = info->map->vm; + its_dev->event_map.vlpi_maps = maps; + } else if (its_dev->event_map.vm != info->map->vm) { + ret = -EINVAL; + goto out; + } + + /* Get our private copy of the mapping information */ + its_dev->event_map.vlpi_maps[event] = *info->map; + + if (irqd_is_forwarded_to_vcpu(d)) { + /* Already mapped, move it around */ + its_send_vmovi(its_dev, event); + } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. + */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + + /* Drop the physical mapping */ + its_send_discard(its_dev, event); + + /* and install the virtual one */ + its_send_vmapti(its_dev, event); + + /* Increment the number of VLPIs */ + its_dev->event_map.nr_vlpis++; + } + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_vlpi_map *map; + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + map = get_vlpi_map(d); + + if (!its_dev->event_map.vm || !map) { + ret = -EINVAL; + goto out; + } + + /* Copy our mapping information to the incoming request */ + *info->map = *map; + +out: + raw_spin_unlock(&its_dev->event_map.vlpi_lock); + return ret; +} + +static int its_vlpi_unmap(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + int ret = 0; + + raw_spin_lock(&its_dev->event_map.vlpi_lock); + + if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { + ret = -EINVAL; + goto out; + } + + /* Drop the virtual mapping */ + its_send_discard(its_dev, event); + + /* and restore the physical one */ + irqd_clr_forwarded_to_vcpu(d); + its_send_mapti(its_dev, d->hwirq, event); + lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO | + LPI_PROP_ENABLED | + LPI_PROP_GROUP1)); + + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + + /* + * Drop the refcount and make the device available again if + * this was the last VLPI. 
+ */
+ if (!--its_dev->event_map.nr_vlpis) {
+ its_dev->event_map.vm = NULL;
+ kfree(its_dev->event_map.vlpi_maps);
+ }
+
+out:
+ raw_spin_unlock(&its_dev->event_map.vlpi_lock);
+ return ret;
+}
+
+static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+
+ if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
+ return -EINVAL;
+
+ if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
+ lpi_update_config(d, 0xff, info->config);
+ else
+ lpi_write_config(d, 0xff, info->config);
+ its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
+
+ return 0;
+}
+
+static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
+{
+ struct its_device *its_dev = irq_data_get_irq_chip_data(d);
+ struct its_cmd_info *info = vcpu_info;
+
+ /* Need a v4 ITS */
+ if (!is_v4(its_dev->its))
+ return -EINVAL;
+
+ /* Unmap request? */
+ if (!info)
+ return its_vlpi_unmap(d);
+
+ switch (info->cmd_type) {
+ case MAP_VLPI:
+ return its_vlpi_map(d, info);
+
+ case GET_VLPI:
+ return its_vlpi_get(d, info);
+
+ case PROP_UPDATE_VLPI:
+ case PROP_UPDATE_AND_INV_VLPI:
+ return its_vlpi_prop_update(d, info);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct irq_chip its_irq_chip = {
+ .name = "ITS",
+ .irq_mask = its_mask_irq,
+ .irq_unmask = its_unmask_irq,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_affinity = its_set_affinity,
+ .irq_compose_msi_msg = its_irq_compose_msi_msg,
+ .irq_set_irqchip_state = its_irq_set_irqchip_state,
+ .irq_retrigger = its_irq_retrigger,
+ .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
+};
+
+
+/*
+ * How we allocate LPIs:
+ *
+ * lpi_range_list contains ranges of LPIs that are available to
+ * allocate from. To allocate LPIs, just pick the first range that
+ * fits the required allocation, and reduce it by the required
+ * amount. Once empty, remove the range from the list.
+ *
+ * To free a range of LPIs, add a free range to the list, sort it and
+ * merge the result if the new range happens to be adjacent to an
+ * already free block.
+ *
+ * The consequence of the above is that allocation cost is low, but
+ * freeing is expensive. We assume that freeing rarely occurs.
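+ *
+ * An illustrative (made-up) sequence: starting from a single free
+ * range [8192:65535], allocating 32 LPIs returns base 8192 and
+ * shrinks the range to [8224:65535]; freeing those 32 LPIs later
+ * re-inserts [8192:8223] and merges everything back into one
+ * contiguous range.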
+ */
+#define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
+
+static DEFINE_MUTEX(lpi_range_lock);
+static LIST_HEAD(lpi_range_list);
+
+struct lpi_range {
+ struct list_head entry;
+ u32 base_id;
+ u32 span;
+};
+
+static struct lpi_range *mk_lpi_range(u32 base, u32 span)
+{
+ struct lpi_range *range;
+
+ range = kmalloc(sizeof(*range), GFP_KERNEL);
+ if (range) {
+ range->base_id = base;
+ range->span = span;
+ }
+
+ return range;
+}
+
+static int alloc_lpi_range(u32 nr_lpis, u32 *base)
+{
+ struct lpi_range *range, *tmp;
+ int err = -ENOSPC;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
+ if (range->span >= nr_lpis) {
+ *base = range->base_id;
+ range->base_id += nr_lpis;
+ range->span -= nr_lpis;
+
+ if (range->span == 0) {
+ list_del(&range->entry);
+ kfree(range);
+ }
+
+ err = 0;
+ break;
+ }
+ }
+
+ mutex_unlock(&lpi_range_lock);
+
+ pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
+ return err;
+}
+
+static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
+{
+ if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
+ return;
+ if (a->base_id + a->span != b->base_id)
+ return;
+ b->base_id = a->base_id;
+ b->span += a->span;
+ list_del(&a->entry);
+ kfree(a);
+}
+
+static int free_lpi_range(u32 base, u32 nr_lpis)
+{
+ struct lpi_range *new, *old;
+
+ new = mk_lpi_range(base, nr_lpis);
+ if (!new)
+ return -ENOMEM;
+
+ mutex_lock(&lpi_range_lock);
+
+ list_for_each_entry_reverse(old, &lpi_range_list, entry) {
+ if (old->base_id < base)
+ break;
+ }
+ /*
+ * old is the last element with ->base_id smaller than base,
+ * so new goes right after it. If there are no elements with
+ * ->base_id smaller than base, &old->entry ends up pointing
+ * at the head of the list, and inserting new at the start of
+ * the list is the right thing to do in that case as well.
+ */
+ list_add(&new->entry, &old->entry);
+ /*
+ * Now check if we can merge with the preceding and/or
+ * following ranges.
+ */
+ merge_lpi_ranges(old, new);
+ merge_lpi_ranges(new, list_next_entry(new, entry));
+
+ mutex_unlock(&lpi_range_lock);
+ return 0;
+}
+
+static int __init its_lpi_init(u32 id_bits)
+{
+ u32 lpis = (1UL << id_bits) - 8192;
+ u32 numlpis;
+ int err;
+
+ numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
+
+ if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
+ lpis = numlpis;
+ pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
+ lpis);
+ }
+
+ /*
+ * Initializing the allocator is just the same as freeing the
+ * full range of LPIs.
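+ * e.g. with id_bits = 16 this seeds the allocator with the single
+ * range [8192:65535], i.e. 2^16 - 8192 = 57344 allocatable LPIs
+ * (INTIDs below 8192 are SGIs/PPIs/SPIs and never LPIs).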
+ */ + err = free_lpi_range(8192, lpis); + pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis); + return err; +} + +static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int err = 0; + + do { + err = alloc_lpi_range(nr_irqs, base); + if (!err) + break; + + nr_irqs /= 2; + } while (nr_irqs > 0); + + if (!nr_irqs) + err = -ENOSPC; + + if (err) + goto out; + + bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC); + if (!bitmap) + goto out; + + *nr_ids = nr_irqs; + +out: + if (!bitmap) + *base = *nr_ids = 0; + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) +{ + WARN_ON(free_lpi_range(base, nr_ids)); + bitmap_free(bitmap); +} + +static void gic_reset_prop_table(void *va) +{ + /* Priority 0xa0, Group-1, disabled */ + memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); +} + +static struct page *its_allocate_prop_table(gfp_t gfp_flags) +{ + struct page *prop_page; + + prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ)); + if (!prop_page) + return NULL; + + gic_reset_prop_table(page_address(prop_page)); + + return prop_page; +} + +static void its_free_prop_table(struct page *prop_page) +{ + free_pages((unsigned long)page_address(prop_page), + get_order(LPI_PROPBASE_SZ)); +} + +static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) +{ + phys_addr_t start, end, addr_end; + u64 i; + + /* + * We don't bother checking for a kdump kernel as by + * construction, the LPI tables are out of this kernel's + * memory map. + */ + if (is_kdump_kernel()) + return true; + + addr_end = addr + size - 1; + + for_each_reserved_mem_range(i, &start, &end) { + if (addr >= start && addr_end <= end) + return true; + } + + /* Not found, not a good sign... 
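+ * The inherited tables live in memory that is not known to be
+ * reserved, so nothing prevents a later (e.g. kexec'd) kernel
+ * from recycling it while the RDs still point at it.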
*/ + pr_warn("GIC-2500: Expected reserved range [%pa:%pa], not found\n", + &addr, &addr_end); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + return false; +} + +static int gic_reserve_range(phys_addr_t addr, unsigned long size) +{ + if (efi_enabled(EFI_CONFIG_TABLES)) + return efi_mem_reserve_persistent(addr, size); + + return 0; +} + +static int __init its_setup_lpi_prop_table(void) +{ + if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { + u64 val; + + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; + + gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); + gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ, + MEMREMAP_WB); + gic_reset_prop_table(gic_rdists->prop_table_va); + } else { + struct page *page; + + lpi_id_bits = min_t(u32, + GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), + ITS_MAX_LPI_NRBITS); + page = its_allocate_prop_table(GFP_NOWAIT); + if (!page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + gic_rdists->prop_table_pa = page_to_phys(page); + gic_rdists->prop_table_va = page_address(page); + WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, + LPI_PROPBASE_SZ)); + } + + pr_info("GIC-2500: using LPI property table @%pa\n", + &gic_rdists->prop_table_pa); + + return its_lpi_init(lpi_id_bits); +} + +static const char * const its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static u64 its_read_baser(struct its_node *its, struct its_baser *baser) +{ + u32 idx = baser - its->tables; + + return gits_read_baser(its->base + GITS_BASER + (idx << 3)); +} + +static void its_write_baser(struct its_node *its, struct its_baser *baser, + u64 val) +{ + u32 idx = baser - its->tables; + + gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); + baser->val = its_read_baser(its, baser); +} + +static int its_setup_baser(struct its_node *its, struct its_baser *baser, + u64 cache, u64 shr, u32 order, bool indirect) +{ + u64 val = its_read_baser(its, baser); + u64 esz = GITS_BASER_ENTRY_SIZE(val); + u64 type = GITS_BASER_TYPE(val); + u64 baser_phys, tmp; + u32 alloc_pages, psz; + struct page *page; + void *base; + + psz = baser->psz; + alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); + if (alloc_pages > GITS_BASER_PAGES_MAX) { + pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n", + &its->phys_base, its_base_type_string[type], + alloc_pages, GITS_BASER_PAGES_MAX); + alloc_pages = GITS_BASER_PAGES_MAX; + order = get_order(GITS_BASER_PAGES_MAX * psz); + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order); + if (!page) + return -ENOMEM; + + base = (void *)page_address(page); + baser_phys = virt_to_phys(base); + + /* Check if the physical address of the memory is above 48bits */ + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { + + /* 52bit PA is supported only when PageSize=64K */ + if (psz != SZ_64K) { + pr_err("ITS: no 52bit PA support when psz=%d\n", psz); + free_pages((unsigned long)base, order); + return -ENXIO; + } + + /* Convert 52bit PA to 48bit field */ + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); + } + +retry_baser: + val = (baser_phys | + (type << GITS_BASER_TYPE_SHIFT) | + 
((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
+ ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
+ cache |
+ shr |
+ GITS_BASER_VALID);
+
+ val |= indirect ? GITS_BASER_INDIRECT : 0x0;
+
+ switch (psz) {
+ case SZ_4K:
+ val |= GITS_BASER_PAGE_SIZE_4K;
+ break;
+ case SZ_16K:
+ val |= GITS_BASER_PAGE_SIZE_16K;
+ break;
+ case SZ_64K:
+ val |= GITS_BASER_PAGE_SIZE_64K;
+ break;
+ }
+
+ its_write_baser(its, baser, val);
+ tmp = baser->val;
+
+ if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
+ tmp &= ~GITS_BASER_SHAREABILITY_MASK;
+
+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
+ /*
+ * Shareability didn't stick. Just use
+ * whatever the read reported, which is likely
+ * to be the only thing this ITS
+ * supports. If that's zero, make it
+ * non-cacheable as well.
+ */
+ shr = tmp & GITS_BASER_SHAREABILITY_MASK;
+ if (!shr) {
+ cache = GITS_BASER_nC;
+ gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
+ }
+ goto retry_baser;
+ }
+
+ if (val != tmp) {
+ pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
+ &its->phys_base, its_base_type_string[type],
+ val, tmp);
+ free_pages((unsigned long)base, order);
+ return -ENXIO;
+ }
+
+ baser->order = order;
+ baser->base = base;
+ baser->psz = psz;
+ tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
+
+ pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
+ &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
+ its_base_type_string[type],
+ (unsigned long)virt_to_phys(base),
+ indirect ? "indirect" : "flat", (int)esz,
+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
+
+ return 0;
+}
+
+static bool its_parse_indirect_baser(struct its_node *its,
+ struct its_baser *baser,
+ u32 *order, u32 ids)
+{
+ u64 tmp = its_read_baser(its, baser);
+ u64 type = GITS_BASER_TYPE(tmp);
+ u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
+ u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
+ u32 new_order = *order;
+ u32 psz = baser->psz;
+ bool indirect = false;
+
+ /* No need to enable Indirection if memory requirement < (psz*2)bytes */
+ if ((esz << ids) > (psz * 2)) {
+ /*
+ * Find out whether hw supports a single or two-level
+ * table by reading bit at offset '62' after writing
+ * '1' to it.
+ */
+ its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
+ indirect = !!(baser->val & GITS_BASER_INDIRECT);
+
+ if (indirect) {
+ /*
+ * The size of the lvl2 table is equal to the ITS
+ * page size, 'psz'. To compute the lvl1 table
+ * size, subtract the ID bits covered by a single
+ * lvl2 table from 'ids' (as reported by the ITS
+ * hardware), and use the lvl1 entry size,
+ * GITS_LVL1_ENTRY_SIZE, in place of 'esz'.
+ */
+ ids -= ilog2(psz / (int)esz);
+ esz = GITS_LVL1_ENTRY_SIZE;
+ }
+ }
+
+ /*
+ * Allocate as many entries as required to fit the
+ * range of device IDs that the ITS can grok... The ID
+ * space being incredibly sparse, this results in a
+ * massive waste of memory if the two-level device table
+ * feature is not supported by hardware.
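+ * (as a worked example: a flat table covering a 32-bit DeviceID
+ * space with 8-byte entries would need 2^32 * 8 bytes = 32GB,
+ * while a two-level table only pays for the lvl2 pages that are
+ * actually populated)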
+ */ + new_order = max_t(u32, get_order(esz << ids), new_order); + if (new_order > MAX_ORDER) { + new_order = MAX_ORDER; + ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); + pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n", + &its->phys_base, its_base_type_string[type], + device_ids(its), ids); + } + + *order = new_order; + + return indirect; +} + +static u32 compute_common_aff(u64 val) +{ + u32 aff, clpiaff; + + aff = FIELD_GET(GICR_TYPER_AFFINITY, val); + clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); + + return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); +} + +static u32 compute_its_aff(struct its_node *its) +{ + u64 val; + u32 svpet; + + /* + * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute + * the resulting affinity. We then use that to see if this match + * our own affinity. + */ + svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); + val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); + return compute_common_aff(val); +} + +static struct its_node *find_sibling_its(struct its_node *cur_its) +{ + struct its_node *its; + u32 aff; + + if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) + return NULL; + + aff = compute_its_aff(cur_its); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser; + + if (!is_v4_1(its) || its == cur_its) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + return its; + } + + return NULL; +} + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i].base) { + free_pages((unsigned long)its->tables[i].base, + its->tables[i].order); + its->tables[i].base = NULL; + } + } +} + +static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) +{ + u64 psz = SZ_64K; + + while (psz) { + u64 val, gpsz; + + val = its_read_baser(its, baser); + val &= ~GITS_BASER_PAGE_SIZE_MASK; + + switch (psz) { + case SZ_64K: + gpsz = GITS_BASER_PAGE_SIZE_64K; + break; + case SZ_16K: + gpsz = GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_4K: + default: + gpsz = GITS_BASER_PAGE_SIZE_4K; + break; + } + + gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; + + val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); + its_write_baser(its, baser, val); + + if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) + break; + + switch (psz) { + case SZ_64K: + psz = SZ_16K; + break; + case SZ_16K: + psz = SZ_4K; + break; + case SZ_4K: + default: + return -1; + } + } + + baser->psz = psz; + return 0; +} + +static int its_alloc_tables(struct its_node *its) +{ + u64 shr = GITS_BASER_InnerShareable; + u64 cache = GITS_BASER_RaWaWb; + int err, i; + + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = its->tables + i; + u64 val = its_read_baser(its, baser); + u64 type = GITS_BASER_TYPE(val); + bool indirect = false; + u32 order; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + if (its_probe_baser_psz(its, baser)) { + its_free_tables(its); + return -ENXIO; + } + + order = get_order(baser->psz); + + switch (type) { + case GITS_BASER_TYPE_DEVICE: + indirect = its_parse_indirect_baser(its, baser, &order, + device_ids(its)); + break; + + case GITS_BASER_TYPE_VCPU: + if 
(is_v4_1(its)) { + struct its_node *sibling; + + WARN_ON(i != 2); + sibling = find_sibling_its(its); + if (sibling != NULL) { + *baser = sibling->tables[2]; + its_write_baser(its, baser, baser->val); + continue; + } + } + + indirect = its_parse_indirect_baser(its, baser, &order, + ITS_MAX_VPEID_BITS); + break; + } + + err = its_setup_baser(its, baser, cache, shr, order, indirect); + if (err < 0) { + its_free_tables(its); + return err; + } + + /* Update settings which will be used for next BASERn */ + cache = baser->val & GITS_BASER_CACHEABILITY_MASK; + shr = baser->val & GITS_BASER_SHAREABILITY_MASK; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_its(void) +{ + struct its_node *its; + u64 val; + u32 aff; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + list_for_each_entry(its, &its_nodes, entry) { + u64 baser, addr; + + if (!is_v4_1(its)) + continue; + + if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) + continue; + + if (aff != compute_its_aff(its)) + continue; + + /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ + baser = its->tables[2].val; + if (!(baser & GITS_BASER_VALID)) + continue; + + /* We have a winner! */ + gic_data_rdist()->vpe_l1_base = its->tables[2].base; + + val = GICR_VPROPBASER_4_1_VALID; + if (baser & GITS_BASER_INDIRECT) + val |= GICR_VPROPBASER_4_1_INDIRECT; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, + FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); + switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { + case GIC_PAGE_SIZE_64K: + addr = GITS_BASER_ADDR_48_to_52(baser); + break; + default: + addr = baser & GENMASK_ULL(47, 12); + break; + } + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); + val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, + FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, + FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); + + return val; + } + + return 0; +} + +static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) +{ + u32 aff; + u64 val; + int cpu; + + val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + aff = compute_common_aff(val); + + for_each_possible_cpu(cpu) { + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + + if (!base || cpu == smp_processor_id()) + continue; + + val = gic_read_typer(base + GICR_TYPER); + if (aff != compute_common_aff(val)) + continue; + + /* + * At this point, we have a victim. This particular CPU + * has already booted, and has an affinity that matches + * ours wrt CommonLPIAff. Let's use its own VPROPBASER. + * Make sure we don't write the Z bit in that case. 
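+ * (the Z bit requests zeroing of the vPE table, which must not
+ * happen to a table another CPU is already relying on)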
+ */ + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_Z; + + gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; + *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; + + return val; + } + + return 0; +} + +static bool allocate_vpe_l2_table(int cpu, u32 id) +{ + void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; + unsigned int psz, esz, idx, npg, gpsz; + u64 val; + struct page *page; + __le64 *table; + + if (!gic_rdists->has_rvpeid) + return true; + + /* Skip non-present CPUs */ + if (!base) + return true; + + val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); + + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; + + switch (gpsz) { + default: + WARN_ON(1); + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* Don't allow vpe_id that exceeds single, flat table limit */ + if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) + return (id < (npg * psz / (esz * SZ_8))); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(psz / (esz * SZ_8)); + if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = gic_data_rdist_cpu(cpu)->vpe_l1_base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to RD hardware */ + dsb(sy); + } + + return true; +} + +static int allocate_vpe_l1_table(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val, gpsz, npg, pa; + unsigned int psz = SZ_64K; + unsigned int np, epp, esz; + struct page *page; + + if (!gic_rdists->has_rvpeid) + return 0; + + /* + * if VPENDBASER.Valid is set, disable any previously programmed + * VPE by setting PendingLast while clearing Valid. This has the + * effect of making sure no doorbell will be generated and we can + * then safely clear VPROPBASER.Valid. + */ + if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + vlpi_base + GICR_VPENDBASER); + + /* + * If we can inherit the configuration from another RD, let's do + * so. Otherwise, we have to go through the allocation process. We + * assume that all RDs have the exact same requirements, as + * nothing will work otherwise. 
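+ *
+ * Order of preference below: another RD in our CommonLPIAff
+ * group first, then a v4.1 ITS that already owns a suitable vPE
+ * table, and only then a fresh allocation.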
+ */ + val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC); + if (!gic_data_rdist()->vpe_table_mask) + return -ENOMEM; + + val = inherit_vpe_l1_table_from_its(); + if (val & GICR_VPROPBASER_4_1_VALID) + goto out; + + /* First probe the page size */ + val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); + gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); + esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); + + switch (gpsz) { + default: + gpsz = GIC_PAGE_SIZE_4K; + fallthrough; + case GIC_PAGE_SIZE_4K: + psz = SZ_4K; + break; + case GIC_PAGE_SIZE_16K: + psz = SZ_16K; + break; + case GIC_PAGE_SIZE_64K: + psz = SZ_64K; + break; + } + + /* + * Start populating the register from scratch, including RO fields + * (which we want to print in debug cases...) + */ + val = 0; + val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); + + /* How many entries per GIC page? */ + esz++; + epp = psz / (esz * SZ_8); + + /* + * If we need more than just a single L1 page, flag the table + * as indirect and compute the number of required L1 pages. + */ + if (epp < ITS_MAX_VPEID) { + int nl2; + + val |= GICR_VPROPBASER_4_1_INDIRECT; + + /* Number of L2 pages required to cover the VPEID space */ + nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); + + /* Number of L1 pages to point to the L2 pages */ + npg = DIV_ROUND_UP(nl2 * SZ_8, psz); + } else { + npg = 1; + } + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); + + /* Right, that's the number of CPU pages we need for L1 */ + np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); + + pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n", + np, npg, psz, epp, esz); + page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, get_order(np * PAGE_SIZE)); + if (!page) + return -ENOMEM; + + gic_data_rdist()->vpe_l1_base = page_address(page); + pa = virt_to_phys(page_address(page)); + WARN_ON(!IS_ALIGNED(pa, psz)); + + val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + val |= GICR_VPROPBASER_4_1_Z; + val |= GICR_VPROPBASER_4_1_VALID; + +out: + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); + + pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n", + smp_processor_id(), val, + cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); + + return 0; +} + +static int its_alloc_collections(struct its_node *its) +{ + int i; + + its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + for (i = 0; i < nr_cpu_ids; i++) + its->collections[i].target_address = ~0ULL; + + return 0; +} + +static struct page *its_allocate_pending_table(gfp_t gfp_flags) +{ + struct page *pend_page; + + pend_page = alloc_pages(gfp_flags | __GFP_ZERO, + get_order(LPI_PENDBASE_SZ)); + if (!pend_page) + return NULL; + + /* Make sure the GIC will observe the zero-ed page */ + gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); + + return pend_page; +} + +static void its_free_pending_table(struct page *pt) +{ + free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ)); +} + +/* + * Booting with kdump and LPIs enabled is generally fine. 
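+ * (a kdump kernel is confined to the crashkernel window, so the
+ * inherited LPI tables sit outside its memory map and cannot be
+ * clobbered by it.)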
Any other + * case is wrong in the absence of firmware/EFI support. + */ +static bool enabled_lpis_allowed(void) +{ + phys_addr_t addr; + u64 val; + + if (is_kdump_kernel()) + return true; + + /* Check whether the property table is in a reserved region */ + val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); + addr = val & GENMASK_ULL(51, 12); + + return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); +} + +static int __init allocate_lpi_tables(void) +{ + u64 val; + int err, cpu; + + /* + * If LPIs are enabled while we run this from the boot CPU, + * flag the RD tables as pre-allocated if the stars do align. + */ + val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); + if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { + gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | + RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); + pr_info("GIC-2500: Using preallocated redistributor tables\n"); + } + + err = its_setup_lpi_prop_table(); + if (err) + return err; + + /* + * We allocate all the pending tables anyway, as we may have a + * mix of RDs that have had LPIs enabled, and some that + * don't. We'll free the unused ones as each CPU comes online. + */ + for_each_possible_cpu(cpu) { + struct page *pend_page; + + pend_page = its_allocate_pending_table(GFP_NOWAIT); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu); + return -ENOMEM; + } + + gic_data_rdist_cpu(cpu)->pend_page = pend_page; + } + + return 0; +} + +static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) +{ + u32 count = 1000000; /* 1s! */ + bool clean; + u64 val; + + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + val &= ~GICR_VPENDBASER_Valid; + val &= ~clr; + val |= set; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); + + do { + val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); + clean = !(val & GICR_VPENDBASER_Dirty); + if (!clean) { + count--; + cpu_relax(); + udelay(1); + } + } while (!clean && count); + + if (unlikely(val & GICR_VPENDBASER_Dirty)) { + pr_err_ratelimited("ITS virtual pending table not cleaning\n"); + val |= GICR_VPENDBASER_PendingLast; + } + + return val; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + phys_addr_t paddr; + u64 val, tmp; + + if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) + return; + + val = readl_relaxed(rbase + GICR_CTLR); + if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && + (val & GICR_CTLR_ENABLE_LPIS)) { + /* + * Check that we get the same property table on all + * RDs. If we don't, this is hopeless. 
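+ * (the driver keeps a single global prop_table_pa, so an RD
+ * pointing anywhere else cannot have been programmed by us)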
+ */ + paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); + paddr &= GENMASK_ULL(51, 12); + if (WARN_ON(gic_rdists->prop_table_pa != paddr)) + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); + paddr &= GENMASK_ULL(51, 16); + + WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); + gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; + + goto out; + } + + pend_page = gic_data_rdist()->pend_page; + paddr = page_to_phys(pend_page); + + /* set PROPBASE */ + val = (gic_rdists->prop_table_pa | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_RaWaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. + */ + val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | + GICR_PROPBASER_CACHEABILITY_MASK); + val |= GICR_PROPBASER_nC; + gicr_write_propbaser(val, rbase + GICR_PROPBASER); + } + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PENDBASER_InnerShareable | + GICR_PENDBASER_RaWaWb); + + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); + + if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; + + if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must remove the + * cacheability attributes as well. + */ + val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | + GICR_PENDBASER_CACHEABILITY_MASK); + val |= GICR_PENDBASER_nC; + gicr_write_pendbaser(val, rbase + GICR_PENDBASER); + } + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + + /* + * It's possible for CPU to receive VLPIs before it is + * scheduled as a vPE, especially for the first CPU, and the + * VLPI with INTID larger than 2^(IDbits+1) will be considered + * as out of range and dropped by GIC. + * So we initialize IDbits to known value to avoid VLPI drop. + */ + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n", + smp_processor_id(), val); + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + /* + * Also clear Valid bit of GICR_VPENDBASER, in case some + * ancient programming gets left in and has possibility of + * corrupting memory. + */ + val = its_clear_vpend_valid(vlpi_base, 0, 0); + } + + if (allocate_vpe_l1_table()) { + /* + * If the allocation has failed, we're in massive trouble. + * Disable direct injection, and pray that no VM was + * already running... + */ + gic_rdists->has_rvpeid = false; + gic_rdists->has_vlpis = false; + } + + /* Make sure the GIC has seen the above */ + dsb(sy); +out: + gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; + pr_info("GIC-2500: CPU%d: using %s LPI pending table @%pa\n", + smp_processor_id(), + gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? 
+ "reserved" : "allocated", + &paddr); +} + +static void its_cpu_init_collection(struct its_node *its) +{ + int cpu = smp_processor_id(); + u64 target; + unsigned long mpid; + phys_addr_t its_phys_base; + unsigned long skt_id; + + /* avoid cross node collections and its mapping */ + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { + struct device_node *cpu_node; + + cpu_node = of_get_cpu_node(cpu, NULL); + if (its->numa_node != NUMA_NO_NODE && + its->numa_node != of_node_to_nid(cpu_node)) + return; + } + + mpid = cpu_logical_map(cpu); + its_phys_base = its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* This ITS wants a linear CPU number. */ + target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target) << 16; + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu % 64; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); +} + +static void its_cpu_init_collections(void) +{ + struct its_node *its; + + raw_spin_lock(&its_lock); + + list_for_each_entry(its, &its_nodes, entry) + its_cpu_init_collection(its); + + raw_spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + unsigned long flags; + + raw_spin_lock_irqsave(&its->lock, flags); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock_irqrestore(&its->lock, flags); + + return its_dev; +} + +static struct its_baser *its_get_baser(struct its_node *its, u32 type) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (GITS_BASER_TYPE(its->tables[i].val) == type) + return &its->tables[i]; + } + + return NULL; +} + +static bool its_alloc_table_entry(struct its_node *its, + struct its_baser *baser, u32 id) +{ + struct page *page; + u32 esz, idx; + __le64 *table; + + /* Don't allow device id that exceeds single, flat table limit */ + esz = GITS_BASER_ENTRY_SIZE(baser->val); + if (!(baser->val & GITS_BASER_INDIRECT)) + return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); + + /* Compute 1st level table index & check if that exceeds table limit */ + idx = id >> ilog2(baser->psz / esz); + if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) + return false; + + table = baser->base; + + /* Allocate memory for 2nd level table */ + if (!table[idx]) { + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(baser->psz)); + if (!page) + return false; + + /* Flush Lvl2 table to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(page_address(page), baser->psz); + + table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); + + /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ + if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) + gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); + + /* Ensure updated table contents are visible to ITS hardware */ + dsb(sy); + } + + return true; +} + +static bool its_alloc_device_table(struct its_node *its, u32 dev_id) +{ + struct its_baser *baser; + 
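+ /*
+ * Without a device table BASER, GITS_TYPER.Devbits (via
+ * device_ids()) is the only limit on the DeviceID space.
+ */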
+ baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); + + /* Don't allow device id that exceeds ITS hardware limit */ + if (!baser) + return (ilog2(dev_id) < device_ids(its)); + + return its_alloc_table_entry(its, baser, dev_id); +} + +static bool its_alloc_vpe_table(u32 vpe_id) +{ + struct its_node *its; + int cpu; + + /* + * Make sure the L2 tables are allocated on *all* v4 ITSs. We + * could try and only do it on ITSs corresponding to devices + * that have interrupts targeted at this VPE, but the + * complexity becomes crazy (and you have tons of memory + * anyway, right?). + */ + list_for_each_entry(its, &its_nodes, entry) { + struct its_baser *baser; + + if (!is_v4(its)) + continue; + + baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); + if (!baser) + return false; + + if (!its_alloc_table_entry(its, baser, vpe_id)) + return false; + } + + /* Non v4.1? No need to iterate RDs and go back early. */ + if (!gic_rdists->has_rvpeid) + return true; + + /* + * Make sure the L2 tables are allocated for all copies of + * the L1 table on *all* v4.1 RDs. + */ + for_each_possible_cpu(cpu) { + if (!allocate_vpe_l2_table(cpu, vpe_id)) + return false; + } + + return true; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs, bool alloc_lpis) +{ + struct its_device *dev; + unsigned long *lpi_map = NULL; + unsigned long flags; + u16 *col_map = NULL; + void *itt; + int lpi_base; + int nr_lpis; + int nr_ites; + int sz; + + if (!its_alloc_device_table(its, dev_id)) + return NULL; + + if (WARN_ON(!is_power_of_2(nvecs))) + nvecs = roundup_pow_of_two(nvecs); + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + /* + * Even if the device wants a single LPI, the ITT must be + * sized as a power of two (and you need at least one bit...). + */ + nr_ites = max(2, nvecs); + sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node); + if (alloc_lpis) { + lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis); + if (lpi_map) + col_map = kcalloc(nr_lpis, sizeof(*col_map), + GFP_KERNEL); + } else { + col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL); + nr_lpis = 0; + lpi_base = 0; + } + + if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { + kfree(dev); + kfree(itt); + bitmap_free(lpi_map); + kfree(col_map); + return NULL; + } + + gic_flush_dcache_to_poc(itt, sz); + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nr_ites; + dev->event_map.lpi_map = lpi_map; + dev->event_map.col_map = col_map; + dev->event_map.lpi_base = lpi_base; + dev->event_map.nr_lpis = nr_lpis; + raw_spin_lock_init(&dev->event_map.vlpi_lock); + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock_irqsave(&its->lock, flags); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock_irqrestore(&its->lock, flags); + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); + list_del(&its_dev->entry); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); + kfree(its_dev->event_map.col_map); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) +{ + int idx; + + /* Find a free LPI region in lpi_map and allocate them. 
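+ * (bitmap_find_free_region() hands out naturally aligned
+ * power-of-two regions, hence the get_count_order() below)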
*/ + idx = bitmap_find_free_region(dev->event_map.lpi_map, + dev->event_map.nr_lpis, + get_count_order(nvecs)); + if (idx < 0) + return -ENOSPC; + + *hwirq = dev->event_map.lpi_base + idx; + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct its_node *its; + struct its_device *its_dev; + struct msi_domain_info *msi_info; + u32 dev_id; + int err = 0; + + /* + * We ignore "dev" entirely, and rely on the dev_id that has + * been passed via the scratchpad. This limits this domain's + * usefulness to upper layers that definitely know that they + * are built on top of the ITS. + */ + dev_id = info->scratchpad[0].ul; + + msi_info = msi_get_domain_info(domain); + its = msi_info->data; + + if (!gic_rdists->has_direct_lpi && + vpe_proxy.dev && + vpe_proxy.dev->its == its && + dev_id == vpe_proxy.dev->device_id) { + /* Bad luck. Get yourself a better implementation */ + WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", + dev_id); + return -EINVAL; + } + + mutex_lock(&its->dev_alloc_lock); + its_dev = its_find_device(its, dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + its_dev->shared = true; + pr_debug("Reusing ITT for devID %x\n", dev_id); + goto out; + } + + its_dev = its_create_device(its, dev_id, nvec, true); + if (!its_dev) { + err = -ENOMEM; + goto out; + } + + if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) + its_dev->shared = true; + + pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); +out: + mutex_unlock(&its->dev_alloc_lock); + info->scratchpad[0].ptr = its_dev; + return err; +} + +static struct msi_domain_ops its_msi_domain_ops = { + .msi_prepare = its_msi_prepare, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct irq_fwspec fwspec; + + if (irq_domain_get_of_node(domain->parent)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = GIC_IRQ_TYPE_LPI; + fwspec.param[1] = hwirq; + fwspec.param[2] = IRQ_TYPE_EDGE_RISING; + } else if (is_fwnode_irqchip(domain->parent->fwnode)) { + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 2; + fwspec.param[0] = hwirq; + fwspec.param[1] = IRQ_TYPE_EDGE_RISING; + } else { + return -EINVAL; + } + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + struct its_node *its = its_dev->its; + struct irq_data *irqd; + irq_hw_number_t hwirq; + int err; + int i; + + err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); + if (err) + return err; + + err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); + if (err) + return err; + + for (i = 0; i < nr_irqs; i++) { + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq + i, &its_irq_chip, its_dev); + irqd = irq_get_irq_data(virq + i); + irqd_set_single_target(irqd); + irqd_set_affinity_on_activate(irqd); + irqd_set_resend_when_in_progress(irqd); + pr_debug("ID:%d pID:%d vID:%d\n", + (int)(hwirq + i - its_dev->event_map.lpi_base), + (int)(hwirq + i), virq + i); + } + + return 0; +} + +static int its_cpumask_first(struct its_device *its_dev, + 
const struct cpumask *cpu_mask) +{ + unsigned int skt, skt_id, i; + phys_addr_t its_phys_base; + unsigned int cpu, cpus = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SKT_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + its_phys_base = its_dev->its->phys_base; + skt_id = (its_phys_base >> 41) & 0x7; + + if (skt_id != 0) + for (i = 0; i < skt_id; i++) + cpus += skt_cpu_cnt[i]; + + cpu = cpumask_first(cpu_mask); + if ((cpu > cpus) && (cpu < (cpus + skt_cpu_cnt[skt_id]))) + cpus = cpu; + + if (is_kdump_kernel()) { + skt = (cpu_logical_map(cpu) >> 16) & 0xff; + if (skt_id == skt) + return cpu; + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SKT_COUNT)) { + if (skt_id == skt) + return i; + } else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + } + + return cpus; +} + +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + const struct cpumask *cpu_mask = cpu_online_mask; + int cpu; + + cpu = its_cpumask_first(its_dev, cpu_mask); + + if (cpu < 0 || cpu >= nr_cpu_ids) + return -EINVAL; + + its_inc_lpi_count(d, cpu); + its_dev->event_map.col_map[event] = cpu; + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + + /* Map the GIC IRQ and event to the device */ + its_send_mapti(its_dev, d->hwirq, event); + return 0; +} + +static void its_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + u32 event = its_get_event_id(d); + + its_dec_lpi_count(d, its_dev->event_map.col_map[event]); + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its = its_dev->its; + int i; + + bitmap_release_region(its_dev->event_map.lpi_map, + its_get_event_id(irq_domain_get_irq_data(domain, virq)), + get_count_order(nr_irqs)); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(data); + } + + mutex_lock(&its->dev_alloc_lock); + + /* + * If all interrupts have been freed, start mopping the + * floor. This is conditioned on the device not being shared. + */ + if (!its_dev->shared && + bitmap_empty(its_dev->event_map.lpi_map, + its_dev->event_map.nr_lpis)) { + its_lpi_free(its_dev->event_map.lpi_map, + its_dev->event_map.lpi_base, + its_dev->event_map.nr_lpis); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + mutex_unlock(&its->dev_alloc_lock); + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, + .activate = its_irq_domain_activate, + .deactivate = its_irq_domain_deactivate, +}; + +/* + * This is insane. 
+ * + * If a GICv4.0 doesn't implement Direct LPIs (which is extremely + * likely), the only way to perform an invalidate is to use a fake + * device to issue an INV command, implying that the LPI has first + * been mapped to some event on that device. Since this is not exactly + * cheap, we try to keep that mapping around as long as possible, and + * only issue an UNMAP if we're short on available slots. + * + * Broken by design(tm). + * + * GICv4.1, on the other hand, mandates that we're able to invalidate + * by writing to a MMIO register. It doesn't implement the whole of + * DirectLPI, but that's good enough. And most of the time, we don't + * even have to invalidate anything, as the redistributor can be told + * whether to generate a doorbell or not (we thus leave it enabled, + * always). + */ +static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already unmapped? */ + if (vpe->vpe_proxy_event == -1) + return; + + its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); + vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; + + /* + * We don't track empty slots at all, so let's move the + * next_victim pointer if we can quickly reuse that slot + * instead of nuking an existing entry. Not clear that this is + * always a win though, and this might just generate a ripple + * effect... Let's just hope VPEs don't migrate too often. + */ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + vpe_proxy.next_victim = vpe->vpe_proxy_event; + + vpe->vpe_proxy_event = -1; +} + +static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (!gic_rdists->has_direct_lpi) { + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + its_vpe_db_proxy_unmap_locked(vpe); + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); + } +} + +static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) +{ + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + /* Already mapped? */ + if (vpe->vpe_proxy_event != -1) + return; + + /* This slot was already allocated. Kick the other VPE out. 
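+ * (the evicted vPE only loses its proxy event; it gets lazily
+ * re-mapped the next time its doorbell needs attention)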
*/ + if (vpe_proxy.vpes[vpe_proxy.next_victim]) + its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); + + /* Map the new VPE instead */ + vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; + vpe->vpe_proxy_event = vpe_proxy.next_victim; + vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; + + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; + its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); +} + +static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) +{ + unsigned long flags; + struct its_collection *target_col; + + /* GICv4.1 doesn't use a proxy, so nothing to do here */ + if (gic_rdists->has_rvpeid) + return; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + + return; + } + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + + target_col = &vpe_proxy.dev->its->collections[to]; + its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); + vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static int its_vpe_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + int from, cpu = cpumask_first(mask_val); + unsigned long flags; + + /* + * Changing affinity is mega expensive, so let's be as lazy as + * we can and only do it if we really have to. Also, if mapped + * into the proxy device, we need to move the doorbell + * interrupt to its new location. + * + * Another thing is that changing the affinity of a vPE affects + * *other interrupts* such as all the vLPIs that are routed to + * this vPE. This means that the irq_desc lock is not enough to + * protect us, and that we must ensure nobody samples vpe->col_idx + * during the update, hence the lock below which must also be + * taken on any vLPI handling path that evaluates vpe->col_idx. + */ + from = vpe_to_cpuid_lock(vpe, &flags); + if (from == cpu) + goto out; + + vpe->col_idx = cpu; + + /* + * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD + * is sharing its VPE table with the current one. 
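+ * (vpe_table_mask is the set of CPUs sharing this RD's vPE
+ * table, as recorded by allocate_vpe_l1_table())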
+ */ + if (gic_data_rdist_cpu(cpu)->vpe_table_mask && + cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask)) + goto out; + + its_send_vmovp(vpe); + its_vpe_db_proxy_move(vpe, from, cpu); + +out: + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + vpe_to_cpuid_unlock(vpe, flags); + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_wait_vpt_parse_complete(void) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (!gic_rdists->has_vpend_valid_dirty) + return; + + WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, + val, + !(val & GICR_VPENDBASER_Dirty), + 1, 500)); +} + +static void its_vpe_schedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + /* Schedule the VPE */ + val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & + GENMASK_ULL(51, 12); + val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; + val |= GICR_VPROPBASER_RaWb; + val |= GICR_VPROPBASER_InnerShareable; + gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); + + val = virt_to_phys(page_address(vpe->vpt_page)) & + GENMASK_ULL(51, 16); + val |= GICR_VPENDBASER_RaWaWb; + val |= GICR_VPENDBASER_InnerShareable; + /* + * There is no good way of finding out if the pending table is + * empty as we can race against the doorbell interrupt very + * easily. So in the end, vpe->pending_last is only an + * indication that the vcpu has something pending, not one + * that the pending table is empty. A good implementation + * would be able to read its coarse map pretty quickly anyway, + * making this a tolerable issue. + */ + val |= GICR_VPENDBASER_PendingLast; + val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; + val |= GICR_VPENDBASER_Valid; + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_deschedule(struct its_vpe *vpe) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + val = its_clear_vpend_valid(vlpi_base, 0, 0); + + vpe->idai = !!(val & GICR_VPENDBASER_IDAI); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); +} + +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. 
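+ * (what matters is that the RDs re-read the vLPI configuration;
+ * there is no per-ITS state to invalidate)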
+ */ + its_send_vinvall(its, vpe); + return; + } +} + +static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_schedule(vpe); + return 0; + + case DESCHEDULE_VPE: + its_vpe_deschedule(vpe); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static void its_vpe_send_cmd(struct its_vpe *vpe, + void (*cmd)(struct its_device *, u32)) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&vpe_proxy.lock, flags); + + its_vpe_db_proxy_map_locked(vpe); + cmd(vpe_proxy.dev, vpe->vpe_proxy_event); + + raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); +} + +static void its_vpe_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else + its_vpe_send_cmd(vpe, its_send_inv); +} + +static void its_vpe_mask_irq(struct irq_data *d) +{ + /* + * We need to unmask the LPI, which is described by the parent + * irq_data. Instead of calling into the parent (which won't + * exactly do the right thing, let's simply use the + * parent_data pointer. Yes, I'm naughty. + */ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_send_inv(d); +} + +static void its_vpe_unmask_irq(struct irq_data *d) +{ + /* Same hack as above... */ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_send_inv(d); +} + +static int its_vpe_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (gic_rdists->has_direct_lpi) { + void __iomem *rdbase; + + rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; + if (state) { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); + } else { + gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); + wait_for_syncr(rdbase); + } + } else { + if (state) + its_vpe_send_cmd(vpe, its_send_int); + else + its_vpe_send_cmd(vpe, its_send_clear); + } + + return 0; +} + +static int its_vpe_retrigger(struct irq_data *d) +{ + return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); +} + +static struct irq_chip its_vpe_irq_chip = { + .name = "GICv4-vpe", + .irq_mask = its_vpe_mask_irq, + .irq_unmask = its_vpe_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_retrigger = its_vpe_retrigger, + .irq_set_irqchip_state = its_vpe_set_irqchip_state, + .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, +}; + +static struct its_node *find_4_1_its(void) +{ + static struct its_node *its; + + if (!its) { + list_for_each_entry(its, &its_nodes, entry) { + if (is_v4_1(its)) + return its; + } + + /* Oops? */ + its = NULL; + } + + return its; +} + +static void its_vpe_4_1_send_inv(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * GICv4.1 wants doorbells to be invalidated using the + * INVDB command in order to be broadcast to all RDs. Send + * it to the first valid ITS, and let the HW do its magic. 
+ */ + its = find_4_1_its(); + if (its) + its_send_invdb(its, vpe); +} + +static void its_vpe_4_1_mask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_unmask_irq(struct irq_data *d) +{ + lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); + its_vpe_4_1_send_inv(d); +} + +static void its_vpe_4_1_schedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val = 0; + + /* Schedule the VPE */ + val |= GICR_VPENDBASER_Valid; + val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; + val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; + val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); + + gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); +} + +static void its_vpe_4_1_deschedule(struct its_vpe *vpe, + struct its_cmd_info *info) +{ + void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); + u64 val; + + if (info->req_db) { + unsigned long flags; + + /* + * vPE is going to block: make the vPE non-resident with + * PendingLast clear and DB set. The GIC guarantees that if + * we read-back PendingLast clear, then a doorbell will be + * delivered when an interrupt comes. + * + * Note the locking to deal with the concurrent update of + * pending_last from the doorbell interrupt handler that can + * run concurrently. + */ + raw_spin_lock_irqsave(&vpe->vpe_lock, flags); + val = its_clear_vpend_valid(vlpi_base, + GICR_VPENDBASER_PendingLast, + GICR_VPENDBASER_4_1_DB); + vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); + raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); + } else { + /* + * We're not blocking, so just make the vPE non-resident + * with PendingLast set, indicating that we'll be back. 
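+ * (no doorbell was requested, so unlike the branch above there
+ * is no race against the doorbell handler updating pending_last,
+ * and no locking is needed)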
+ */ + val = its_clear_vpend_valid(vlpi_base, + 0, + GICR_VPENDBASER_PendingLast); + vpe->pending_last = true; + } +} + +static void its_vpe_4_1_invall(struct its_vpe *vpe) +{ + void __iomem *rdbase; + unsigned long flags; + u64 val; + int cpu; + + val = GICR_INVALLR_V; + val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); + + /* Target the redistributor this vPE is currently known on */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVALLR); + + wait_for_syncr(rdbase); + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); +} + +static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case SCHEDULE_VPE: + its_vpe_4_1_schedule(vpe, info); + return 0; + + case DESCHEDULE_VPE: + its_vpe_4_1_deschedule(vpe, info); + return 0; + + case COMMIT_VPE: + its_wait_vpt_parse_complete(); + return 0; + + case INVALL_VPE: + its_vpe_4_1_invall(vpe); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_vpe_4_1_irq_chip = { + .name = "GICv4.1-vpe", + .irq_mask = its_vpe_4_1_mask_irq, + .irq_unmask = its_vpe_4_1_unmask_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = its_vpe_set_affinity, + .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, +}; + +static void its_configure_sgi(struct irq_data *d, bool clear) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_desc desc; + + desc.its_vsgi_cmd.vpe = vpe; + desc.its_vsgi_cmd.sgi = d->hwirq; + desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; + desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; + desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; + desc.its_vsgi_cmd.clear = clear; + + /* + * GICv4.1 allows us to send VSGI commands to any ITS as long as the + * destination VPE is mapped there. Since we map them eagerly at + * activation time, we're pretty sure the first GICv4.1 ITS will do. + */ + its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); +} + +static void its_sgi_mask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); +} + +static void its_sgi_unmask_irq(struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + vpe->sgi_config[d->hwirq].enabled = true; + its_configure_sgi(d, false); +} + +static int its_sgi_set_affinity(struct irq_data *d, + const struct cpumask *mask_val, + bool force) +{ + /* + * There is no notion of affinity for virtual SGIs, at least + * not on the host (since they can only be targeting a vPE). + * Tell the kernel we've done whatever it asked for. 
+ */ + irq_data_update_effective_affinity(d, mask_val); + return IRQ_SET_MASK_OK; +} + +static int its_sgi_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, + bool state) +{ + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + if (state) { + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its = find_4_1_its(); + u64 val; + + val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); + val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); + writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); + } else { + its_configure_sgi(d, true); + } + + return 0; +} + +static int its_sgi_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + void __iomem *base; + unsigned long flags; + u32 count = 1000000; /* 1s! */ + u32 status; + int cpu; + + if (which != IRQCHIP_STATE_PENDING) + return -EINVAL; + + /* + * Locking galore! We can race against two different events: + * + * - Concurrent vPE affinity change: we must make sure it cannot + * happen, or we'll talk to the wrong redistributor. This is + * identical to what happens with vLPIs. + * + * - Concurrent VSGIPENDR access: As it involves accessing two + * MMIO registers, this must be made atomic one way or another. + */ + cpu = vpe_to_cpuid_lock(vpe, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; + writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); + do { + status = readl_relaxed(base + GICR_VSGIPENDR); + if (!(status & GICR_VSGIPENDR_BUSY)) + goto out; + + count--; + if (!count) { + pr_err_ratelimited("Unable to get SGI status\n"); + goto out; + } + cpu_relax(); + udelay(1); + } while (count); + +out: + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + vpe_to_cpuid_unlock(vpe, flags); + + if (!count) + return -ENXIO; + + *val = !!(status & (1 << d->hwirq)); + + return 0; +} + +static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_cmd_info *info = vcpu_info; + + switch (info->cmd_type) { + case PROP_UPDATE_VSGI: + vpe->sgi_config[d->hwirq].priority = info->priority; + vpe->sgi_config[d->hwirq].group = info->group; + its_configure_sgi(d, false); + return 0; + + default: + return -EINVAL; + } +} + +static struct irq_chip its_sgi_irq_chip = { + .name = "GICv4.1-sgi", + .irq_mask = its_sgi_mask_irq, + .irq_unmask = its_sgi_unmask_irq, + .irq_set_affinity = its_sgi_set_affinity, + .irq_set_irqchip_state = its_sgi_set_irqchip_state, + .irq_get_irqchip_state = its_sgi_get_irqchip_state, + .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, +}; + +static int its_sgi_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs, + void *args) +{ + struct its_vpe *vpe = args; + int i; + + /* Yes, we do want 16 SGIs */ + WARN_ON(nr_irqs != 16); + + for (i = 0; i < 16; i++) { + vpe->sgi_config[i].priority = 0; + vpe->sgi_config[i].enabled = false; + vpe->sgi_config[i].group = false; + + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + &its_sgi_irq_chip, vpe); + irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); + } + + return 0; +} + +static void its_sgi_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + /* Nothing to do */ +} + +static int its_sgi_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + /* Write out the initial SGI configuration */ + its_configure_sgi(d, false); + 
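+ /*
+ * A single CLEAR=false command is enough at activation time: it
+ * writes the configuration while leaving the pending state
+ * untouched. Clearing a stale pending bit would need a second
+ * command with CLEAR=true, see its_sgi_irq_domain_deactivate().
+ */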
return 0; +} + +static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + + /* + * The VSGI command is awkward: + * + * - To change the configuration, CLEAR must be set to false, + * leaving the pending bit unchanged. + * - To clear the pending bit, CLEAR must be set to true, leaving + * the configuration unchanged. + * + * You just can't do both at once, hence the two commands below. + */ + vpe->sgi_config[d->hwirq].enabled = false; + its_configure_sgi(d, false); + its_configure_sgi(d, true); +} + +static const struct irq_domain_ops its_sgi_domain_ops = { + .alloc = its_sgi_irq_domain_alloc, + .free = its_sgi_irq_domain_free, + .activate = its_sgi_irq_domain_activate, + .deactivate = its_sgi_irq_domain_deactivate, +}; + +static int its_vpe_id_alloc(void) +{ + return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); +} + +static void its_vpe_id_free(u16 id) +{ + ida_simple_remove(&its_vpeid_ida, id); +} + +static int its_vpe_init(struct its_vpe *vpe) +{ + struct page *vpt_page; + int vpe_id; + + /* Allocate vpe_id */ + vpe_id = its_vpe_id_alloc(); + if (vpe_id < 0) + return vpe_id; + + /* Allocate VPT */ + vpt_page = its_allocate_pending_table(GFP_KERNEL); + if (!vpt_page) { + its_vpe_id_free(vpe_id); + return -ENOMEM; + } + + if (!its_alloc_vpe_table(vpe_id)) { + its_vpe_id_free(vpe_id); + its_free_pending_table(vpt_page); + return -ENOMEM; + } + + raw_spin_lock_init(&vpe->vpe_lock); + vpe->vpe_id = vpe_id; + vpe->vpt_page = vpt_page; + if (gic_rdists->has_rvpeid) + atomic_set(&vpe->vmapp_count, 0); + else + vpe->vpe_proxy_event = -1; + + return 0; +} + +static void its_vpe_teardown(struct its_vpe *vpe) +{ + its_vpe_db_proxy_unmap(vpe); + its_vpe_id_free(vpe->vpe_id); + its_free_pending_table(vpe->vpt_page); +} + +static void its_vpe_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct its_vm *vm = domain->host_data; + int i; + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + struct its_vpe *vpe = irq_data_get_irq_chip_data(data); + + BUG_ON(vm != vpe->its_vm); + + clear_bit(data->hwirq, vm->db_bitmap); + its_vpe_teardown(vpe); + irq_domain_reset_irq_data(data); + } + + if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { + its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); + its_free_prop_table(vm->vprop_page); + } +} + +static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct irq_chip *irqchip = &its_vpe_irq_chip; + struct its_vm *vm = args; + unsigned long *bitmap; + struct page *vprop_page; + int base, nr_ids, i, err = 0; + + BUG_ON(!vm); + + bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); + if (!bitmap) + return -ENOMEM; + + if (nr_ids < nr_irqs) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vprop_page = its_allocate_prop_table(GFP_KERNEL); + if (!vprop_page) { + its_lpi_free(bitmap, base, nr_ids); + return -ENOMEM; + } + + vm->db_bitmap = bitmap; + vm->db_lpi_base = base; + vm->nr_db_lpis = nr_ids; + vm->vprop_page = vprop_page; + + if (gic_rdists->has_rvpeid) + irqchip = &its_vpe_4_1_irq_chip; + + for (i = 0; i < nr_irqs; i++) { + vm->vpes[i]->vpe_db_lpi = base + i; + err = its_vpe_init(vm->vpes[i]); + if (err) + break; + err = its_irq_gic_domain_alloc(domain, virq + i, + vm->vpes[i]->vpe_db_lpi); + 
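+ /*
+ * (Illustrative numbering, assuming a doorbell base of 8192, the
+ * first LPI INTID: vPE0 gets doorbell LPI 8192, vPE1 gets 8193,
+ * and so on, while the hwirq programmed for each vPE below is
+ * just its index i within the VM.)
+ */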
if (err) + break; + irq_domain_set_hwirq_and_chip(domain, virq + i, i, + irqchip, vm->vpes[i]); + set_bit(i, bitmap); + irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); + } + + if (err) { + if (i > 0) + its_vpe_irq_domain_free(domain, virq, i); + + its_lpi_free(bitmap, base, nr_ids); + its_free_prop_table(vprop_page); + } + + return err; +} + +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool reserve) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we issue VMAPP on demand... Unless + * we're on a GICv4.1 and we eagerly map the VPE on all ITSs + * so that VSGIs can work. + */ + if (!gic_requires_eager_mapping()) + return 0; + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; +} + +static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, + struct irq_data *d) +{ + struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map on GICv4.0, we unmap the VPE once no + * VLPIs are associated with the VM. + */ + if (!gic_requires_eager_mapping()) + return; + + list_for_each_entry(its, &its_nodes, entry) { + if (!is_v4(its)) + continue; + + its_send_vmapp(its, vpe, false); + } + + /* + * There may be a direct read to the VPT after unmapping the + * vPE, to guarantee the validity of this, we make the VPT + * memory coherent with the CPU caches here. + */ + if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) + gic_flush_dcache_to_poc(page_address(vpe->vpt_page), + LPI_PENDBASE_SZ); +} + +static const struct irq_domain_ops its_vpe_domain_ops = { + .alloc = its_vpe_irq_domain_alloc, + .free = its_vpe_irq_domain_free, + .activate = its_vpe_irq_domain_activate, + .deactivate = its_vpe_irq_domain_deactivate, +}; + +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + /* + * GIC architecture specification requires the ITS to be both + * disabled and quiescent for writes to GITS_BASER or + * GITS_CBASER to not have UNPREDICTABLE results. 
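+ *
+ * Hence the sequence below: if the ITS is already disabled and
+ * quiescent we are done; otherwise clear GITS_CTLR.Enabled and
+ * poll GITS_CTLR.Quiescent (for up to a second) before anyone
+ * touches GITS_BASER or GITS_CBASER.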
+ */ + if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) +{ + struct its_node *its = data; + + /* erratum 22375: only alloc 8MB table size (20 bits) */ + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; +} + +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; +} + +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +{ + struct its_node *its = data; + + /* On QDF2400, the size of the ITE is 16Bytes */ + its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; + its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (device_ids(its) > ids) { + its->typer &= ~GITS_TYPER_DEVBITS; + its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); + } + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... 
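+ *
+ * (The SZ_128K offset below skips two 64K register frames per
+ * redistributor; treat the frame arithmetic as illustrative, the
+ * authoritative value is the erratum workaround itself.)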
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; +} + +static bool __maybe_unused its_enable_rk3588001(void *data) +{ + struct its_node *its = data; + + if (!of_machine_is_compatible("rockchip,rk3588") && + !of_machine_is_compatible("rockchip,rk3588s")) + return false; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + + return true; +} + +static bool its_set_non_coherent(void *data) +{ + struct its_node *its = data; + + its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk its_quirks[] = { +#ifdef CONFIG_CAVIUM_ERRATUM_22375 + { + .desc = "ITS: Cavium errata 22375, 24313", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_22375, + }, +#endif +#ifdef CONFIG_CAVIUM_ERRATUM_23144 + { + .desc = "ITS: Cavium erratum 23144", + .iidr = 0xa100034c, /* ThunderX pass 1.x */ + .mask = 0xffff0fff, + .init = its_enable_quirk_cavium_23144, + }, +#endif +#ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 + { + .desc = "ITS: QDF2400 erratum 0065", + .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ + .mask = 0xffffffff, + .init = its_enable_quirk_qdf2400_e0065, + }, +#endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. + */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif +#ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 + { + .desc = "ITS: Rockchip erratum RK3588001", + .iidr = 0x0201743b, + .mask = 0xffffffff, + .init = its_enable_rk3588001, + }, +#endif + { + .desc = "ITS: non-coherent attribute", + .property = "dma-noncoherent", + .init = its_set_non_coherent, + }, + { + } +}; + +static void its_enable_quirks(struct its_node *its) +{ + u32 iidr = readl_relaxed(its->base + GITS_IIDR); + + gic_enable_quirks(iidr, its_quirks, its); + + if (is_of_node(its->fwnode_handle)) + gic_enable_of_quirks(to_of_node(its->fwnode_handle), + its_quirks, its); +} + +static int its_save_disable(void) +{ + struct its_node *its; + int err = 0; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + its->ctlr_save = readl_relaxed(base + GITS_CTLR); + err = its_force_quiescent(base); + if (err) { + pr_err("ITS@%pa: failed to quiesce: %d\n", + &its->phys_base, err); + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + goto err; + } + + its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); + } + +err: + if (err) { + list_for_each_entry_continue_reverse(its, &its_nodes, entry) { + void __iomem *base; + + base = its->base; + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + } + } + raw_spin_unlock(&its_lock); + + return err; +} + +static void its_restore_enable(void) +{ + struct its_node *its; + int ret; + + raw_spin_lock(&its_lock); + list_for_each_entry(its, &its_nodes, entry) { + void __iomem *base; + int i; + + base = its->base; + + /* + * Make sure that the ITS is disabled. If it fails to quiesce, + * don't restore it since writing to CBASER or BASER + * registers is undefined according to the GIC v3 ITS + * Specification. 
+ * + * Firmware resuming with the ITS enabled is terminally broken. + */ + WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); + ret = its_force_quiescent(base); + if (ret) { + pr_err("ITS@%pa: failed to quiesce on resume: %d\n", + &its->phys_base, ret); + continue; + } + + gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); + + /* + * Writing CBASER resets CREADR to 0, so make CWRITER and + * cmd_write line up with it. + */ + its->cmd_write = its->cmd_base; + gits_write_cwriter(0, base + GITS_CWRITER); + + /* Restore GITS_BASER from the value cache. */ + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + struct its_baser *baser = &its->tables[i]; + + if (!(baser->val & GITS_BASER_VALID)) + continue; + + its_write_baser(its, baser, baser->val); + } + writel_relaxed(its->ctlr_save, base + GITS_CTLR); + + /* + * Reinit the collection if it's stored in the ITS. This is + * indicated by the col_id being less than the HCC field. + * CID < HCC as specified in the GIC v3 Documentation. + */ + if (its->collections[smp_processor_id()].col_id < + GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) + its_cpu_init_collection(its); + } + raw_spin_unlock(&its_lock); +} + +static struct syscore_ops its_syscore_ops = { + .suspend = its_save_disable, + .resume = its_restore_enable, +}; + +static void __init __iomem *its_map_one(struct resource *res, int *err) +{ + void __iomem *its_base; + u32 val; + + its_base = ioremap(res->start, SZ_64K); + if (!its_base) { + pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); + *err = -ENOMEM; + return NULL; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); + *err = -ENODEV; + goto out_unmap; + } + + *err = its_force_quiescent(its_base); + if (*err) { + pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); + goto out_unmap; + } + + return its_base; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static int its_init_domain(struct its_node *its) +{ + struct irq_domain *inner_domain; + struct msi_domain_info *info; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->ops = &its_msi_domain_ops; + info->data = its; + + inner_domain = irq_domain_create_hierarchy(its_parent, + its->msi_domain_flags, 0, + its->fwnode_handle, &its_domain_ops, + info); + if (!inner_domain) { + kfree(info); + return -ENOMEM; + } + + irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); + + return 0; +} + +static int its_init_vpe_domain(void) +{ + struct its_node *its; + u32 devid; + int entries; + + if (gic_rdists->has_direct_lpi) { + pr_info("ITS: Using DirectLPI for VPE invalidation\n"); + return 0; + } + + /* Any ITS will do, even if not v4 */ + its = list_first_entry(&its_nodes, struct its_node, entry); + + entries = roundup_pow_of_two(nr_cpu_ids); + vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), + GFP_KERNEL); + if (!vpe_proxy.vpes) + return -ENOMEM; + + /* Use the last possible DevID */ + devid = GENMASK(device_ids(its) - 1, 0); + vpe_proxy.dev = its_create_device(its, devid, entries, false); + if (!vpe_proxy.dev) { + kfree(vpe_proxy.vpes); + pr_err("ITS: Can't allocate GICv4 proxy device\n"); + return -ENOMEM; + } + + BUG_ON(entries > vpe_proxy.dev->nr_ites); + + raw_spin_lock_init(&vpe_proxy.lock); + vpe_proxy.next_victim = 0; + pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", + devid, vpe_proxy.dev->nr_ites); + + return 0; +} + +static int __init 
its_compute_its_list_map(struct its_node *its) +{ + int its_number; + u32 ctlr; + + /* + * This is assumed to be done early enough that we're + * guaranteed to be single-threaded, hence no + * locking. Should this change, we should address + * this. + */ + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { + pr_err("ITS@%pa: No ITSList entry available!\n", + &its->phys_base); + return -EINVAL; + } + + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr &= ~GITS_CTLR_ITS_NUMBER; + ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; + writel_relaxed(ctlr, its->base + GITS_CTLR); + ctlr = readl_relaxed(its->base + GITS_CTLR); + if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { + its_number = ctlr & GITS_CTLR_ITS_NUMBER; + its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; + } + + if (test_and_set_bit(its_number, &its_list_map)) { + pr_err("ITS@%pa: Duplicate ITSList entry %d\n", + &its->phys_base, its_number); + return -EINVAL; + } + + return its_number; +} + +static int __init its_probe_one(struct its_node *its) +{ + u64 baser, tmp; + struct page *page; + u32 ctlr; + int err; + + if (is_v4(its)) { + if (!(its->typer & GITS_TYPER_VMOVP)) { + err = its_compute_its_list_map(its); + if (err < 0) + goto out; + + its->list_nr = err; + + pr_info("ITS@%pa: Using ITS number %d\n", + &its->phys_base, err); + } else { + pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); + } + + if (is_v4_1(its)) { + u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); + + its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); + if (!its->sgir_base) { + err = -ENOMEM; + goto out; + } + + its->mpidr = readl_relaxed(its->base + GITS_MPIDR); + + pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", + &its->phys_base, its->mpidr, svpet); + } + } + + page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, + get_order(ITS_CMD_QUEUE_SZ)); + if (!page) { + err = -ENOMEM; + goto out_unmap_sgir; + } + its->cmd_base = (void *)page_address(page); + its->cmd_write = its->cmd_base; + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_RaWaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + gits_write_cbaser(baser, its->base + GITS_CBASER); + tmp = gits_read_cbaser(its->base + GITS_CBASER); + + if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) + tmp &= ~GITS_CBASER_SHAREABILITY_MASK; + + if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { + if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { + /* + * The HW reports non-shareable, we must + * remove the cacheability attributes as + * well. 
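+ *
+ * Example of the downgrade (as coded below): the shareability and
+ * cacheability attributes are cleared, GITS_CBASER_nC is set
+ * instead, and the driver falls back to explicit cache flushing
+ * for the command queue (ITS_FLAGS_CMDQ_NEEDS_FLUSHING).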
+ */ + baser &= ~(GITS_CBASER_SHAREABILITY_MASK | + GITS_CBASER_CACHEABILITY_MASK); + baser |= GITS_CBASER_nC; + gits_write_cbaser(baser, its->base + GITS_CBASER); + } + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + gits_write_cwriter(0, its->base + GITS_CWRITER); + ctlr = readl_relaxed(its->base + GITS_CTLR); + ctlr |= GITS_CTLR_ENABLE; + if (is_v4(its)) + ctlr |= GITS_CTLR_ImDe; + writel_relaxed(ctlr, its->base + GITS_CTLR); + + err = its_init_domain(its); + if (err) + goto out_free_tables; + + raw_spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + raw_spin_unlock(&its_lock); + + return 0; + +out_free_tables: + its_free_tables(its); +out_free_cmd: + free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ)); +out_unmap_sgir: + if (its->sgir_base) + iounmap(its->sgir_base); +out: + pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +static int redist_disable_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + u64 timeout = USEC_PER_SEC; + u64 val; + + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + val = readl_relaxed(rbase + GICR_CTLR); + if (!(val & GICR_CTLR_ENABLE_LPIS)) + return 0; + + /* + * If coming via a CPU hotplug event, we don't need to disable + * LPIs before trying to re-enable them. They are already + * configured and all is well in the world. + * + * If running with preallocated tables, there is nothing to do. + */ + if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || + (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) + return 0; + + /* + * From that point on, we only try to do some damage control. + */ + pr_warn("GIC-2500: CPU%d: Booted with LPIs enabled, memory probably corrupted\n", + smp_processor_id()); + add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); + + /* Disable LPIs */ + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure any change to GICR_CTLR is observable by the GIC */ + dsb(sy); + + /* + * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs + * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. + * Error out if we time out waiting for RWP to clear. + */ + while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { + if (!timeout) { + pr_err("CPU%d: Timeout while disabling LPIs\n", + smp_processor_id()); + return -ETIMEDOUT; + } + udelay(1); + timeout--; + } + + /* + * After it has been written to 1, it is IMPLEMENTATION + * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be + * cleared to 0. Error out if clearing the bit failed. 
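+ *
+ * (That is what the final read-back below detects: if EnableLPIs
+ * is still set after the write, this redistributor treats it as
+ * RES1 and all we can do is return -EBUSY.)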
+ */ + if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { + pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id()); + return -EBUSY; + } + + return 0; +} + +int phytium_its_cpu_init(void) +{ + if (!list_empty(&its_nodes)) { + int ret; + + ret = redist_disable_lpis(); + if (ret) + return ret; + + its_cpu_init_lpis(); + its_cpu_init_collections(); + } + + return 0; +} + +static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) +{ + cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state); + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; +} + +static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, + rdist_memreserve_cpuhp_cleanup_workfn); + +/* Mark all the BASER registers as invalid before they get reprogrammed */ +static int __init its_reset_one(struct resource *res) +{ + void __iomem *its_base; + int err, i; + + its_base = its_map_one(res, &err); + if (!its_base) + return err; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) + gits_write_baser(0, its_base + GITS_BASER + (i << 3)); + + iounmap(its_base); + return 0; +} + +static const struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-phytium-2500-its", }, + {}, +}; + +static struct its_node __init *its_node_init(struct resource *res, + struct fwnode_handle *handle, int numa_node) +{ + void __iomem *its_base; + struct its_node *its; + int err; + + its_base = its_map_one(res, &err); + if (!its_base) + return NULL; + + pr_info("ITS %pR\n", res); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) + goto out_unmap; + + raw_spin_lock_init(&its->lock); + mutex_init(&its->dev_alloc_lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + + its->typer = gic_read_typer(its_base + GITS_TYPER); + its->base = its_base; + its->phys_base = res->start; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; + + its->numa_node = numa_node; + its->fwnode_handle = handle; + + return its; + +out_unmap: + iounmap(its_base); + return NULL; +} + +static void its_node_destroy(struct its_node *its) +{ + iounmap(its->base); + kfree(its); +} + +static int __init its_of_probe(struct device_node *node) +{ + struct device_node *np; + struct resource res; + int err; + + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */ + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + if (!of_device_is_available(np) || + !of_property_read_bool(np, "msi-controller") || + of_address_to_resource(np, 0, &res)) + continue; + + err = its_reset_one(&res); + if (err) + return err; + } + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + struct its_node *its; + + if (!of_device_is_available(np)) + continue; + if (!of_property_read_bool(np, "msi-controller")) { + pr_warn("%pOF: no msi-controller property, ITS ignored\n", + np); + continue; + } + + if (of_address_to_resource(np, 0, &res)) { + pr_warn("%pOF: no regs?\n", np); + continue; + } + + + its = its_node_init(&res, &np->fwnode, of_node_to_nid(np)); + if (!its) + return -ENOMEM; + + its_enable_quirks(its); + err = its_probe_one(its); + if (err) { + its_node_destroy(its); + return err; + } + } + return 0; +} + +#ifdef CONFIG_ACPI + +#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) + +#ifdef CONFIG_ACPI_NUMA +struct its_srat_map { + /* numa node id */ + u32 numa_node; + /* GIC ITS ID */ + u32 its_id; +}; + +static struct its_srat_map *its_srat_maps __initdata; +static int its_in_srat __initdata; + +static int __init acpi_get_its_numa_node(u32 its_id) +{ + int i; + + for (i = 0; i < its_in_srat; i++) { + if (its_id == its_srat_maps[i].its_id) + return its_srat_maps[i].numa_node; + } + return NUMA_NO_NODE; +} + +static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + return 0; +} + +static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + int node; + struct acpi_srat_gic_its_affinity *its_affinity; + + its_affinity = (struct acpi_srat_gic_its_affinity *)header; + if (!its_affinity) + return -EINVAL; + + if (its_affinity->header.length < sizeof(*its_affinity)) { + pr_err("SRAT: Invalid header length %d in ITS affinity\n", + its_affinity->header.length); + return -EINVAL; + } + + /* + * Note that in theory a new proximity node could be created by this + * entry as it is an SRAT resource allocation structure. + * We do not currently support doing so. 
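+ *
+ * (Consequently pxm_to_node() below only resolves proximity
+ * domains that already exist; anything else maps to NUMA_NO_NODE
+ * and the affinity entry is ignored with an error message.)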
+ */ + node = pxm_to_node(its_affinity->proximity_domain); + + if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { + pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node); + return 0; + } + + its_srat_maps[its_in_srat].numa_node = node; + its_srat_maps[its_in_srat].its_id = its_affinity->its_id; + its_in_srat++; + pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n", + its_affinity->proximity_domain, its_affinity->its_id, node); + + return 0; +} + +static void __init acpi_table_parse_srat_its(void) +{ + int count; + + count = acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_match_srat_its, 0); + if (count <= 0) + return; + + its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map), + GFP_KERNEL); + if (!its_srat_maps) + return; + + acpi_table_parse_entries(ACPI_SIG_SRAT, + sizeof(struct acpi_table_srat), + ACPI_SRAT_TYPE_GIC_ITS_AFFINITY, + gic_acpi_parse_srat_its, 0); +} + +/* free the its_srat_maps after ITS probing */ +static void __init acpi_its_srat_maps_free(void) +{ + kfree(its_srat_maps); +} +#else +static void __init acpi_table_parse_srat_its(void) { } +static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } +static void __init acpi_its_srat_maps_free(void) { } +#endif + +static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct fwnode_handle *dom_handle; + struct its_node *its; + struct resource res; + int err; + + its_entry = (struct acpi_madt_generic_translator *)header; + memset(&res, 0, sizeof(res)); + res.start = its_entry->base_address; + res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; + res.flags = IORESOURCE_MEM; + + dom_handle = irq_domain_alloc_fwnode(&res.start); + if (!dom_handle) { + pr_err("ITS@%pa: Unable to allocate GIC-phytium-2500 ITS domain token\n", + &res.start); + return -ENOMEM; + } + + err = iort_register_domain_token(its_entry->translation_id, res.start, + dom_handle); + if (err) { + pr_err("ITS@%pa: Unable to register GIC-phytium-2500 ITS domain token (ITS ID %d) to IORT\n", + &res.start, its_entry->translation_id); + goto dom_err; + } + + its = its_node_init(&res, dom_handle, + acpi_get_its_numa_node(its_entry->translation_id)); + if (!its) { + err = -ENOMEM; + goto node_err; + } + + err = its_probe_one(its); + if (!err) + return 0; + +node_err: + iort_deregister_domain_token(its_entry->translation_id); +dom_err: + irq_domain_free_fwnode(dom_handle); + return err; +} + +static int __init its_acpi_reset(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_translator *its_entry; + struct resource res; + + its_entry = (struct acpi_madt_generic_translator *)header; + res = (struct resource) { + .start = its_entry->base_address, + .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, + .flags = IORESOURCE_MEM, + }; + + return its_reset_one(&res); +} + +static void __init its_acpi_probe(void) +{ + acpi_table_parse_srat_its(); + /* + * Make sure *all* the ITS are reset before we probe any, as + * they may be sharing memory. If any of the ITS fails to + * reset, don't even try to go any further, as this could + * result in something even worse. 
+ */
+ if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ its_acpi_reset, 0) > 0)
+ acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
+ gic_acpi_parse_madt_its, 0);
+ acpi_its_srat_maps_free();
+}
+#else
+static void __init its_acpi_probe(void) { }
+#endif
+
+int __init phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists,
+ struct irq_domain *parent_domain)
+{
+ struct device_node *of_node;
+ struct its_node *its;
+ bool has_v4 = false;
+ bool has_v4_1 = false;
+ int err;
+
+ gic_rdists = rdists;
+
+ its_parent = parent_domain;
+ of_node = to_of_node(handle);
+ if (of_node)
+ its_of_probe(of_node);
+ else
+ its_acpi_probe();
+
+ if (list_empty(&its_nodes)) {
+ pr_warn("ITS: No ITS available, not enabling LPIs\n");
+ return -ENXIO;
+ }
+
+ err = allocate_lpi_tables();
+ if (err)
+ return err;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ has_v4 |= is_v4(its);
+ has_v4_1 |= is_v4_1(its);
+ }
+
+ /* Don't bother with inconsistent systems */
+ if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
+ rdists->has_rvpeid = false;
+
+ if (has_v4 && rdists->has_vlpis) {
+ const struct irq_domain_ops *sgi_ops;
+
+ if (has_v4_1)
+ sgi_ops = &its_sgi_domain_ops;
+ else
+ sgi_ops = NULL;
+
+ if (its_init_vpe_domain() ||
+ its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
+ rdists->has_vlpis = false;
+ pr_err("ITS: Disabling GICv4 support\n");
+ }
+ }
+
+ register_syscore_ops(&its_syscore_ops);
+
+ return 0;
+}
diff --git a/drivers/irqchip/irq-gic-phytium-2500.c b/drivers/irqchip/irq-gic-phytium-2500.c
new file mode 100644
index 0000000000000000000000000000000000000000..dbeeb795b58146d8b01834a41162e1d790f0d0bd
--- /dev/null
+++ b/drivers/irqchip/irq-gic-phytium-2500.c
@@ -0,0 +1,2916 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Phytium Corporation.
+ * Author:
+ * Wang Yinfeng
+ * Chen Baozi
+ * Chen Siyu
+ * Cui Fulong
+ * Li Yuting
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+
+#define pr_fmt(fmt) "GIC-2500: " fmt
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "irq-gic-common.h"
+
+#define MAX_MARS3_SOC_COUNT 8
+#define MARS3_ADDR_SKTID_SHIFT 41
+
+struct gic_dist_desc {
+ void __iomem *dist_base;
+ phys_addr_t phys_base;
+ unsigned long size;
+};
+
+static struct gic_dist_desc mars3_gic_dists[MAX_MARS3_SOC_COUNT] __read_mostly;
+
+static unsigned int mars3_sockets_bitmap = 0x1;
+
+#define mars3_irq_to_skt(hwirq) (((hwirq) - 32) % 8)
+
+#define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80)
+
+#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
+#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
+#define FLAGS_WORKAROUND_MTK_GICR_SAVE (1ULL << 2)
+#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 3)
+
+#define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
+
+struct redist_region {
+ void __iomem *redist_base;
+ phys_addr_t phys_base;
+ bool single_redist;
+};
+
+struct gic_chip_data {
+ struct fwnode_handle *fwnode;
+ phys_addr_t dist_phys_base;
+ void __iomem *dist_base;
+ struct redist_region *redist_regions;
+ struct rdists rdists;
+ struct irq_domain *domain;
+ u64 redist_stride;
+ u32 nr_redist_regions;
+ u64 flags;
+ bool has_rss;
+ unsigned int ppi_nr;
+ struct partition_desc **ppi_descs;
+};
+
+#define T241_CHIPS_MAX 4
+static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
+static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
+
+static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
+
+static struct gic_chip_data gic_data __read_mostly;
+static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
+
+#define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
+#define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
+#define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
+
+/*
+ * The behaviours of RPR and PMR registers differ depending on the value of
+ * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
+ * distributor and redistributors depends on whether security is enabled in the
+ * GIC.
+ *
+ * When security is enabled, non-secure priority values from the (re)distributor
+ * are presented to the GIC CPUIF as follows:
+ * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
+ *
+ * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
+ * EL1 are subject to a similar operation thus matching the priorities presented
+ * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
+ * these values are unchanged by the GIC.
+ *
+ * see GICv3/GICv4 Architecture Specification (IHI0069D):
+ * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
+ * priorities.
+ * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
+ * interrupt.
+ */
+static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis_ft2500);
+
+extern struct static_key_false gic_nonsecure_priorities;
+
+/*
+ * When the Non-secure world has access to group 0 interrupts (as a
+ * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will
+ * return the Distributor's view of the interrupt priority.
+ *
+ * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority
+ * written by software is moved to the Non-secure range by the Distributor.
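+ *
+ * Worked example (illustrative): a priority of 0xa0 programmed by
+ * Non-secure software is presented to the CPU interface as
+ * (0xa0 >> 1) | 0x80 = 0xd0, which is the value a read of
+ * ICC_RPR_EL1 returns and what GICD_INT_RPR_PRI() reproduces.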
+ *
+ * If both are true (which is when gic_nonsecure_priorities gets enabled),
+ * we need to shift down the priority programmed by software to match it
+ * against the value returned by ICC_RPR_EL1.
+ */
+#define GICD_INT_RPR_PRI(priority) \
+ ({ \
+ u32 __priority = (priority); \
+ if (static_branch_unlikely(&gic_nonsecure_priorities)) \
+ __priority = 0x80 | (__priority >> 1); \
+ \
+ __priority; \
+ })
+
+/* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */
+static refcount_t *ppi_nmi_refs;
+
+static struct gic_kvm_info gic_v3_kvm_info __initdata;
+static DEFINE_PER_CPU(bool, has_rss_ft2500);
+
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
+#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
+
+/* Our default, arbitrary priority value. Linux only uses one anyway. */
+#define DEFAULT_PMR_VALUE 0xf0
+
+enum gic_intid_range {
+ SGI_RANGE,
+ PPI_RANGE,
+ SPI_RANGE,
+ EPPI_RANGE,
+ ESPI_RANGE,
+ LPI_RANGE,
+ __INVALID_RANGE__
+};
+
+static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
+{
+ switch (hwirq) {
+ case 0 ... 15:
+ return SGI_RANGE;
+ case 16 ... 31:
+ return PPI_RANGE;
+ case 32 ... 1019:
+ return SPI_RANGE;
+ case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
+ return EPPI_RANGE;
+ case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
+ return ESPI_RANGE;
+ case 8192 ... GENMASK(23, 0):
+ return LPI_RANGE;
+ default:
+ return __INVALID_RANGE__;
+ }
+}
+
+static enum gic_intid_range get_intid_range(struct irq_data *d)
+{
+ return __get_intid_range(d->hwirq);
+}
+
+static inline unsigned int gic_irq(struct irq_data *d)
+{
+ return d->hwirq;
+}
+
+static inline bool gic_irq_in_rdist(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
+{
+ if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
+ irq_hw_number_t hwirq = irqd_to_hwirq(d);
+ u32 chip;
+
+ /*
+ * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
+ * registers are directed to the chip that owns the SPI. The
+ * alias region can also be used for writes to the
+ * GICD_In{E} except GICD_ICENABLERn. Each chip has support
+ * for 320 {E}SPIs. Mappings for all 4 chips:
+ * Chip0 = 32-351
+ * Chip1 = 352-671
+ * Chip2 = 672-991
+ * Chip3 = 4096-4415
+ */
+ switch (__get_intid_range(hwirq)) {
+ case SPI_RANGE:
+ chip = (hwirq - 32) / 320;
+ break;
+ case ESPI_RANGE:
+ chip = 3;
+ break;
+ default:
+ unreachable();
+ }
+ return t241_dist_base_alias[chip];
+ }
+
+ return gic_data.dist_base;
+}
+
+static inline void __iomem *gic_dist_base(struct irq_data *d)
+{
+ switch (get_intid_range(d)) {
+ case SGI_RANGE:
+ case PPI_RANGE:
+ case EPPI_RANGE:
+ /* SGI+PPI -> SGI_base for this CPU */
+ return gic_data_rdist_sgi_base();
+
+ case SPI_RANGE:
+ case ESPI_RANGE:
+ /* SPI -> dist_base */
+ return gic_data.dist_base;
+
+ default:
+ return NULL;
+ }
+}
+
+static void gic_do_wait_for_rwp(void __iomem *base)
+{
+ u32 count = 1000000; /* 1s!
*/ + + while (readl_relaxed(base + GICD_CTLR) & GICD_CTLR_RWP) { + count--; + if (!count) { + pr_err_ratelimited("RWP timeout, gone fishing\n"); + return; + } + cpu_relax(); + udelay(1); + } +} + +/* Wait for completion of a distributor change */ +static void gic_dist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data.dist_base); +} + +/* Wait for completion of a redistributor change */ +static void gic_redist_wait_for_rwp(void) +{ + gic_do_wait_for_rwp(gic_data_rdist_rd_base()); +} + +#ifdef CONFIG_ARM64 + +static u64 __maybe_unused gic_read_iar(void) +{ + if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) + return gic_read_iar_cavium_thunderx(); + else + return gic_read_iar_common(); +} +#endif + +static void gic_enable_redist(bool enable) +{ + void __iomem *rbase; + u32 count = 1000000; /* 1s! */ + u32 val; + unsigned long mpidr; + int i; + + if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) + return; + + rbase = gic_data_rdist_rd_base(); + + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + } + if (!count) + pr_err_ratelimited("redistributor failed to %s...\n", + enable ? "wakeup" : "sleep"); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if (mpidr & 0xFFFF) // either Aff1 or Aff0 is not zero + return; + + rbase = rbase + 64 * SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable) + /* Wake up this CPU redistributor */ + val &= ~GICR_WAKER_ProcessorSleep; + else + val |= GICR_WAKER_ProcessorSleep; + writel_relaxed(val, rbase + GICR_WAKER); + + if (!enable) { /* Check that GICR_WAKER is writeable */ + val = readl_relaxed(rbase + GICR_WAKER); + if (!(val & GICR_WAKER_ProcessorSleep)) + return; /* No PM support in this redistributor */ + } + + count = 1000000; /* 1s! */ + while (--count) { + val = readl_relaxed(rbase + GICR_WAKER); + if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) + break; + cpu_relax(); + udelay(1); + }; + if (!count) + pr_err_ratelimited("CPU MPIDR 0x%lx: redistributor %d failed to %s...\n", + mpidr, 64 + i, enable ? "wakeup" : "sleep"); + + rbase = rbase + SZ_128K; // next redistributor + } +} + +/* + * Routines to disable, enable, EOI and route interrupts + */ +static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) +{ + switch (get_intid_range(d)) { + case SGI_RANGE: + case PPI_RANGE: + case SPI_RANGE: + *index = d->hwirq; + return offset; + case EPPI_RANGE: + /* + * Contrary to the ESPI range, the EPPI range is contiguous + * to the PPI range in the registers, so let's adjust the + * displacement accordingly. Consistency is overrated. 
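+ *
+ * Worked example (using the architectural EPPI_BASE_INTID of
+ * 1056): hwirq 1056, i.e. EPPI0, yields index 1056 - 1056 + 32 =
+ * 32, placing it immediately after PPI31 in the register map.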
+ */ + *index = d->hwirq - EPPI_BASE_INTID + 32; + return offset; + case ESPI_RANGE: + *index = d->hwirq - ESPI_BASE_INTID; + switch (offset) { + case GICD_ISENABLER: + return GICD_ISENABLERnE; + case GICD_ICENABLER: + return GICD_ICENABLERnE; + case GICD_ISPENDR: + return GICD_ISPENDRnE; + case GICD_ICPENDR: + return GICD_ICPENDRnE; + case GICD_ISACTIVER: + return GICD_ISACTIVERnE; + case GICD_ICACTIVER: + return GICD_ICACTIVERnE; + case GICD_IPRIORITYR: + return GICD_IPRIORITYRnE; + case GICD_ICFGR: + return GICD_ICFGRnE; + case GICD_IROUTER: + return GICD_IROUTERnE; + default: + break; + } + break; + default: + break; + } + + WARN_ON(1); + *index = d->hwirq; + return offset; +} + +static int gic_peek_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) + base = gic_data_rdist_sgi_base(); + else { + unsigned int skt; + + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + } + + return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); +} + +static void gic_poke_irq(struct irq_data *d, u32 offset) +{ + void __iomem *base; + + unsigned long mpidr; + void __iomem *rbase; + int i; + unsigned int skt; + u32 index, mask; + + offset = convert_offset_index(d, offset, &index); + mask = 1 << (index % 32); + + if (gic_irq_in_rdist(d)) { + base = gic_data_rdist_sgi_base(); + + writel_relaxed(mask, base + offset + (index / 32) * 4); + gic_redist_wait_for_rwp(); + + mpidr = (unsigned long)cpu_logical_map(smp_processor_id()); + + if ((mpidr & 0xFFFF) == 0) { // both Aff1 and Aff0 are zero + rbase = base + 64*SZ_128K; // skip 64 Redistributors + + for (i = 0; i < 4; i++) { + writel_relaxed(mask, rbase + offset + (index / 32) * 4); + gic_do_wait_for_rwp(rbase - SZ_64K); // RD from SGI base + rbase = rbase + SZ_128K; + } + } // core 0 of each socket + } else { + skt = mars3_irq_to_skt(gic_irq(d)); + base = mars3_gic_dists[skt].dist_base; + writel_relaxed(mask, base + offset + (index / 32) * 4); + gic_do_wait_for_rwp(base); + } +} + +static void gic_mask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ICENABLER); + if (gic_irq_in_rdist(d)) + gic_redist_wait_for_rwp(); + else + gic_dist_wait_for_rwp(); +} + +static void gic_eoimode1_mask_irq(struct irq_data *d) +{ + gic_mask_irq(d); + /* + * When masking a forwarded interrupt, make sure it is + * deactivated as well. + * + * This ensures that an interrupt that is getting + * disabled/masked will not get "stuck", because there is + * noone to deactivate it (guest is being terminated). + */ + if (irqd_is_forwarded_to_vcpu(d)) + gic_poke_irq(d, GICD_ICACTIVER); +} + +static void gic_unmask_irq(struct irq_data *d) +{ + gic_poke_irq(d, GICD_ISENABLER); +} + +static inline bool gic_supports_nmi_ft2500(void) +{ + return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && + static_branch_likely(&supports_pseudo_nmis_ft2500); +} + +static int gic_irq_set_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool val) +{ + u32 reg; + + if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + reg = val ? GICD_ISPENDR : GICD_ICPENDR; + break; + + case IRQCHIP_STATE_ACTIVE: + reg = val ? 
GICD_ISACTIVER : GICD_ICACTIVER; + break; + + case IRQCHIP_STATE_MASKED: + if (val) { + gic_mask_irq(d); + return 0; + } + reg = GICD_ISENABLER; + break; + + default: + return -EINVAL; + } + + gic_poke_irq(d, reg); + return 0; +} + +static int gic_irq_get_irqchip_state(struct irq_data *d, + enum irqchip_irq_state which, bool *val) +{ + if (d->hwirq >= 8192) /* PPI/SPI only */ + return -EINVAL; + + switch (which) { + case IRQCHIP_STATE_PENDING: + *val = gic_peek_irq(d, GICD_ISPENDR); + break; + + case IRQCHIP_STATE_ACTIVE: + *val = gic_peek_irq(d, GICD_ISACTIVER); + break; + + case IRQCHIP_STATE_MASKED: + *val = !gic_peek_irq(d, GICD_ISENABLER); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static void gic_irq_set_prio(struct irq_data *d, u8 prio) +{ + void __iomem *base = gic_dist_base(d); + u32 offset, index; + + offset = convert_offset_index(d, GICD_IPRIORITYR, &index); + + writeb_relaxed(prio, base + offset + index); +} + +static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) +{ + switch (__get_intid_range(hwirq)) { + case PPI_RANGE: + return hwirq - 16; + case EPPI_RANGE: + return hwirq - EPPI_BASE_INTID + 16; + default: + unreachable(); + } +} + +static u32 gic_get_ppi_index(struct irq_data *d) +{ + return __gic_get_ppi_index(d->hwirq); +} + +static int gic_irq_nmi_setup(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (!gic_supports_nmi_ft2500()) + return -EINVAL; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return -EINVAL; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return -EINVAL; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Setting up PPI as NMI, only switch handler for first NMI */ + if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { + refcount_set(&ppi_nmi_refs[idx], 1); + desc->handle_irq = handle_percpu_devid_fasteoi_nmi; + } + } else { + desc->handle_irq = handle_fasteoi_nmi; + } + + gic_irq_set_prio(d, GICD_INT_NMI_PRI); + + return 0; +} + +static void gic_irq_nmi_teardown(struct irq_data *d) +{ + struct irq_desc *desc = irq_to_desc(d->irq); + + if (WARN_ON(!gic_supports_nmi_ft2500())) + return; + + if (gic_peek_irq(d, GICD_ISENABLER)) { + pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); + return; + } + + /* + * A secondary irq_chip should be in charge of LPI request, + * it should not be possible to get there + */ + if (WARN_ON(gic_irq(d) >= 8192)) + return; + + /* desc lock should already be held */ + if (gic_irq_in_rdist(d)) { + u32 idx = gic_get_ppi_index(d); + + /* Tearing down NMI, only switch handler for last NMI */ + if (refcount_dec_and_test(&ppi_nmi_refs[idx])) + desc->handle_irq = handle_percpu_devid_irq; + } else { + desc->handle_irq = handle_fasteoi_irq; + } + + gic_irq_set_prio(d, GICD_INT_DEF_PRI); +} + +static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) +{ + enum gic_intid_range range; + + if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) + return false; + + range = get_intid_range(d); + + /* + * The workaround is needed if the IRQ is an SPI and + * the target cpu is different from the one we are + * executing on. 
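+ *
+ * (In other words, EOIs issued on the CPU that owns the SPI keep
+ * the cheap ICC_EOIR1_EL1-only path; only cross-CPU EOIs pay for
+ * the deactivation through GICD_ICACTIVER, plus a dsb(sy) in
+ * gic_eoi_irq() below.)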
+ */
+ return (range == SPI_RANGE || range == ESPI_RANGE) &&
+ !cpumask_test_cpu(raw_smp_processor_id(),
+ irq_data_get_effective_affinity_mask(d));
+}
+
+static void gic_eoi_irq(struct irq_data *d)
+{
+ write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+ isb();
+
+ if (gic_arm64_erratum_2941627_needed(d)) {
+ /*
+ * Make sure the GIC stream deactivate packet
+ * issued by ICC_EOIR1_EL1 has completed before
+ * deactivating through GICD_ICACTIVER.
+ */
+ dsb(sy);
+ gic_poke_irq(d, GICD_ICACTIVER);
+ }
+}
+
+static void gic_eoimode1_eoi_irq(struct irq_data *d)
+{
+ /*
+ * No need to deactivate an LPI, or an interrupt that
+ * is getting forwarded to a vcpu.
+ */
+ if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
+ return;
+
+ if (!gic_arm64_erratum_2941627_needed(d))
+ gic_write_dir(gic_irq(d));
+ else
+ gic_poke_irq(d, GICD_ICACTIVER);
+}
+
+static int gic_set_type(struct irq_data *d, unsigned int type)
+{
+ enum gic_intid_range range;
+ unsigned int irq = gic_irq(d);
+ void __iomem *base, *rbase;
+ u32 offset, index, skt;
+ int ret, i;
+ unsigned long mpidr;
+
+ range = get_intid_range(d);
+
+ /* Interrupt configuration for SGIs can't be changed */
+ if (range == SGI_RANGE)
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
+
+ /* SPIs have restrictions on the supported types */
+ if ((range == SPI_RANGE || range == ESPI_RANGE) &&
+ type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
+ return -EINVAL;
+
+ offset = convert_offset_index(d, GICD_ICFGR, &index);
+
+ if (gic_irq_in_rdist(d)) {
+ base = gic_data_rdist_sgi_base();
+ ret = gic_configure_irq(index, type, base + offset, gic_redist_wait_for_rwp);
+
+ mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+ if ((mpidr & 0xffff) == 0) {
+ rbase = base + 64*SZ_128K;
+
+ for (i = 0; i < 4; i++) {
+ ret = gic_configure_irq(index, type, rbase + offset, NULL);
+ gic_do_wait_for_rwp(rbase - SZ_64K);
+ rbase = rbase + SZ_128K;
+ }
+ }
+ } else {
+ skt = mars3_irq_to_skt(gic_irq(d));
+ base = mars3_gic_dists[skt].dist_base;
+ ret = gic_configure_irq(index, type, base + offset, NULL);
+ gic_do_wait_for_rwp(base);
+ }
+
+ if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
+ /* Misconfigured PPIs are usually not fatal */
+ pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
+{
+ if (get_intid_range(d) == SGI_RANGE)
+ return -EINVAL;
+
+ if (vcpu)
+ irqd_set_forwarded_to_vcpu(d);
+ else
+ irqd_clr_forwarded_to_vcpu(d);
+ return 0;
+}
+
+static u64 gic_cpu_to_affinity(int cpu)
+{
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 aff;
+
+ /* ASR8601 needs to have its affinities shifted down... */
+ if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
+ mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
+ (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
+
+ aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
+ MPIDR_AFFINITY_LEVEL(mpidr, 0));
+
+ return aff;
+}
+
+static void gic_deactivate_unhandled(u32 irqnr)
+{
+ if (static_branch_likely(&supports_deactivate_key)) {
+ if (irqnr < 8192)
+ gic_write_dir(irqnr);
+ } else {
+ write_gicreg(irqnr, ICC_EOIR1_EL1);
+ isb();
+ }
+}
+
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler.
We must do two things: + * + * (1) Ensure instruction ordering between a read of IAR and subsequent + * instructions in the IRQ handler using an ISB. + * + * It is possible for the IAR to report an IRQ which was signalled *after* + * the CPU took an IRQ exception as multiple interrupts can race to be + * recognized by the GIC, earlier interrupts could be withdrawn, and/or + * later interrupts could be prioritized by the GIC. + * + * For devices which are tightly coupled to the CPU, such as PMUs, a + * context synchronization event is necessary to ensure that system + * register state is not stale, as these may have been indirectly written + * *after* exception entry. + * + * (2) Deactivate the interrupt when EOI mode 1 is in use. + */ +static inline void gic_complete_ack(u32 irqnr) +{ + if (static_branch_likely(&supports_deactivate_key)) + write_gicreg(irqnr, ICC_EOIR1_EL1); + + isb(); +} + +static bool gic_rpr_is_nmi_prio(void) +{ + if (!gic_supports_nmi_ft2500()) + return false; + + return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); +} + +static bool gic_irqnr_is_special(u32 irqnr) +{ + return irqnr >= 1020 && irqnr <= 1023; +} + +static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_irq(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) +{ + if (gic_irqnr_is_special(irqnr)) + return; + + gic_complete_ack(irqnr); + + if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { + WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); + gic_deactivate_unhandled(irqnr); + } +} + +/* + * An exception has been taken from a context with IRQs enabled, and this could + * be an IRQ or an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear + * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, + * after handling any NMI but before handling any IRQ. + * + * The entry code has performed IRQ entry, and if an NMI is detected we must + * perform NMI entry/exit around invoking the handler. + */ +static void __gic_handle_irq_from_irqson(struct pt_regs *regs) +{ + bool is_nmi; + u32 irqnr; + + irqnr = gic_read_iar(); + + is_nmi = gic_rpr_is_nmi_prio(); + + if (is_nmi) { + nmi_enter(); + __gic_handle_nmi(irqnr, regs); + nmi_exit(); + } + + if (gic_prio_masking_enabled()) { + gic_pmr_mask_irqs(); + gic_arch_enable_irqs(); + } + + if (!is_nmi) + __gic_handle_irq(irqnr, regs); +} + +/* + * An exception has been taken from a context with IRQs disabled, which can only + * be an NMI. + * + * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave + * DAIF.IF (and ICC_PMR_EL1) unchanged. + * + * The entry code has performed NMI entry. + */ +static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) +{ + u64 pmr; + u32 irqnr; + + /* + * We were in a context with IRQs disabled. However, the + * entry code has set PMR to a value that allows any + * interrupt to be acknowledged, and not just NMIs. This can + * lead to surprising effects if the NMI has been retired in + * the meantime, and that there is an IRQ pending. The IRQ + * would then be taken in NMI context, something that nobody + * wants to debug twice. 
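+	 * (The read of IAR below is what actually acknowledges and
+	 * consumes the highest-priority pending interrupt, whatever it
+	 * happens to be by then.)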
+	 *
+	 * Until we sort this, drop PMR again to a level that will
+	 * actually only allow NMIs before reading IAR, and then
+	 * restore it to what it was.
+	 */
+	pmr = gic_read_pmr();
+	gic_pmr_mask_irqs();
+	isb();
+	irqnr = gic_read_iar();
+	gic_write_pmr(pmr);
+
+	__gic_handle_nmi(irqnr, regs);
+}
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	if (unlikely(gic_supports_nmi_ft2500() && !interrupts_enabled(regs)))
+		__gic_handle_irq_from_irqsoff(regs);
+	else
+		__gic_handle_irq_from_irqson(regs);
+}
+
+static u32 gic_get_pribits(void)
+{
+	u32 pribits;
+
+	pribits = gic_read_ctlr();
+	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
+	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
+	pribits++;
+
+	return pribits;
+}
+
+static bool gic_has_group0(void)
+{
+	u32 val;
+	u32 old_pmr;
+
+	old_pmr = gic_read_pmr();
+
+	/*
+	 * Let's find out if Group0 is under control of EL3 or not by
+	 * setting the highest possible, non-zero priority in PMR.
+	 *
+	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
+	 * order for the CPU interface to set bit 7, and keep the
+	 * actual priority in the non-secure range. In the process, it
+	 * loses the least significant bit and the actual priority
+	 * becomes 0x80. Reading it back returns 0, indicating that
+	 * we don't have access to Group0.
+	 */
+	gic_write_pmr(BIT(8 - gic_get_pribits()));
+	val = gic_read_pmr();
+
+	gic_write_pmr(old_pmr);
+
+	return val != 0;
+}
+
+static void __init gic_dist_init(void)
+{
+	unsigned int i;
+	u64 affinity;
+	void __iomem *base = gic_data.dist_base;
+	u32 val, skt;
+
+	for (skt = 0; skt < MAX_MARS3_SOC_COUNT; skt++) {
+		if (((1U << skt) & mars3_sockets_bitmap) == 0)
+			continue;
+
+		base = mars3_gic_dists[skt].dist_base;
+
+		/* Disable the distributor */
+		writel_relaxed(0, base + GICD_CTLR);
+		gic_do_wait_for_rwp(base);
+
+		/*
+		 * Configure SPIs as non-secure Group-1. This will only matter
+		 * if the GIC only has a single security state. This will not
+		 * do the right thing if the kernel is running in secure mode,
+		 * but that's not the intended use case anyway.
+		 */
+		for (i = 32; i < GIC_LINE_NR; i += 32)
+			writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
+
+		/* Extended SPI range, not handled by the GICv2/GICv3 common code */
+		for (i = 0; i < GIC_ESPI_NR; i += 32) {
+			writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
+			writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
+		}
+
+		for (i = 0; i < GIC_ESPI_NR; i += 32)
+			writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 16)
+			writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
+
+		for (i = 0; i < GIC_ESPI_NR; i += 4)
+			writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);
+
+		/* Now do the common stuff */
+		gic_dist_config(base, GIC_LINE_NR, NULL);
+		gic_do_wait_for_rwp(base);
+
+		val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
+		if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
+			pr_info("Enabling SGIs without active state\n");
+			val |= GICD_CTLR_nASSGIreq;
+		}
+
+		/* Enable distributor with ARE, Group1, and wait for it to drain */
+		writel_relaxed(val, base + GICD_CTLR);
+		gic_dist_wait_for_rwp();
+
+		/*
+		 * Set all global interrupts to the boot CPU only. ARE must be
+		 * enabled. 
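+		 * With ARE, each SPI/ESPI is routed individually through
+		 * GICD_IROUTER{,nE}, written below with the boot CPU's
+		 * affinity as returned by gic_cpu_to_affinity().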
+ */ + affinity = gic_cpu_to_affinity(smp_processor_id()); + for (i = 32; i < GIC_LINE_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); + + for (i = 0; i < GIC_ESPI_NR; i++) + gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); + } +} + +static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) +{ + int ret = -ENODEV; + int i; + + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; + u64 typer; + u32 reg; + + reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (reg != GIC_PIDR2_ARCH_GICv3 && + reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ + pr_warn("No redistributor present @%p\n", ptr); + break; + } + + do { + typer = gic_read_typer(ptr + GICR_TYPER); + ret = fn(gic_data.redist_regions + i, ptr); + if (!ret) + return 0; + + if (gic_data.redist_regions[i].single_redist) + break; + + if (gic_data.redist_stride) { + ptr += gic_data.redist_stride; + } else { + ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ + if (typer & GICR_TYPER_VLPIS) + ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ + } + } while (!(typer & GICR_TYPER_LAST)); + } + + return ret ? -ENODEV : 0; +} + +static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) +{ + unsigned long mpidr; + u64 typer; + u32 aff; + u32 aff2_skt; + u32 redist_skt; + + /* + * Convert affinity to a 32bit value that can be matched to + * GICR_TYPER bits [63:32]. + */ + mpidr = gic_cpu_to_affinity(smp_processor_id()); + + aff = (MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | + MPIDR_AFFINITY_LEVEL(mpidr, 0)); + + aff2_skt = MPIDR_AFFINITY_LEVEL(mpidr, 2) & 0x7; + redist_skt = (((u64)region->phys_base >> MARS3_ADDR_SKTID_SHIFT) & 0x7); + + if (aff2_skt != redist_skt) + return 1; + + typer = gic_read_typer(ptr + GICR_TYPER); + if ((typer >> 32) == aff) { + u64 offset = ptr - region->redist_base; + + raw_spin_lock_init(&gic_data_rdist()->rd_lock); + gic_data_rdist_rd_base() = ptr; + gic_data_rdist()->phys_base = region->phys_base + offset; + + pr_info("CPU%d: found redistributor %lx region %d:%pa\n", + smp_processor_id(), mpidr, + (int)(region - gic_data.redist_regions), + &gic_data_rdist()->phys_base); + return 0; + } + + /* Try next one */ + return 1; +} + +static int gic_populate_rdist(void) +{ + if (gic_iterate_rdists(__gic_populate_rdist) == 0) + return 0; + + /* We couldn't even deal with ourselves... */ + WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", + smp_processor_id(), + (unsigned long)cpu_logical_map(smp_processor_id())); + return -ENODEV; +} + +static int __gic_update_rdist_properties(struct redist_region *region, + void __iomem *ptr) +{ + u64 typer = gic_read_typer(ptr + GICR_TYPER); + u32 ctlr = readl_relaxed(ptr + GICR_CTLR); + + /* Boot-time cleanup */ + if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { + u64 val; + + /* Deactivate any present vPE */ + val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); + if (val & GICR_VPENDBASER_Valid) + gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, + ptr + SZ_128K + GICR_VPENDBASER); + + /* Mark the VPE table as invalid */ + val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); + val &= ~GICR_VPROPBASER_4_1_VALID; + gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); + } + + gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); + + /* + * TYPER.RVPEID implies some form of DirectLPI, no matter what the + * doc says... 
:-/ And CTLR.IR implies another subset of DirectLPI
+	 * that the ITS driver can make use of for LPIs (and not VLPIs).
+	 *
+	 * These are 3 different ways to express the same thing, depending
+	 * on the revision of the architecture and its relaxations over
+	 * time. Just group them under the 'direct_lpi' banner.
+	 */
+	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
+	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
+					   !!(ctlr & GICR_CTLR_IR) |
+					   gic_data.rdists.has_rvpeid);
+	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
+
+	/* Detect non-sensical configurations */
+	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
+		gic_data.rdists.has_direct_lpi = false;
+		gic_data.rdists.has_vlpis = false;
+		gic_data.rdists.has_rvpeid = false;
+	}
+
+	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
+
+	return 1;
+}
+
+static void gic_update_rdist_properties(void)
+{
+	gic_data.ppi_nr = UINT_MAX;
+	gic_iterate_rdists(__gic_update_rdist_properties);
+	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
+		gic_data.ppi_nr = 0;
+	pr_info("GICv3 features: %d PPIs%s%s\n",
+		gic_data.ppi_nr,
+		gic_data.has_rss ? ", RSS" : "",
+		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
+
+	if (gic_data.rdists.has_vlpis)
+		pr_info("GICv4 features: %s%s%s\n",
+			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
+			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
+			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
+}
+
+/* Check whether it's single security state view */
+static inline bool gic_dist_security_disabled(void)
+{
+	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
+}
+
+static void gic_cpu_sys_reg_init(void)
+{
+	int i, cpu = smp_processor_id();
+	u64 mpidr = gic_cpu_to_affinity(cpu);
+	u64 need_rss = MPIDR_RS(mpidr);
+	bool group0;
+	u32 pribits;
+
+	/*
+	 * Need to check that the SRE bit has actually been set. If
+	 * not, it means that SRE is disabled at EL2. We're going to
+	 * die painfully, and there is nothing we can do about it.
+	 *
+	 * Kindly inform the luser.
+	 */
+	if (!gic_enable_sre())
+		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
+
+	pribits = gic_get_pribits();
+
+	group0 = gic_has_group0();
+
+	/* Set priority mask register */
+	if (!gic_prio_masking_enabled()) {
+		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
+	} else if (gic_supports_nmi_ft2500()) {
+		/*
+		 * Mismatched configuration with the boot CPU: the system is
+		 * likely to die, as interrupt masking will not work properly
+		 * on all CPUs.
+		 *
+		 * The boot CPU calls this function before enabling NMI support,
+		 * and as a result we'll never see this warning in the boot path
+		 * for that CPU.
+		 */
+		if (static_branch_unlikely(&gic_nonsecure_priorities))
+			WARN_ON(!group0 || gic_dist_security_disabled());
+		else
+			WARN_ON(group0 && !gic_dist_security_disabled());
+	}
+
+	/*
+	 * Some firmwares hand over to the kernel with the BPR changed from
+	 * its reset value (and with a value large enough to prevent
+	 * any pre-emptive interrupts from working at all). Writing a zero
+	 * to BPR restores its reset value. 
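+	 * (Writes below the implementation's minimum binary point are
+	 * clamped up to that minimum by the CPU interface, so zero is
+	 * always a safe value to write.)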
+	 */
+	gic_write_bpr1(0);
+
+	if (static_branch_likely(&supports_deactivate_key)) {
+		/* EOI drops priority only (mode 1) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
+	} else {
+		/* EOI deactivates interrupt too (mode 0) */
+		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
+	}
+
+	/* Always whack Group0 before Group1 */
+	if (group0) {
+		switch (pribits) {
+		case 8:
+		case 7:
+			write_gicreg(0, ICC_AP0R3_EL1);
+			write_gicreg(0, ICC_AP0R2_EL1);
+			fallthrough;
+		case 6:
+			write_gicreg(0, ICC_AP0R1_EL1);
+			fallthrough;
+		case 5:
+		case 4:
+			write_gicreg(0, ICC_AP0R0_EL1);
+		}
+
+		isb();
+	}
+
+	switch (pribits) {
+	case 8:
+	case 7:
+		write_gicreg(0, ICC_AP1R3_EL1);
+		write_gicreg(0, ICC_AP1R2_EL1);
+		fallthrough;
+	case 6:
+		write_gicreg(0, ICC_AP1R1_EL1);
+		fallthrough;
+	case 5:
+	case 4:
+		write_gicreg(0, ICC_AP1R0_EL1);
+	}
+
+	isb();
+
+	/* ... and let's hit the road... */
+	gic_write_grpen1(1);
+
+	/* Keep the RSS capability status in per_cpu variable */
+	per_cpu(has_rss_ft2500, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
+	for_each_online_cpu(i) {
+		bool have_rss = per_cpu(has_rss_ft2500, i) && per_cpu(has_rss_ft2500, cpu);
+
+		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
+		if (need_rss && (!have_rss))
+			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+				cpu, (unsigned long)mpidr,
+				i, (unsigned long)gic_cpu_to_affinity(i));
+	}
+
+	/*
+	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+	 * UNPREDICTABLE choice of:
+	 * - The write is ignored.
+	 * - The RS field is treated as 0.
+	 */
+	if (need_rss && (!gic_data.has_rss))
+		pr_crit_once("RSS is required but GICD doesn't support it\n");
+}
+
+static bool gicv3_nolpi;
+
+static int __init gicv3_nolpi_cfg(char *buf)
+{
+	return kstrtobool(buf, &gicv3_nolpi);
+}
+early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
+
+static int gic_dist_supports_lpis(void)
+{
+	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
+		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
+		!gicv3_nolpi);
+}
+
+static void gic_cpu_init(void)
+{
+	void __iomem *rbase;
+	int i;
+	unsigned long mpidr;
+
+	/* Register ourselves with the rest of the world */
+	if (gic_populate_rdist())
+		return;
+
+	gic_enable_redist(true);
+
+	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
+	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
+	     "Distributor has extended ranges, but CPU%d doesn't\n",
+	     smp_processor_id());
+
+	rbase = gic_data_rdist_sgi_base();
+
+	/* Configure SGIs/PPIs as non-secure Group-1 */
+	for (i = 0; i < gic_data.ppi_nr + 16; i += 32)
+		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
+
+	gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp);
+
+	mpidr = (unsigned long)cpu_logical_map(smp_processor_id());
+
+	if ((mpidr & 0xFFFF) == 0) { /* both Aff1 and Aff0 are zero */
+		rbase = rbase + 64*SZ_128K; /* skip 64 Redistributors */
+
+		for (i = 0; i < 4; i++) {
+			/* Configure SGIs/PPIs as non-secure Group-1 */
+			writel_relaxed(~0, rbase + GICR_IGROUPR0);
+
+			gic_cpu_config(rbase, gic_data.ppi_nr + 16, NULL);
+			gic_do_wait_for_rwp(rbase - SZ_64K);
+
+			rbase = rbase + SZ_128K;
+		}
+	}
+
+	/* initialise system registers */
+	gic_cpu_sys_reg_init();
+}
+
+#ifdef CONFIG_SMP
+
+#define MPIDR_TO_SGI_RS(mpidr)	(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+	gic_cpu_init();
+
+	if (gic_dist_supports_lpis())
+		phytium_its_cpu_init();
+
+	return 0;
+}
+
+static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
+				   unsigned long cluster_id)
+{
+	int next_cpu, cpu = *base_cpu;
+	unsigned long mpidr;
+	u16 tlist = 0;
+
+	mpidr = gic_cpu_to_affinity(cpu);
+
+	while (cpu < nr_cpu_ids) {
+		tlist |= 1 << (mpidr & 0xf);
+
+		next_cpu = cpumask_next(cpu, mask);
+		if (next_cpu >= nr_cpu_ids)
+			goto out;
+		cpu = next_cpu;
+
+		mpidr = gic_cpu_to_affinity(cpu);
+
+		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
+			cpu--;
+			goto out;
+		}
+	}
+out:
+	*base_cpu = cpu;
+	return tlist;
+}
+
+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
+	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
+		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)
+
+static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
+{
+	u64 val;
+
+	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
+	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
+	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
+	       MPIDR_TO_SGI_RS(cluster_id)		|
+	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
+
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	gic_write_sgi1r(val);
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
+{
+	int cpu;
+
+	if (WARN_ON(d->hwirq >= 16))
+		return;
+
+	/*
+	 * Ensure that stores to Normal memory are visible to the
+	 * other CPUs before issuing the IPI.
+	 */
+	dsb(ishst);
+
+	for_each_cpu(cpu, mask) {
+		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
+		u16 tlist;
+
+		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
+		gic_send_sgi(cluster_id, tlist, d->hwirq);
+	}
+
+	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
+	isb();
+}
+
+static void __init gic_smp_init(void)
+{
+	struct irq_fwspec sgi_fwspec = {
+		.fwnode		= gic_data.fwnode,
+		.param_count	= 1,
+	};
+	int base_sgi;
+
+	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+				  "irqchip/arm/gicv3:starting",
+				  gic_starting_cpu, NULL);
+
+	/* Register all 8 non-secure SGIs */
+	base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
+	if (WARN_ON(base_sgi <= 0))
+		return;
+
+	set_smp_ipi_range(base_sgi, 8);
+}
+
+static int gic_cpumask_select(struct irq_data *d, const struct cpumask *mask_val)
+{
+	unsigned int skt, irq_skt, i;
+	unsigned int cpu, cpus = 0;
+
+	unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0};
+
+	for (i = 0; i < nr_cpu_ids; i++) {
+		skt = (cpu_logical_map(i) >> 16) & 0xff;
+		if (skt < MAX_MARS3_SOC_COUNT)
+			skt_cpu_cnt[skt]++;
+		else if (skt != 0xff)
+			pr_err("socket address: %u is out of range.\n", skt);
+	}
+
+	irq_skt = mars3_irq_to_skt(gic_irq(d));
+
+	if (irq_skt != 0)
+		for (i = 0; i < irq_skt; i++)
+			cpus += skt_cpu_cnt[i];
+
+	cpu = cpumask_any_and(mask_val, cpu_online_mask);
+	cpus = cpus + cpu % skt_cpu_cnt[irq_skt];
+
+	if (is_kdump_kernel()) {
+		skt = (cpu_logical_map(cpu) >> 16) & 0xff;
+		if (irq_skt == skt)
+			return cpu;
+
+		for (i = 0; i < nr_cpu_ids; i++) {
+			skt = (cpu_logical_map(i) >> 16) & 0xff;
+			if (skt < MAX_MARS3_SOC_COUNT) {
+				if (irq_skt == skt)
+					return i;
+			} else if (skt != 0xff)
+				pr_err("socket address: %u is out of range.\n", skt);
+		}
+	}
+	return cpus;
+}
+
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+			    bool force)
+{
+	unsigned int cpu;
+	u32 offset, index;
+	void __iomem *reg;
+	int enabled;
+	u64 val;
+	unsigned int skt;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = gic_cpumask_select(d, mask_val);
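+
+	/*
+	 * gic_cpumask_select() picks an online CPU on the socket that owns
+	 * this interrupt (see mars3_irq_to_skt()), since the route is
+	 * programmed into that socket's own distributor below.
+	 */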
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (gic_irq_in_rdist(d))
+		return -EINVAL;
+
+	/* If interrupt was enabled, disable it first */
+	enabled = gic_peek_irq(d, GICD_ISENABLER);
+	if (enabled)
+		gic_mask_irq(d);
+
+	offset = convert_offset_index(d, GICD_IROUTER, &index);
+
+	skt = mars3_irq_to_skt(gic_irq(d));
+	reg = mars3_gic_dists[skt].dist_base + offset + (index * 8);
+	val = gic_cpu_to_affinity(cpu);
+
+	gic_write_irouter(val, reg);
+
+	/*
+	 * If the interrupt was enabled, enable it again. Otherwise,
+	 * just wait for the distributor to have digested our changes.
+	 */
+	if (enabled)
+		gic_unmask_irq(d);
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#else
+#define gic_set_affinity	NULL
+#define gic_ipi_send_mask	NULL
+#define gic_smp_init()		do { } while (0)
+#endif
+
+static int gic_retrigger(struct irq_data *data)
+{
+	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
+}
+
+#ifdef CONFIG_CPU_PM
+static int gic_cpu_pm_notifier(struct notifier_block *self,
+			       unsigned long cmd, void *v)
+{
+	if (cmd == CPU_PM_EXIT) {
+		if (gic_dist_security_disabled())
+			gic_enable_redist(true);
+		gic_cpu_sys_reg_init();
+	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
+		gic_write_grpen1(0);
+		gic_enable_redist(false);
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block gic_cpu_pm_notifier_block = {
+	.notifier_call = gic_cpu_pm_notifier,
+};
+
+static void gic_cpu_pm_init(void)
+{
+	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
+}
+
+#else
+static inline void gic_cpu_pm_init(void) { }
+#endif /* CONFIG_CPU_PM */
+
+static struct irq_chip gic_chip = {
+	.name			= "GIC-phytium-2500",
+	.irq_mask		= gic_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+	.irq_retrigger		= gic_retrigger,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.irq_nmi_setup		= gic_irq_nmi_setup,
+	.irq_nmi_teardown	= gic_irq_nmi_teardown,
+	.ipi_send_mask		= gic_ipi_send_mask,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static struct irq_chip gic_eoimode1_chip = {
+	.name			= "GICv3-phytium-2500",
+	.irq_mask		= gic_eoimode1_mask_irq,
+	.irq_unmask		= gic_unmask_irq,
+	.irq_eoi		= gic_eoimode1_eoi_irq,
+	.irq_set_type		= gic_set_type,
+	.irq_set_affinity	= gic_set_affinity,
+	.irq_retrigger		= gic_retrigger,
+	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
+	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
+	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
+	.irq_nmi_setup		= gic_irq_nmi_setup,
+	.irq_nmi_teardown	= gic_irq_nmi_teardown,
+	.ipi_send_mask		= gic_ipi_send_mask,
+	.flags			= IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE |
+				  IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
+			      irq_hw_number_t hw)
+{
+	struct irq_chip *chip = &gic_chip;
+	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
+
+	if (static_branch_likely(&supports_deactivate_key))
+		chip = &gic_eoimode1_chip;
+
+	switch (__get_intid_range(hw)) {
+	case SGI_RANGE:
+	case PPI_RANGE:
+	case EPPI_RANGE:
+		irq_set_percpu_devid(irq);
+		irq_domain_set_info(d, irq, hw, chip, d->host_data,
+				    handle_percpu_devid_irq, NULL, NULL);
+		break;
+
+	case SPI_RANGE:
+	case ESPI_RANGE:
+		
irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + irq_set_probe(irq); + irqd_set_single_target(irqd); + break; + + case LPI_RANGE: + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + break; + + default: + return -EPERM; + } + + /* Prevents SW retriggers which mess up the ACK/EOI ordering */ + irqd_set_handle_enforce_irqctx(irqd); + return 0; +} + +static int gic_irq_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (fwspec->param_count == 1 && fwspec->param[0] < 16) { + *hwirq = fwspec->param[0]; + *type = IRQ_TYPE_EDGE_RISING; + return 0; + } + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count < 3) + return -EINVAL; + + switch (fwspec->param[0]) { + case 0: /* SPI */ + *hwirq = fwspec->param[1] + 32; + break; + case 1: /* PPI */ + *hwirq = fwspec->param[1] + 16; + break; + case 2: /* ESPI */ + *hwirq = fwspec->param[1] + ESPI_BASE_INTID; + break; + case 3: /* EPPI */ + *hwirq = fwspec->param[1] + EPPI_BASE_INTID; + break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *hwirq = fwspec->param[1]; + break; + case GIC_IRQ_TYPE_PARTITION: + *hwirq = fwspec->param[1]; + if (fwspec->param[1] >= 16) + *hwirq += EPPI_BASE_INTID - 16; + else + *hwirq += 16; + break; + default: + return -EINVAL; + } + + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + /* + * Make it clear that broken DTs are... broken. + * Partitioned PPIs are an unfortunate exception. + */ + WARN_ON(*type == IRQ_TYPE_NONE && + fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); + return 0; + } + + if (is_fwnode_irqchip(fwspec->fwnode)) { + if (fwspec->param_count != 2) + return -EINVAL; + + if (fwspec->param[0] < 16) { + pr_err(FW_BUG "Illegal GSI%d translation request\n", + fwspec->param[0]); + return -EINVAL; + } + + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + + WARN_ON(*type == IRQ_TYPE_NONE); + return 0; + } + + return -EINVAL; +} + +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct irq_fwspec *fwspec = arg; + + ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) { + ret = gic_irq_domain_map(domain, virq + i, hwirq + i); + if (ret) + return ret; + } + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + +static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, + irq_hw_number_t hwirq) +{ + enum gic_intid_range range; + + if (!gic_data.ppi_descs) + return false; + + if (!is_of_node(fwspec->fwnode)) + return false; + + if (fwspec->param_count < 4 || !fwspec->param[3]) + return false; + + range = __get_intid_range(hwirq); + if (range != PPI_RANGE && range != EPPI_RANGE) + return false; + + return true; +} + +static int gic_irq_domain_select(struct irq_domain *d, + struct irq_fwspec *fwspec, + enum irq_domain_bus_token bus_token) +{ + unsigned int type, ret, ppi_idx; + irq_hw_number_t hwirq; + + /* Not for us */ + if (fwspec->fwnode != d->fwnode) + return 0; + + /* If this is not DT, then we have a single domain */ + if 
(!is_of_node(fwspec->fwnode)) + return 1; + + ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); + if (WARN_ON_ONCE(ret)) + return 0; + + if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) + return d == gic_data.domain; + + /* + * If this is a PPI and we have a 4th (non-null) parameter, + * then we need to match the partition domain. + */ + ppi_idx = __gic_get_ppi_index(hwirq); + return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); +} + +static const struct irq_domain_ops gic_irq_domain_ops = { + .translate = gic_irq_domain_translate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, + .select = gic_irq_domain_select, +}; + +static int partition_domain_translate(struct irq_domain *d, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + unsigned long ppi_intid; + struct device_node *np; + unsigned int ppi_idx; + int ret; + + if (!gic_data.ppi_descs) + return -ENOMEM; + + np = of_find_node_by_phandle(fwspec->param[3]); + if (WARN_ON(!np)) + return -EINVAL; + + ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); + if (WARN_ON_ONCE(ret)) + return 0; + + ppi_idx = __gic_get_ppi_index(ppi_intid); + ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], + of_node_to_fwnode(np)); + if (ret < 0) + return ret; + + *hwirq = ret; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + + return 0; +} + +static const struct irq_domain_ops partition_domain_ops = { + .translate = partition_domain_translate, + .select = gic_irq_domain_select, +}; + +static bool gic_enable_quirk_msm8996(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; + + return true; +} + +static bool gic_enable_quirk_mtk_gicr(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_MTK_GICR_SAVE; + + return true; +} + +static bool gic_enable_quirk_cavium_38539(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; + + return true; +} + +static bool gic_enable_quirk_hip06_07(void *data) +{ + struct gic_chip_data *d = data; + + /* + * HIP06 GICD_IIDR clashes with GIC-600 product number (despite + * not being an actual ARM implementation). The saving grace is + * that GIC-600 doesn't have ESPI, so nothing to do in that case. + * HIP07 doesn't even have a proper IIDR, and still pretends to + * have ESPI. In both cases, put them right. + */ + if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { + /* Zero both ESPI and the RES0 field next to it... 
*/ + d->rdists.gicd_typer &= ~GENMASK(9, 8); + return true; + } + + return false; +} + +#define T241_CHIPN_MASK GENMASK_ULL(45, 44) +#define T241_CHIP_GICDA_OFFSET 0x1580000 +#define SMCCC_SOC_ID_T241 0x036b0241 + +static bool gic_enable_quirk_nvidia_t241(void *data) +{ + s32 soc_id = arm_smccc_get_soc_id_version(); + unsigned long chip_bmask = 0; + phys_addr_t phys; + u32 i; + + /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ + if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) + return false; + + /* Find the chips based on GICR regions PHYS addr */ + for (i = 0; i < gic_data.nr_redist_regions; i++) { + chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, + (u64)gic_data.redist_regions[i].phys_base)); + } + + if (hweight32(chip_bmask) < 3) + return false; + + /* Setup GICD alias regions */ + for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { + if (chip_bmask & BIT(i)) { + phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; + phys |= FIELD_PREP(T241_CHIPN_MASK, i); + t241_dist_base_alias[i] = ioremap(phys, SZ_64K); + WARN_ON_ONCE(!t241_dist_base_alias[i]); + } + } + static_branch_enable(&gic_nvidia_t241_erratum); + return true; +} + +static bool gic_enable_quirk_asr8601(void *data) +{ + struct gic_chip_data *d = data; + + d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; + + return true; +} + +static bool gic_enable_quirk_arm64_2941627(void *data) +{ + static_branch_enable(&gic_arm64_2941627_erratum); + return true; +} + +static bool rd_set_non_coherent(void *data) +{ + struct gic_chip_data *d = data; + + d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; + return true; +} + +static const struct gic_quirk gic_quirks[] = { + { + .desc = "GICv3: Qualcomm MSM8996 broken firmware", + .compatible = "qcom,msm8996-gic-v3", + .init = gic_enable_quirk_msm8996, + }, + { + .desc = "GICv3: ASR erratum 8601001", + .compatible = "asr,asr8601-gic-v3", + .init = gic_enable_quirk_asr8601, + }, + { + .desc = "GICv3: Mediatek Chromebook GICR save problem", + .property = "mediatek,broken-save-restore-fw", + .init = gic_enable_quirk_mtk_gicr, + }, + { + .desc = "GICv3: HIP06 erratum 161010803", + .iidr = 0x0204043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + .desc = "GICv3: HIP07 erratum 161010803", + .iidr = 0x00000000, + .mask = 0xffffffff, + .init = gic_enable_quirk_hip06_07, + }, + { + /* + * Reserved register accesses generate a Synchronous + * External Abort. 
This erratum applies to: + * - ThunderX: CN88xx + * - OCTEON TX: CN83xx, CN81xx + * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* + */ + .desc = "GICv3: Cavium erratum 38539", + .iidr = 0xa000034c, + .mask = 0xe8f00fff, + .init = gic_enable_quirk_cavium_38539, + }, + { + .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", + .iidr = 0x0402043b, + .mask = 0xffffffff, + .init = gic_enable_quirk_nvidia_t241, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [0,1] + * + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0400043b, + .mask = 0xff0e0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + /* + * GIC-700: 2941627 workaround - IP variant [2] + */ + .desc = "GICv3: ARM64 erratum 2941627", + .iidr = 0x0402043b, + .mask = 0xff0f0fff, + .init = gic_enable_quirk_arm64_2941627, + }, + { + .desc = "GICv3: non-coherent attribute", + .property = "dma-noncoherent", + .init = rd_set_non_coherent, + }, + { + } +}; + +static void gic_enable_nmi_support(void) +{ + int i; + + if (!gic_prio_masking_enabled()) + return; + + if (gic_data.flags & FLAGS_WORKAROUND_MTK_GICR_SAVE) { + pr_warn("Skipping NMI enable due to firmware issues\n"); + return; + } + + ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); + if (!ppi_nmi_refs) + return; + + for (i = 0; i < gic_data.ppi_nr; i++) + refcount_set(&ppi_nmi_refs[i], 0); + + pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", + gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); + + /* + * How priority values are used by the GIC depends on two things: + * the security state of the GIC (controlled by the GICD_CTRL.DS bit) + * and if Group 0 interrupts can be delivered to Linux in the non-secure + * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the + * ICC_PMR_EL1 register and the priority that software assigns to + * interrupts: + * + * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority + * ----------------------------------------------------------- + * 1 | - | unchanged | unchanged + * ----------------------------------------------------------- + * 0 | 1 | non-secure | non-secure + * ----------------------------------------------------------- + * 0 | 0 | unchanged | non-secure + * + * where non-secure means that the value is right-shifted by one and the + * MSB bit set, to make it fit in the non-secure priority range. + * + * In the first two cases, where ICC_PMR_EL1 and the interrupt priority + * are both either modified or unchanged, we can use the same set of + * priorities. + * + * In the last case, where only the interrupt priorities are modified to + * be in the non-secure range, we use a different PMR value to mask IRQs + * and the rest of the values that we use remain unchanged. 
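+	 * For example, in the last case a Group 1 priority written as 0xc0
+	 * is presented by the distributor as (0xc0 >> 1) | 0x80 = 0xe0,
+	 * while the PMR value the kernel programs is used unchanged.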
+ */ + if (gic_has_group0() && !gic_dist_security_disabled()) + static_branch_enable(&gic_nonsecure_priorities); + + static_branch_enable(&supports_pseudo_nmis_ft2500); + + if (static_branch_likely(&supports_deactivate_key)) + gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; + else + gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; +} + +static int __init gic_init_bases(phys_addr_t dist_phys_base, + void __iomem *dist_base, + struct redist_region *rdist_regs, + u32 nr_redist_regions, + u64 redist_stride, + struct fwnode_handle *handle) +{ + u32 typer; + int err; + + if (!is_hyp_mode_available()) + static_branch_disable(&supports_deactivate_key); + + if (static_branch_likely(&supports_deactivate_key)) + pr_info("GIC: Using split EOI/Deactivate mode\n"); + + gic_data.fwnode = handle; + gic_data.dist_phys_base = dist_phys_base; + gic_data.dist_base = dist_base; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; + gic_data.redist_stride = redist_stride; + + /* + * Find out how many interrupts are supported. + */ + typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); + gic_data.rdists.gicd_typer = typer; + + gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), + gic_quirks, &gic_data); + + pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); + pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); + + /* + * ThunderX1 explodes on reading GICD_TYPER2, in violation of the + * architecture spec (which says that reserved registers are RES0). + */ + if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) + gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); + + gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, + &gic_data); + gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); + if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { + /* Disable GICv4.x features for the erratum T241-FABRIC-4 */ + gic_data.rdists.has_rvpeid = true; + gic_data.rdists.has_vlpis = true; + gic_data.rdists.has_direct_lpi = true; + gic_data.rdists.has_vpend_valid_dirty = true; + } + + if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { + err = -ENOMEM; + goto out_free; + } + + irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); + + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + + if (typer & GICD_TYPER_MBIS) { + err = mbi_init(handle, gic_data.domain); + if (err) + pr_err("Failed to initialize MBIs\n"); + } + + set_handle_irq(gic_handle_irq); + + gic_update_rdist_properties(); + + gic_dist_init(); + gic_cpu_init(); + gic_smp_init(); + gic_cpu_pm_init(); + + if (gic_dist_supports_lpis()) { + phytium_its_init(handle, &gic_data.rdists, gic_data.domain); + phytium_its_cpu_init(); + its_lpi_memreserve_init(); + } else { + if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) + gicv2m_init(handle, gic_data.domain); + } + + gic_enable_nmi_support(); + + return 0; + +out_free: + if (gic_data.domain) + irq_domain_remove(gic_data.domain); + free_percpu(gic_data.rdists.rdist); + return err; +} + +static int __init gic_validate_dist_version(void __iomem *dist_base) +{ + u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; + + if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) + return -ENODEV; + + return 0; +} + +/* Create all possible partitions at boot time */ +static void __init gic_populate_ppi_partitions(struct device_node *gic_node) +{ + struct device_node *parts_node, *child_part; + int part_idx = 0, i; + int nr_parts; + struct partition_affinity *parts; + + parts_node 
= of_get_child_by_name(gic_node, "ppi-partitions"); + if (!parts_node) + return; + + gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); + if (!gic_data.ppi_descs) + goto out_put_node; + + nr_parts = of_get_child_count(parts_node); + + if (!nr_parts) + goto out_put_node; + + parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); + if (WARN_ON(!parts)) + goto out_put_node; + + for_each_child_of_node(parts_node, child_part) { + struct partition_affinity *part; + int n; + + part = &parts[part_idx]; + + part->partition_id = of_node_to_fwnode(child_part); + + pr_info("GIC: PPI partition %pOFn[%d] { ", + child_part, part_idx); + + n = of_property_count_elems_of_size(child_part, "affinity", + sizeof(u32)); + WARN_ON(n <= 0); + + for (i = 0; i < n; i++) { + int err, cpu; + u32 cpu_phandle; + struct device_node *cpu_node; + + err = of_property_read_u32_index(child_part, "affinity", + i, &cpu_phandle); + if (WARN_ON(err)) + continue; + + cpu_node = of_find_node_by_phandle(cpu_phandle); + if (WARN_ON(!cpu_node)) + continue; + + cpu = of_cpu_node_to_id(cpu_node); + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); + continue; + } + + pr_info("%pOF[%d] ", cpu_node, cpu); + + cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); + } + + pr_info("}\n"); + part_idx++; + } + + for (i = 0; i < gic_data.ppi_nr; i++) { + unsigned int irq; + struct partition_desc *desc; + struct irq_fwspec ppi_fwspec = { + .fwnode = gic_data.fwnode, + .param_count = 3, + .param = { + [0] = GIC_IRQ_TYPE_PARTITION, + [1] = i, + [2] = IRQ_TYPE_NONE, + }, + }; + + irq = irq_create_fwspec_mapping(&ppi_fwspec); + if (WARN_ON(!irq)) + continue; + desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, + irq, &partition_domain_ops); + if (WARN_ON(!desc)) + continue; + + gic_data.ppi_descs[i] = desc; + } + +out_put_node: + of_node_put(parts_node); +} + +static void __init gic_of_setup_kvm_info(struct device_node *node) +{ + int ret; + struct resource r; + u32 gicv_idx; + + gic_v3_kvm_info.type = GIC_V3; + + gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); + if (!gic_v3_kvm_info.maint_irq) + return; + + if (of_property_read_u32(node, "#redistributor-regions", + &gicv_idx)) + gicv_idx = 1; + + gicv_idx += 3; /* Also skip GICD, GICC, GICH */ + ret = of_address_to_resource(node, gicv_idx, &r); + if (!ret) + gic_v3_kvm_info.vcpu = r; + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static void gic_request_region(resource_size_t base, resource_size_t size, + const char *name) +{ + if (!request_mem_region(base, size, name)) + pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", + name, &base); +} + +static void __iomem *gic_of_iomap(struct device_node *node, int idx, + const char *name, struct resource *res) +{ + void __iomem *base; + int ret; + + ret = of_address_to_resource(node, idx, res); + if (ret) + return IOMEM_ERR_PTR(ret); + + gic_request_region(res->start, resource_size(res), name); + base = of_iomap(node, idx); + + return base ?: IOMEM_ERR_PTR(-ENOMEM); +} + +static int __init gic_of_init(struct device_node *node, struct device_node *parent) +{ + phys_addr_t dist_phys_base; + void __iomem *dist_base; + struct redist_region *rdist_regs; + struct resource res; + u64 redist_stride; + u32 nr_redist_regions; + int err, i; + unsigned long skt; + + dist_base = gic_of_iomap(node, 0, "GICD", &res); + if (IS_ERR(dist_base)) { + pr_err("%pOF: unable to map gic dist 
registers\n", node); + return PTR_ERR(dist_base); + } + + dist_phys_base = res.start; + + err = gic_validate_dist_version(dist_base); + if (err) { + pr_err("%pOF: no distributor detected, giving up\n", node); + goto out_unmap_dist; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("Error: No GIC Distributor in FDT\n"); + goto out_unmap_dist; + } + + mars3_gic_dists[0].phys_base = res.start; + mars3_gic_dists[0].size = resource_size(&res); + mars3_gic_dists[0].dist_base = dist_base; + + if (of_property_read_u32(node, "#mars3_soc_bitmap", &mars3_sockets_bitmap)) + mars3_sockets_bitmap = 0x1; + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; + + rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), + GFP_KERNEL); + if (!rdist_regs) { + err = -ENOMEM; + goto out_unmap_dist; + } + + for (i = 0; i < nr_redist_regions; i++) { + rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); + if (IS_ERR(rdist_regs[i].redist_base)) { + pr_err("%pOF: couldn't map region %d\n", node, i); + err = -ENODEV; + goto out_unmap_rdist; + } + rdist_regs[i].phys_base = res.start; + } + + if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) + redist_stride = 0; + + err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, + nr_redist_regions, redist_stride, &node->fwnode); + if (err) + goto out_unmap_rdist; + + gic_populate_ppi_partitions(node); + + if (static_branch_likely(&supports_deactivate_key)) + gic_of_setup_kvm_info(node); + return 0; + +out_unmap_rdist: + for (i = 0; i < nr_redist_regions; i++) + if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) + iounmap(rdist_regs[i].redist_base); + kfree(rdist_regs); +out_unmap_dist: + iounmap(dist_base); + return err; +} + +IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); + +#ifdef CONFIG_ACPI +static struct +{ + void __iomem *dist_base; + struct redist_region *redist_regs; + u32 nr_redist_regions; + bool single_redist; + int enabled_rdists; + u32 maint_irq; + int maint_irq_mode; + phys_addr_t vcpu_base; +} acpi_data __initdata; + +static int gic_mars3_sockets_bitmap(void) +{ + unsigned int skt, i; + int skt_bitmap = 0; + + unsigned int skt_cpu_cnt[MAX_MARS3_SOC_COUNT] = {0}; + + for (i = 0; i < nr_cpu_ids; i++) { + skt = (cpu_logical_map(i) >> 16) & 0xff; + if ((skt >= 0) && (skt < MAX_MARS3_SOC_COUNT)) + skt_cpu_cnt[skt]++; + else if (skt != 0xff) + pr_err("socket address: %d is out of range.", skt); + } + + for (i = 0; i < MAX_MARS3_SOC_COUNT; i++) + if (skt_cpu_cnt[i] > 0) + skt_bitmap |= (1 << i); + + return skt_bitmap; +} + +static void __init +gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) +{ + static int count; + + acpi_data.redist_regs[count].phys_base = phys_base; + acpi_data.redist_regs[count].redist_base = redist_base; + acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; + count++; +} + +static int __init +gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_redistributor *redist = + (struct acpi_madt_generic_redistributor 
*)header;
+	void __iomem *redist_base;
+
+	redist_base = ioremap(redist->base_address, redist->length);
+	if (!redist_base) {
+		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
+		return -ENOMEM;
+	}
+	gic_request_region(redist->base_address, redist->length, "GICR");
+
+	gic_acpi_register_redist(redist->base_address, redist_base);
+	return 0;
+}
+
+static int __init
+gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
+			 const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+		(struct acpi_madt_generic_interrupt *)header;
+	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
+	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
+	void __iomem *redist_base;
+
+	/* A GICC entry without ACPI_MADT_ENABLED is unusable, so skip it */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	redist_base = ioremap(gicc->gicr_base_address, size);
+	if (!redist_base)
+		return -ENOMEM;
+	gic_request_region(gicc->gicr_base_address, size, "GICR");
+
+	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
+	return 0;
+}
+
+static int __init gic_acpi_collect_gicr_base(void)
+{
+	acpi_tbl_entry_handler redist_parser;
+	enum acpi_madt_type type;
+
+	if (acpi_data.single_redist) {
+		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
+		redist_parser = gic_acpi_parse_madt_gicc;
+	} else {
+		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
+		redist_parser = gic_acpi_parse_madt_redist;
+	}
+
+	/* Collect redistributor base addresses in GICR entries */
+	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
+		return 0;
+
+	pr_info("No valid GICR entries exist\n");
+	return -ENODEV;
+}
+
+static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
+				      const unsigned long end)
+{
+	/* Subtable presence means that redist exists, that's it */
+	return 0;
+}
+
+static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
+				      const unsigned long end)
+{
+	struct acpi_madt_generic_interrupt *gicc =
+		(struct acpi_madt_generic_interrupt *)header;
+
+	/*
+	 * If GICC is enabled and has a valid GICR base address, then it
+	 * means the GICR base is presented via the GICC.
+	 */
+	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
+		acpi_data.enabled_rdists++;
+		return 0;
+	}
+
+	/*
+	 * It's perfectly valid for firmware to pass a disabled GICC entry;
+	 * the driver should not treat this as an error and should skip the
+	 * entry instead of failing the probe.
+	 */
+	if (!(gicc->flags & ACPI_MADT_ENABLED))
+		return 0;
+
+	return -ENODEV;
+}
+
+static int __init gic_acpi_count_gicr_regions(void)
+{
+	int count;
+
+	/*
+	 * Count how many redistributor regions we have. It is not allowed
+	 * to mix redistributor descriptions: GICR and GICC subtables have
+	 * to be mutually exclusive.
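+	 * We therefore probe for GICR subtables first, and only fall back
+	 * to counting the per-CPU GICR base addresses found in the GICC
+	 * entries when no GICR subtable exists.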
+ */ + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, + gic_acpi_match_gicr, 0); + if (count > 0) { + acpi_data.single_redist = false; + return count; + } + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_match_gicc, 0); + if (count > 0) { + acpi_data.single_redist = true; + count = acpi_data.enabled_rdists; + } + + return count; +} + +static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, + struct acpi_probe_entry *ape) +{ + struct acpi_madt_generic_distributor *dist; + int count; + + dist = (struct acpi_madt_generic_distributor *)header; + if (dist->version != ape->driver_data) + return false; + + /* We need to do that exercise anyway, the sooner the better */ + count = gic_acpi_count_gicr_regions(); + if (count <= 0) + return false; + + acpi_data.nr_redist_regions = count; + return true; +} + +static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, + const unsigned long end) +{ + struct acpi_madt_generic_interrupt *gicc = + (struct acpi_madt_generic_interrupt *)header; + int maint_irq_mode; + static int first_madt = true; + + /* Skip unusable CPUs */ + if (!(gicc->flags & ACPI_MADT_ENABLED)) + return 0; + + maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? + ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; + + if (first_madt) { + first_madt = false; + + acpi_data.maint_irq = gicc->vgic_interrupt; + acpi_data.maint_irq_mode = maint_irq_mode; + acpi_data.vcpu_base = gicc->gicv_base_address; + + return 0; + } + + /* + * The maintenance interrupt and GICV should be the same for every CPU + */ + if ((acpi_data.maint_irq != gicc->vgic_interrupt) || + (acpi_data.maint_irq_mode != maint_irq_mode) || + (acpi_data.vcpu_base != gicc->gicv_base_address)) + return -EINVAL; + + return 0; +} + +static bool __init gic_acpi_collect_virt_info(void) +{ + int count; + + count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, + gic_acpi_parse_virt_madt_gicc, 0); + + return (count > 0); +} + +#define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) +#define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) +#define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) + +static void __init gic_acpi_setup_kvm_info(void) +{ + int irq; + + if (!gic_acpi_collect_virt_info()) { + pr_warn("Unable to get hardware information used for virtualization\n"); + return; + } + + gic_v3_kvm_info.type = GIC_V3; + + irq = acpi_register_gsi(NULL, acpi_data.maint_irq, + acpi_data.maint_irq_mode, + ACPI_ACTIVE_HIGH); + if (irq <= 0) + return; + + gic_v3_kvm_info.maint_irq = irq; + + if (acpi_data.vcpu_base) { + struct resource *vcpu = &gic_v3_kvm_info.vcpu; + + vcpu->flags = IORESOURCE_MEM; + vcpu->start = acpi_data.vcpu_base; + vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; + } + + gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; + gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; + vgic_set_kvm_info(&gic_v3_kvm_info); +} + +static struct fwnode_handle *gsi_domain_handle; + +static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) +{ + return gsi_domain_handle; +} + +static int __init +gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) +{ + struct acpi_madt_generic_distributor *dist; + size_t size; + int i, err; + int skt; + + /* Get distributor base address */ + dist = (struct acpi_madt_generic_distributor *)header; + acpi_data.dist_base = ioremap(dist->base_address, + ACPI_GICV3_DIST_MEM_SIZE); + if (!acpi_data.dist_base) { + pr_err("Unable to map GICD registers\n"); + return -ENOMEM; + } + 
gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); + + err = gic_validate_dist_version(acpi_data.dist_base); + if (err) { + pr_err("No distributor detected at @%p, giving up\n", + acpi_data.dist_base); + goto out_dist_unmap; + } + + mars3_gic_dists[0].phys_base = dist->base_address; + mars3_gic_dists[0].size = ACPI_GICV3_DIST_MEM_SIZE; + mars3_gic_dists[0].dist_base = acpi_data.dist_base; + +#ifdef CONFIG_ACPI + mars3_sockets_bitmap = gic_mars3_sockets_bitmap(); + if (is_kdump_kernel()) + mars3_sockets_bitmap = 0x3; + + if (mars3_sockets_bitmap == 0) { + mars3_sockets_bitmap = 0x1; + pr_err("No socket, please check cpus MPIDR_AFFINITY_LEVEL!!!"); + } else + pr_info("mars3_sockets_bitmap = 0x%x\n", mars3_sockets_bitmap); +#endif + + for (skt = 1; skt < MAX_MARS3_SOC_COUNT; skt++) { + if (((1U << skt) & mars3_sockets_bitmap) == 0) + continue; + + mars3_gic_dists[skt].phys_base = ((unsigned long)skt << MARS3_ADDR_SKTID_SHIFT) | + mars3_gic_dists[0].phys_base; + mars3_gic_dists[skt].size = mars3_gic_dists[0].size; + mars3_gic_dists[skt].dist_base = ioremap(mars3_gic_dists[skt].phys_base, + mars3_gic_dists[skt].size); + } + + size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; + acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); + if (!acpi_data.redist_regs) { + err = -ENOMEM; + goto out_dist_unmap; + } + + err = gic_acpi_collect_gicr_base(); + if (err) + goto out_redist_unmap; + + gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); + if (!gsi_domain_handle) { + err = -ENOMEM; + goto out_redist_unmap; + } + + err = gic_init_bases(dist->base_address, acpi_data.dist_base, + acpi_data.redist_regs, acpi_data.nr_redist_regions, + 0, gsi_domain_handle); + if (err) + goto out_fwhandle_free; + + acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); + + if (static_branch_likely(&supports_deactivate_key)) + gic_acpi_setup_kvm_info(); + + return 0; + +out_fwhandle_free: + irq_domain_free_fwnode(gsi_domain_handle); +out_redist_unmap: + for (i = 0; i < acpi_data.nr_redist_regions; i++) + if (acpi_data.redist_regs[i].redist_base) + iounmap(acpi_data.redist_regs[i].redist_base); + kfree(acpi_data.redist_regs); +out_dist_unmap: + iounmap(acpi_data.dist_base); + return err; +} +IRQCHIP_ACPI_DECLARE(gic_phyt_2500, ACPI_MADT_TYPE_PHYTIUM_2500, + acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, + gic_acpi_init); +#endif diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9a7a74239eabb7cd3d2a3a077316d54833ec18cb..cefe81cbe493fd1c1a75dad3751751b680fc81c3 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -37,6 +37,10 @@ #include #include +#ifdef CONFIG_ARCH_PHYTIUM +#include +#endif + #include "irq-gic-common.h" #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) @@ -1725,6 +1729,11 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) msg->address_hi = upper_32_bits(addr); msg->data = its_get_event_id(d); +#ifdef CONFIG_ARCH_PHYTIUM + if (typeof_ft2000plus()) + return; +#endif + iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg); } diff --git a/drivers/irqchip/irq-loongarch-cpu.c b/drivers/irqchip/irq-loongarch-cpu.c index 9d8f2c40604310d6d92fdea667c2ea0322ece015..e174963dc7b905408ae3e5f3e76c3144ce6c3357 100644 --- a/drivers/irqchip/irq-loongarch-cpu.c +++ b/drivers/irqchip/irq-loongarch-cpu.c @@ -141,7 +141,12 @@ static int __init acpi_cascade_irqdomain_init(void) return 0; } -static int __init cpuintc_acpi_init(union acpi_subtable_headers 
*header, +struct irq_domain *get_cpudomain(void) +{ + return irq_domain; +} + +int __init cpuintc_acpi_init(union acpi_subtable_headers *header, const unsigned long end) { int ret; diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c index 1623cd77917523f42419cb958ecbc0ce32ba8809..c5c26b8e8d0c5cd6ca29d3ad9928f94d33ab3b48 100644 --- a/drivers/irqchip/irq-loongson-eiointc.c +++ b/drivers/irqchip/irq-loongson-eiointc.c @@ -23,6 +23,16 @@ #define EIOINTC_REG_ISR 0x1800 #define EIOINTC_REG_ROUTE 0x1c00 +#define EXTIOI_VIRT_FEATURES 0x40000000 +#define EXTIOI_HAS_VIRT_EXTENSION 0 +#define EXTIOI_HAS_ENABLE_OPTION 1 +#define EXTIOI_HAS_INT_ENCODE 2 +#define EXTIOI_HAS_CPU_ENCODE 3 +#define EXTIOI_VIRT_CONFIG 0x40000004 +#define EXTIOI_ENABLE 1 +#define EXTIOI_ENABLE_INT_ENCODE 2 +#define EXTIOI_ENABLE_CPU_ENCODE 3 + #define VEC_REG_COUNT 4 #define VEC_COUNT_PER_REG 64 #define VEC_COUNT (VEC_REG_COUNT * VEC_COUNT_PER_REG) @@ -41,6 +51,7 @@ struct eiointc_priv { cpumask_t cpuspan_map; struct fwnode_handle *domain_handle; struct irq_domain *eiointc_domain; + bool cpu_encoded; }; static struct eiointc_priv *eiointc_priv[MAX_IO_PICS]; @@ -56,7 +67,9 @@ static void eiointc_enable(void) static int cpu_to_eio_node(int cpu) { - return cpu_logical_map(cpu) / CORES_PER_EIO_NODE; + int cores = (cpu_has_hypervisor ? MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); + + return cpu_logical_map(cpu) / cores; } static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map) @@ -87,6 +100,20 @@ static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, static DEFINE_RAW_SPINLOCK(affinity_lock); +static void virt_extioi_set_irq_route(int irq, unsigned int cpu) +{ + int data; + + /* + * get irq route info for continuous 4 vectors + * and set affinity for specified vector + */ + data = iocsr_read32(EIOINTC_REG_ROUTE + (irq & ~3)); + data &= ~(0xff << ((irq & 3) * 8)); + data |= cpu_logical_map(cpu) << ((irq & 3) * 8); + iocsr_write32(data, EIOINTC_REG_ROUTE + (irq & ~3)); +} + static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force) { unsigned int cpu; @@ -109,16 +136,22 @@ static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *af vector = d->hwirq; regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2); - /* Mask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), - 0x0, priv->node * CORES_PER_EIO_NODE); - - /* Set route for target vector */ - eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); - - /* Unmask target vector */ - csr_any_send(regaddr, EIOINTC_ALL_ENABLE, - 0x0, priv->node * CORES_PER_EIO_NODE); + if (priv->cpu_encoded) { + iocsr_write32(EIOINTC_ALL_ENABLE & ~BIT(vector & 0x1F), regaddr); + virt_extioi_set_irq_route(vector, cpu); + iocsr_write32(EIOINTC_ALL_ENABLE, regaddr); + } else { + /* Mask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)), + 0x0, priv->node * CORES_PER_EIO_NODE); + + /* Set route for target vector */ + eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map); + + /* Unmask target vector */ + csr_any_send(regaddr, EIOINTC_ALL_ENABLE, + 0x0, priv->node * CORES_PER_EIO_NODE); + } irq_data_update_effective_affinity(d, cpumask_of(cpu)); @@ -145,13 +178,14 @@ static int eiointc_router_init(unsigned int cpu) uint32_t data; uint32_t node = cpu_to_eio_node(cpu); int index = eiointc_index(node); + int cores = (cpu_has_hypervisor ? 
MAX_CORES_PER_EIO_NODE : CORES_PER_EIO_NODE); if (index < 0) { pr_err("Error: invalid nodemap!\n"); return -1; } - if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) { + if ((cpu_logical_map(cpu) % cores) == 0) { eiointc_enable(); for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) { @@ -167,7 +201,9 @@ static int eiointc_router_init(unsigned int cpu) for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) { /* Route to Node-0 Core-0 */ - if (index == 0) + if (eiointc_priv[index]->cpu_encoded) + bit = cpu_logical_map(0); + else if (index == 0) bit = BIT(cpu_logical_map(0)); else bit = (eiointc_priv[index]->node << 4) | 1; @@ -198,6 +234,12 @@ static void eiointc_irq_dispatch(struct irq_desc *desc) for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) { pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3)); + + /* Skip handling if pending bitmap is zero */ + if (!pending) + continue; + + /* Clear the IRQs */ iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3)); while (pending) { int bit = __ffs(pending); @@ -241,7 +283,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq, int ret; unsigned int i, type; unsigned long hwirq = 0; - struct eiointc *priv = domain->host_data; + struct eiointc_priv *priv = domain->host_data; ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type); if (ret) @@ -304,23 +346,7 @@ static int eiointc_suspend(void) static void eiointc_resume(void) { - int i, j; - struct irq_desc *desc; - struct irq_data *irq_data; - eiointc_router_init(0); - - for (i = 0; i < nr_pics; i++) { - for (j = 0; j < eiointc_priv[0]->vec_count; j++) { - desc = irq_resolve_mapping(eiointc_priv[i]->eiointc_domain, j); - if (desc && desc->handle_irq && desc->handle_irq != handle_bad_irq) { - raw_spin_lock(&desc->lock); - irq_data = irq_domain_get_irq_data(eiointc_priv[i]->eiointc_domain, irq_desc_get_irq(desc)); - eiointc_set_irq_affinity(irq_data, irq_data->common->affinity, 0); - raw_spin_unlock(&desc->lock); - } - } - } } static struct syscore_ops eiointc_syscore_ops = { @@ -328,7 +354,7 @@ static struct syscore_ops eiointc_syscore_ops = { .resume = eiointc_resume, }; -static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, +int __init pch_pic_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header; @@ -341,7 +367,7 @@ static int __init pch_pic_parse_madt(union acpi_subtable_headers *header, return 0; } -static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, +int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end) { struct irq_domain *parent; @@ -379,7 +405,7 @@ static int __init acpi_cascade_irqdomain_init(void) static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, u64 node_map) { - int i; + int i, val; node_map = node_map ? 
node_map : -1ULL; + for_each_possible_cpu(i) { @@ -399,6 +425,17 @@ static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq, return -ENOMEM; } + + if (cpu_has_hypervisor) { + val = iocsr_read32(EXTIOI_VIRT_FEATURES); + if (val & BIT(EXTIOI_HAS_CPU_ENCODE)) { + val = iocsr_read32(EXTIOI_VIRT_CONFIG); + val |= BIT(EXTIOI_ENABLE_CPU_ENCODE); + iocsr_write32(val, EXTIOI_VIRT_CONFIG); + priv->cpu_encoded = true; + pr_info("loongson-extioi: enable cpu encoding\n"); + } + } + eiointc_priv[nr_pics++] = priv; eiointc_router_init(0); irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index e4b33aed1c97b3461e8f66f734f1785743a013f4..0262cbefe9ddbdcdc48550e3699447b5bf13a0cf 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -28,7 +28,7 @@ #define LIOINTC_INTC_CHIP_START 0x20 -#define LIOINTC_REG_INTC_STATUS (LIOINTC_INTC_CHIP_START + 0x20) +#define LIOINTC_REG_INTC_STATUS(cpuid) (LIOINTC_INTC_CHIP_START + 0x20 + (cpuid) * 8) #define LIOINTC_REG_INTC_EN_STATUS (LIOINTC_INTC_CHIP_START + 0x04) #define LIOINTC_REG_INTC_ENABLE (LIOINTC_INTC_CHIP_START + 0x08) #define LIOINTC_REG_INTC_DISABLE (LIOINTC_INTC_CHIP_START + 0x0c) @@ -217,7 +217,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision, goto out_free_priv; for (i = 0; i < LIOINTC_NUM_CORES; i++) - priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS; + priv->core_isr[i] = base + LIOINTC_REG_INTC_STATUS(i); for (i = 0; i < LIOINTC_NUM_PARENT; i++) priv->handler[i].parent_int_map = parent_int_map[i]; diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c index 63db8e2172e017031396003c9a1bb32be8bf59ab..3b150b6121fc3b292ff6a903441ec91d28715066 100644 --- a/drivers/irqchip/irq-loongson-pch-pic.c +++ b/drivers/irqchip/irq-loongson-pch-pic.c @@ -33,6 +33,10 @@ #define PIC_COUNT (PIC_COUNT_PER_REG * PIC_REG_COUNT) #define PIC_REG_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG) #define PIC_REG_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG) +#define PIC_COUNT_PER_REG64 64 +#define PIC_REG64_COUNT 1 +#define PIC_REG64_IDX(irq_id) ((irq_id) / PIC_COUNT_PER_REG64) +#define PIC_REG64_BIT(irq_id) ((irq_id) % PIC_COUNT_PER_REG64) static int nr_pics; @@ -52,6 +56,11 @@ static struct pch_pic *pch_pic_priv[MAX_IO_PICS]; struct fwnode_handle *pch_pic_handle[MAX_IO_PICS]; +struct irq_domain *get_pchpic_irq_domain(void) +{ + return pch_pic_priv[0]->pic_domain; +} + static void pch_pic_bitset(struct pch_pic *priv, int offset, int bit) { u32 reg; @@ -88,8 +97,8 @@ static void pch_pic_unmask_irq(struct irq_data *d) { struct pch_pic *priv = irq_data_get_irq_chip_data(d); - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writeq(BIT(PIC_REG64_BIT(d->hwirq)), + priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); irq_chip_unmask_parent(d); pch_pic_bitclr(priv, PCH_PIC_MASK, d->hwirq); @@ -136,8 +145,8 @@ static void pch_pic_ack_irq(struct irq_data *d) reg = readl(priv->base + PCH_PIC_EDGE + PIC_REG_IDX(d->hwirq) * 4); if (reg & BIT(PIC_REG_BIT(d->hwirq))) { - writel(BIT(PIC_REG_BIT(d->hwirq)), - priv->base + PCH_PIC_CLR + PIC_REG_IDX(d->hwirq) * 4); + writeq(BIT(PIC_REG64_BIT(d->hwirq)), + priv->base + PCH_PIC_CLR + PIC_REG64_IDX(d->hwirq) * 8); } irq_chip_ack_parent(d); } @@ -230,13 +239,15 @@ static void pch_pic_reset(struct pch_pic *priv) for (i = 0; i < PIC_REG_COUNT; i++) { /* Clear IRQ cause 
registers, mask all interrupts */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_MASK + 4 * i); - writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_CLR + 4 * i); /* Clear auto bounce, we don't need that */ writel_relaxed(0, priv->base + PCH_PIC_AUTO0 + 4 * i); writel_relaxed(0, priv->base + PCH_PIC_AUTO1 + 4 * i); /* Enable HTMSI transformer */ writel_relaxed(0xFFFFFFFF, priv->base + PCH_PIC_HTMSI_EN + 4 * i); } + + for (i = 0; i < PIC_REG64_COUNT; i++) + writeq_relaxed((u64)-1, priv->base + PCH_PIC_CLR + 8 * i); } static int pch_pic_suspend(void) diff --git a/drivers/irqchip/irq-sunway-cpu.c b/drivers/irqchip/irq-sunway-cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..ff7455c0f3ec282c4a55ace6c92ad55a0aa8d9e6 --- /dev/null +++ b/drivers/irqchip/irq-sunway-cpu.c @@ -0,0 +1,213 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +#include +#include +#include +#include + +static void handle_intx(unsigned int offset) +{ + struct pci_controller *hose; + unsigned long value; + + for (hose = hose_head; hose; hose = hose->next) { + value = read_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7)); + if (value >> 63) { + value = value & (~(1UL << 62)); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + handle_irq(hose->int_irq); + value = value | (1UL << 62); + write_piu_ior0(hose->node, hose->index, INTACONFIG + (offset << 7), value); + } + + if (IS_ENABLED(CONFIG_PCIE_PME)) { + value = read_piu_ior0(hose->node, hose->index, PMEINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, PMEINTCONFIG, value); + } + } + + if (IS_ENABLED(CONFIG_PCIEAER)) { + value = read_piu_ior0(hose->node, hose->index, AERERRINTCONFIG); + if (value >> 63) { + handle_irq(hose->service_irq); + write_piu_ior0(hose->node, hose->index, AERERRINTCONFIG, value); + } + } + + if (hose->iommu_enable) { + value = read_piu_ior0(hose->node, hose->index, IOMMUEXCPT_STATUS); + if (value >> 63) + handle_irq(hose->int_irq); + } + } +} + +static void handle_device_interrupt(unsigned long irq_info) +{ + unsigned int i; + + if (is_guest_or_emul()) { + handle_irq(irq_info); + return; + } + + for (i = 0; i < 4; i++) { + if ((irq_info >> i) & 0x1) + handle_intx(i); + } +} + +/* Performance counter hook. A module can override this to do something useful. 
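By default it only bumps the stray-interrupt count; a perf backend is expected to install its own handler by assigning to the exported perf_irq pointer.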
*/ +static void dummy_perf(unsigned long vector, struct pt_regs *regs) +{ + irq_err_count++; + pr_crit("Performance counter interrupt!\n"); +} + +void (*perf_irq)(unsigned long vector, struct pt_regs *regs) = dummy_perf; +EXPORT_SYMBOL(perf_irq); + +static void handle_fault_int(void) +{ + int node; + unsigned long value; + + node = __this_cpu_read(hard_node_id); + pr_info("enter fault int, si_fault_stat = %#lx\n", + sw64_io_read(node, SI_FAULT_STAT)); + sw64_io_write(node, SI_FAULT_INT_EN, 0); + sw64_io_write(node, DLI_RLTD_FAULT_INTEN, 0); +#if defined(CONFIG_UNCORE_XUELANG) + value = 0; +#elif defined(CONFIG_UNCORE_JUNZHANG) + value = sw64_io_read(node, FAULT_INT_CONFIG); + value |= (1 << 8); +#endif + __io_write_fault_int_en(node, value); +} + +static void handle_mt_int(void) +{ + pr_info("enter mt int\n"); +} + +static void handle_nmi_int(void) +{ + pr_info("enter nmi int\n"); +} + +static void handle_dev_int(struct pt_regs *regs) +{ + unsigned long config_val, val, stat; + int node = 0; + unsigned int hwirq; + + config_val = sw64_io_read(node, DEV_INT_CONFIG); + val = config_val & (~(1UL << 8)); + sw64_io_write(node, DEV_INT_CONFIG, val); + stat = sw64_io_read(node, MCU_DVC_INT); + + while (stat) { + hwirq = ffs(stat) - 1; + generic_handle_domain_irq(NULL, hwirq); + stat &= ~(1UL << hwirq); + } + /*do handle irq */ + + sw64_io_write(node, DEV_INT_CONFIG, config_val); +} + +asmlinkage void do_entInt(unsigned long type, unsigned long vector, + unsigned long irq_arg, struct pt_regs *regs) +{ + struct pt_regs *old_regs; + extern char __idle_start[], __idle_end[]; + + if (is_guest_or_emul()) { + if ((type & 0xffff) > 15) { + vector = type; + if (vector == 16) + type = INT_INTx; + else + type = INT_MSI; + } + } + + /* restart idle routine if it is interrupted */ + if (regs->pc > (u64)__idle_start && regs->pc < (u64)__idle_end) + regs->pc = (u64)__idle_start; + + switch (type & 0xffff) { + case INT_MSI: + old_regs = set_irq_regs(regs); + handle_pci_msi_interrupt(type, vector, irq_arg); + set_irq_regs(old_regs); + return; + case INT_INTx: + old_regs = set_irq_regs(regs); + handle_device_interrupt(vector); + set_irq_regs(old_regs); + return; + + case INT_IPI: +#ifdef CONFIG_SMP + handle_ipi(regs); + return; +#else + irq_err_count++; + pr_crit("Interprocessor interrupt? You must be kidding!\n"); +#endif + break; + case INT_RTC: + old_regs = set_irq_regs(regs); + sw64_timer_interrupt(); + set_irq_regs(old_regs); + return; + case INT_VT_SERIAL: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_VT_HOTPLUG: + old_regs = set_irq_regs(regs); + handle_irq(type); + set_irq_regs(old_regs); + return; + case INT_PC0: + perf_irq(PMC_PC0, regs); + return; + case INT_PC1: + perf_irq(PMC_PC1, regs); + return; + case INT_DEV: + old_regs = set_irq_regs(regs); + handle_dev_int(regs); + set_irq_regs(old_regs); + return; + case INT_FAULT: + old_regs = set_irq_regs(regs); + handle_fault_int(); + set_irq_regs(old_regs); + return; + case INT_MT: + old_regs = set_irq_regs(regs); + handle_mt_int(); + set_irq_regs(old_regs); + return; + case INT_NMI: + old_regs = set_irq_regs(regs); + handle_nmi_int(); + set_irq_regs(old_regs); + return; + default: + pr_crit("Hardware intr %ld %lx? 
uh?\n", type, vector); + } + pr_crit("PC = %016lx PS = %04lx\n", regs->pc, regs->ps); +} +EXPORT_SYMBOL(do_entInt); diff --git a/drivers/irqchip/irq-sunway-msi-v2.c b/drivers/irqchip/irq-sunway-msi-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..36790dfedb33a71d8ed2bd4f0f41eb2075d01bc6 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-v2.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + int rcid; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + rcid = cpu_to_rcid(chip_data->dst_cpu); + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = + (unsigned int)chip_data->msiaddr | + (rcid_to_msicid(rcid) << MSI_ADDR_DEST_ID_SHIFT); + msg->data = chip_data->vector; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static bool find_free_cpu_vectors(const struct cpumask *search_mask, int *found_cpu, int *found_vector, unsigned int nr_irqs) +{ + int i, vector, cpu; + bool found = false, find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + for (vector = 0; vector < 256; vector++) { + for (i = 0; i < nr_irqs; i++) + if (per_cpu(vector_irq, cpu)[vector + i]) + break; + + if (i == nr_irqs) { + found = true; + *found_cpu = cpu; + *found_vector = vector; + return found; + } + + vector += i; + } + + cpu = cpumask_next(cpu, search_mask); + if (cpu < nr_cpu_ids) + goto try_again; + else { + if (find_once_global) { + pr_warn("No global free vectors\n"); + return found; + } + pr_warn("No local free vectors\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags; + int vector, cpu; + int i; + struct msi_msg msg; + + /* Is this valid ? 
*/ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (cdata->multi_msi > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, cdata->multi_msi)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } else { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + spin_lock(&cdata->cdata_lock); + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cpu)[vector + i] = entry->irq + i; + BUG_ON(irq_chip_compose_msi_msg(irqd, &msg)); + __pci_write_msi_msg(entry, &msg); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy(irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int node; + int i, vector, cpu; + unsigned long msiaddr; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (type == IRQ_ALLOC_TYPE_MSI && nr_irqs > 1) { + if (!find_free_cpu_vectors(&searchmask, &cpu, + &vector, nr_irqs)) + return -ENOSPC; + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + for (i = 0; i < nr_irqs; i++) { + per_cpu(vector_irq, cpu)[vector + i] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + irq_data->chip_data = cdata; + } + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = nr_irqs; + cdata->move_in_progress = false; + } else { + for (i = 0; i < nr_irqs; i++) { + 
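/* MSI-X or single-message path: each interrupt gets its own chip data and an independently chosen (cpu, vector) pair. */ + 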
if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msiaddr = MSIX_MSG_ADDR; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->multi_msi = 1; + cdata->move_in_progress = false; + } + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, enum irq_alloc_type type) +{ + int err; + unsigned long flags; + + raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, type); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i, j; + struct irq_data *irq_data; + unsigned long flags; + unsigned int multi_msi; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_domain_reset_irq_data(irq_data); + multi_msi = cdata->multi_msi; + for (j = 0; j < multi_msi; j++) + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector + j] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + if (multi_msi > 1) + break; + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + for_each_pci_msi_entry(desc, dev) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + enum irq_alloc_type msi_type; + + if (arg == NULL) + return -ENODEV; + msi_type = info->type; + err = assign_irq_vector(virq, nr_irqs, domain, msi_type); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct msi_desc *desc = first_pci_msi_entry(pdev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (desc->msi_attrib.is_msix) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + 
irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + err = msi_domain_alloc_irqs(msi_default_domain, &pdev->dev, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (cdata->dst_cpu == cpu) { + if (vector >= cdata->vector && + vector < cdata->vector + cdata->multi_msi) { + int i; + + raw_spin_lock(&vector_lock); + cdata->move_in_progress = false; + for (i = 0; i < cdata->multi_msi; i++) + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector + i] = 0; + raw_spin_unlock(&vector_lock); + } + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, msi_index = 0; + int cpu, vector_index = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + int irq = 0; + + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sunway-msi-vt.c b/drivers/irqchip/irq-sunway-msi-vt.c new file mode 100644 index 0000000000000000000000000000000000000000..df8c7d72671b43b7f1d7ce0c6d239c0810afc7fe --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi-vt.c @@ -0,0 +1,280 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +static DEFINE_RAW_SPINLOCK(vector_lock); + +static void __vt_irq_msi_compose_msg(struct sw64_msi_chip_data *cdata, + struct msi_msg *msg) +{ + msg->address_hi = (u32)(VT_MSIX_MSG_ADDR >> 32); + msg->address_lo = (u32)(VT_MSIX_MSG_ADDR & 0xffffffff) + | VT_MSIX_ADDR_DEST_ID(cdata->dst_cpu); + msg->data = cdata->vector; +} + +static void vt_irq_msi_compose_msg(struct irq_data *irqd, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *cdata; + + cdata = irqd->chip_data; + __vt_irq_msi_compose_msg(cdata, msg); +} + +static void vt_irq_msi_update_msg(struct 
irq_data *irqd, + struct sw64_msi_chip_data *cdata) +{ + struct msi_msg msg[2] = { [1] = { }, }; + + __vt_irq_msi_compose_msg(cdata, msg); + pci_write_msi_msg(irqd->irq, msg); +} + +static int +vt_set_affinity(struct irq_data *irqd, const struct cpumask *cpumask, + bool force) +{ + struct sw64_msi_chip_data *cdata; + struct cpumask searchmask; + int cpu, vector; + + /* Is this valid? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If the existing target coreid is already in the new mask + * and is online, do nothing. + */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + spin_lock(&cdata->cdata_lock); + cdata->prev_cpu = cdata->dst_cpu; + cdata->prev_vector = cdata->vector; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + vt_irq_msi_update_msg(irqd, irqd->chip_data); + + return 0; +} + +static struct irq_chip vt_pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = sw64_irq_noop, + .irq_compose_msi_msg = vt_irq_msi_compose_msg, + .irq_set_affinity = vt_set_affinity, +}; + +int chip_setup_vt_msix_irq(struct pci_dev *dev, struct msi_desc *desc) +{ + int virq, val_node = 0; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long flags, node, rc_index; + const struct cpumask *mask; + + struct cpumask searchmask; + int cpu, vector; + + node = hose->node; + rc_index = hose->index; + mask = cpumask_of_node(node); + + raw_spin_lock_irqsave(&vector_lock, flags); + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq; + + irq_set_msi_desc(virq, desc); + irq_set_chip_and_handler_name(virq, &vt_pci_msi_controller, + handle_edge_irq, "edge"); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} 
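 +/* + * The guest/emulator MSI helpers below mirror the bare-metal code: + * sw64_setup_vt_msi_irqs() dispatches on the capability type, calling + * chip_setup_vt_msix_irq() once per MSI-X entry and + * chip_setup_vt_msi_irqs() for (multi-message) MSI. + */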
+EXPORT_SYMBOL(chip_setup_vt_msix_irq); + +int chip_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *desc; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + unsigned long node, rc_index; + int virq = -1, val_node = 0; + unsigned long flags; + + const struct cpumask *mask; + struct cpumask searchmask; + int i, vector, cpu; + + if (type == PCI_CAP_ID_MSI && nvec > 32) + return 1; + + node = hose->node; + rc_index = hose->index; + raw_spin_lock_irqsave(&vector_lock, flags); + msi_for_each_desc(desc, &(dev->dev), MSI_DESC_ALL) { + /* Find unused msi config reg in PIU-IOR0 */ + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + virq = irq_alloc_descs_from(NR_IRQS_LEGACY, desc->nvec_used, val_node); + if (virq < 0) { + pr_err("Failed to allocate IRQ(base 16, count %d)\n", desc->nvec_used); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return virq; + } + + irq_data = irq_get_irq_data(virq); + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < desc->nvec_used; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + cdata = kzalloc(sizeof(*cdata), GFP_KERNEL); + if (!cdata) + return -ENOMEM; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + irq_set_msi_desc_off(virq, i, desc); + irq_set_chip_and_handler_name(virq + i, &vt_pci_msi_controller, handle_edge_irq, "edge"); + irq_data = irq_get_irq_data(virq + i); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + + irq_data->chip_data = cdata; + + vt_irq_msi_update_msg(irq_data, irq_data->chip_data); + } + } + + raw_spin_unlock_irqrestore(&vector_lock, flags); + return 0; +} +EXPORT_SYMBOL(chip_setup_vt_msi_irqs); + +void vt_sw64_vector_free_irqs(unsigned int virq, unsigned int nr_irqs) +{ + int i; + unsigned long flags; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_get_irq_data(virq + i); + if (irq_data && irq_data->chip_data) { + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + irq_data->hwirq = 0; + irq_data->chip = &no_irq_chip; + irq_data->chip_data = NULL; + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +int __arch_setup_vt_msix_irqs(struct pci_dev *dev, int nvec, int type) +{ + struct msi_desc *entry; + int ret; + + msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) { + ret = chip_setup_vt_msix_irq(dev, entry); + if (ret) + return ret; + } + + return 0; +} + +int sw64_setup_vt_msi_irqs(struct pci_dev *dev, int nvec, int type) +{ + int ret = 0; + + if (type == PCI_CAP_ID_MSI) + ret = chip_setup_vt_msi_irqs(dev, nvec, type); + else if (type == PCI_CAP_ID_MSIX) + ret = __arch_setup_vt_msix_irqs(dev, nvec, type); + else + pr_warn("SW arch does not support MSI type %d\n", type); + + return ret; +} diff --git a/drivers/irqchip/irq-sunway-msi.c b/drivers/irqchip/irq-sunway-msi.c new file mode 100644 index 
0000000000000000000000000000000000000000..060aa96711b7a96497d3704237b122c31103be03 --- /dev/null +++ b/drivers/irqchip/irq-sunway-msi.c @@ -0,0 +1,472 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include +#include + +static struct irq_domain *msi_default_domain; +static DEFINE_RAW_SPINLOCK(vector_lock); +DEFINE_PER_CPU(vector_irq_t, vector_irq) = { + [0 ... PERCPU_MSI_IRQS - 1] = 0, +}; + +static struct sw64_msi_chip_data *alloc_sw_msi_chip_data(struct irq_data *irq_data) +{ + struct sw64_msi_chip_data *data; + int node; + + node = irq_data_get_node(irq_data); + data = kzalloc_node(sizeof(*data), GFP_KERNEL, node); + if (!data) + return NULL; + spin_lock_init(&data->cdata_lock); + return data; +} + +static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct sw64_msi_chip_data *chip_data; + + chip_data = irq_data_get_irq_chip_data(data->parent_data); + + msg->address_hi = MSI_ADDR_BASE_HI; + msg->address_lo = MSI_ADDR_BASE_LO; + msg->data = chip_data->msi_config_index; +} + +bool find_free_cpu_vector(const struct cpumask *search_mask, + int *found_cpu, int *found_vector) +{ + int vector, max_vector, cpu; + bool find_once_global = false; + + cpu = cpumask_first(search_mask); +try_again: + if (is_guest_or_emul()) { + vector = IRQ_PENDING_MSI_VECTORS_SHIFT; + max_vector = SWVM_IRQS; + } else { + vector = 0; + max_vector = 256; + } + for (; vector < max_vector; vector++) { + while (per_cpu(vector_irq, cpu)[vector]) { + cpu = cpumask_next(cpu, search_mask); + if (cpu >= nr_cpu_ids) { + if (vector == 255) { + if (find_once_global) { + pr_warn("No global free vector\n"); + return false; + } + pr_warn("No local free vector\n"); + search_mask = cpu_online_mask; + cpu = cpumask_first(search_mask); + find_once_global = true; + goto try_again; + } + cpu = cpumask_first(search_mask); + break; + } + } + if (!per_cpu(vector_irq, cpu)[vector]) + break; + } + + *found_cpu = cpu; + *found_vector = vector; + return true; +} + +static unsigned long set_piu_msi_config(struct pci_controller *hose, int cpu, + int msiconf_index, int vector) +{ + unsigned int reg; + unsigned long msi_config; + int phy_cpu; + + msi_config = (1UL << 62) | ((unsigned long)vector << 10); + phy_cpu = cpu_to_rcid(cpu); + msi_config |= ((phy_cpu >> 5) << 6) | (phy_cpu & 0x1f); + reg = MSICONFIG0 + (unsigned long)(msiconf_index << 7); + write_piu_ior0(hose->node, hose->index, reg, msi_config); + msi_config = read_piu_ior0(hose->node, hose->index, reg); + set_bit(msiconf_index, hose->piu_msiconfig); + + return msi_config; +} + +static int sw64_set_affinity(struct irq_data *d, const struct cpumask *cpumask, bool force) +{ + struct sw64_msi_chip_data *cdata; + struct pci_controller *hose; + struct pci_dev *pdev; + struct irq_data *irqd; + struct msi_desc *entry; + struct cpumask searchmask; + unsigned long flags, msi_config; + int vector, cpu; + + /* Is this valid ? */ + if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) + return -EINVAL; + + irqd = irq_domain_get_irq_data(msi_default_domain->parent, d->irq); + /* Don't do anything if the interrupt isn't started */ + if (!irqd_is_started(irqd)) + return IRQ_SET_MASK_OK; + + cdata = irqd->chip_data; + if (!cdata) + return -ENOMEM; + + /* + * If existing target cpu is already in the new mask and is online + * then do nothing. 
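+ * Rewriting the PIU MSI config register in that case would only + * re-route an already-correct destination.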
+ */ + if (cpu_online(cdata->dst_cpu) && cpumask_test_cpu(cdata->dst_cpu, cpumask)) + return IRQ_SET_MASK_OK; + + raw_spin_lock_irqsave(&vector_lock, flags); + + cpumask_and(&searchmask, cpumask, cpu_online_mask); + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) { + raw_spin_unlock_irqrestore(&vector_lock, flags); + return -ENOSPC; + } + + /* update new setting */ + entry = irq_get_msi_desc(irqd->irq); + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + spin_lock(&cdata->cdata_lock); + per_cpu(vector_irq, cpu)[vector] = irqd->irq; + msi_config = set_piu_msi_config(hose, cpu, cdata->msi_config_index, vector); + cdata->prev_vector = cdata->vector; + cdata->prev_cpu = cdata->dst_cpu; + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->msi_config = msi_config; + cdata->move_in_progress = true; + spin_unlock(&cdata->cdata_lock); + cpumask_copy((struct cpumask *)irq_data_get_affinity_mask(irqd), &searchmask); + + raw_spin_unlock_irqrestore(&vector_lock, flags); + + return 0; +} + +static void chip_irq_ack(struct irq_data *data) +{ +} + +static struct irq_chip pci_msi_controller = { + .name = "PCI-MSI", + .irq_unmask = pci_msi_unmask_irq, + .irq_mask = pci_msi_mask_irq, + .irq_ack = chip_irq_ack, + .irq_compose_msi_msg = irq_msi_compose_msg, + .flags = IRQCHIP_SKIP_SET_WAKE, + .irq_set_affinity = sw64_set_affinity, +}; + +static int __assign_irq_vector(int virq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + struct irq_data *irq_data; + const struct cpumask *mask; + struct cpumask searchmask; + struct sw64_msi_chip_data *cdata; + int msiconf_index, node; + int i, vector, cpu; + unsigned long msi_config; + int start_index; + + if (unlikely((nr_irqs > 1) && (!is_power_of_2(nr_irqs)))) + nr_irqs = __roundup_pow_of_two(nr_irqs); + + msiconf_index = bitmap_find_next_zero_area(hose->piu_msiconfig, 256, 0, + nr_irqs, nr_irqs - 1); + + if (msiconf_index >= 256) { + pr_warn("No free msi on PIU!\n"); + return -ENOSPC; + } + + start_index = msiconf_index; + irq_data = irq_domain_get_irq_data(domain, virq); + BUG_ON(!irq_data); + irq_data->chip = &pci_msi_controller; + + if (irqd_affinity_is_managed(irq_data)) { + mask = irq_data_get_affinity_mask(irq_data); + cpumask_and(&searchmask, mask, cpu_online_mask); + } else { + node = irq_data_get_node(irq_data); + cpumask_copy(&searchmask, cpumask_of_node(node)); + } + + if (cpumask_first(&searchmask) >= nr_cpu_ids) + cpumask_copy(&searchmask, cpu_online_mask); + + for (i = 0; i < nr_irqs; i++) { + if (!find_free_cpu_vector(&searchmask, &cpu, &vector)) + return -ENOSPC; + + per_cpu(vector_irq, cpu)[vector] = virq + i; + + if (i) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + irq_data->chip = &pci_msi_controller; + } + + cdata = alloc_sw_msi_chip_data(irq_data); + if (!cdata) { + pr_warn("error alloc irq chip data\n"); + return -ENOMEM; + } + + irq_data->chip_data = cdata; + msiconf_index = start_index + i; + msi_config = set_piu_msi_config(hose, cpu, msiconf_index, vector); + + cdata->dst_cpu = cpu; + cdata->vector = vector; + cdata->rc_index = hose->index; + cdata->rc_node = hose->node; + cdata->msi_config = msi_config; + cdata->msi_config_index = msiconf_index; + cdata->prev_cpu = cpu; + cdata->prev_vector = vector; + cdata->move_in_progress = false; + } + return 0; +} + +static int assign_irq_vector(int irq, unsigned int nr_irqs, + struct irq_domain *domain, struct pci_controller *hose) +{ + int err; + unsigned long flags; + + 
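/* vector_lock serialises updates to the per-cpu vector_irq tables. */ + 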
raw_spin_lock_irqsave(&vector_lock, flags); + err = __assign_irq_vector(irq, nr_irqs, domain, hose); + raw_spin_unlock_irqrestore(&vector_lock, flags); + return err; +} + +static void sw64_vector_free_irqs(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + int i; + struct irq_data *irq_data; + struct pci_dev *pdev; + unsigned long flags; + + for (i = 0; i < nr_irqs; i++) { + irq_data = irq_domain_get_irq_data(domain, virq + i); + if (irq_data && irq_data->chip_data) { + struct sw64_msi_chip_data *cdata; + struct msi_desc *entry; + struct pci_controller *hose; + + raw_spin_lock_irqsave(&vector_lock, flags); + cdata = irq_data->chip_data; + entry = irq_get_msi_desc(virq + i); + if (entry) { + pdev = (struct pci_dev *)msi_desc_to_pci_dev(entry); + hose = pci_bus_to_pci_controller(pdev->bus); + clear_bit(cdata->msi_config_index, hose->piu_msiconfig); + } + irq_domain_reset_irq_data(irq_data); + per_cpu(vector_irq, cdata->dst_cpu)[cdata->vector] = 0; + kfree(cdata); + raw_spin_unlock_irqrestore(&vector_lock, flags); + } + } +} + +static void sw64_irq_free_descs(unsigned int virq, unsigned int nr_irqs) +{ + if (is_guest_or_emul()) { + vt_sw64_vector_free_irqs(virq, nr_irqs); + return irq_free_descs(virq, nr_irqs); + } + + return irq_domain_free_irqs(virq, nr_irqs); +} + +void arch_teardown_msi_irqs(struct pci_dev *dev) +{ + struct msi_desc *desc; + int i; + + msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) { + if (desc->irq) { + for (i = 0; i < desc->nvec_used; i++) + sw64_irq_free_descs(desc->irq + i, 1); + desc->irq = 0; + } + } +} + +static int sw64_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int err; + struct irq_alloc_info *info = arg; + struct pci_controller *hose; + + if (arg == NULL) + return -ENODEV; + + hose = pci_bus_to_pci_controller(info->msi_dev->bus); + err = assign_irq_vector(virq, nr_irqs, domain, hose); + if (err) + goto error; + return 0; +error: + sw64_vector_free_irqs(domain, virq, nr_irqs); + return err; +} + +static int pci_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *arg) +{ + struct pci_dev *pdev = to_pci_dev(dev); + + memset(arg, 0, sizeof(*arg)); + arg->msi_dev = pdev; + if (pdev->msix_enabled) + arg->type = IRQ_ALLOC_TYPE_MSIX; + else + arg->type = IRQ_ALLOC_TYPE_MSI; + return 0; +} + +static struct msi_domain_ops pci_msi_domain_ops = { + .msi_prepare = pci_msi_prepare, +}; + +static struct msi_domain_info pci_msi_domain_info = { + .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX, + .ops = &pci_msi_domain_ops, + .chip = &pci_msi_controller, + .handler = handle_edge_irq, + .handler_name = "edge", +}; + +static int sw64_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) +{ + irq_set_chip_and_handler(virq, &sw64_irq_chip, handle_level_irq); + irq_set_status_flags(virq, IRQ_LEVEL); + return 0; +} + +const struct irq_domain_ops sw64_msi_domain_ops = { + .map = sw64_irq_map, + .alloc = sw64_vector_alloc_irqs, + .free = sw64_vector_free_irqs, +}; + +int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +{ + int err; + + if (is_guest_or_emul()) + return sw64_setup_vt_msi_irqs(pdev, nvec, type); + + if (!msi_default_domain) + return -EIO; + + err = msi_domain_alloc_irqs_all_locked(&pdev->dev, MSI_DEFAULT_DOMAIN, nvec); + return err; +} + +void arch_init_msi_domain(struct irq_domain *parent) +{ + struct irq_domain *sw64_irq_domain; + + if (is_guest_or_emul()) + 
return; + + sw64_irq_domain = irq_domain_add_tree(NULL, &sw64_msi_domain_ops, NULL); + BUG_ON(sw64_irq_domain == NULL); + irq_set_default_host(sw64_irq_domain); + msi_default_domain = pci_msi_create_irq_domain(NULL, + &pci_msi_domain_info, sw64_irq_domain); + if (!msi_default_domain) + pr_warn("failed to initialize irqdomain for MSI/MSI-x.\n"); +} + +int pcibios_device_add(struct pci_dev *dev) +{ + if (msi_default_domain) + dev_set_msi_domain(&dev->dev, msi_default_domain); + return 0; +} + +static void irq_move_complete(struct sw64_msi_chip_data *cdata, int cpu, int vector) +{ + if (likely(!cdata->move_in_progress)) + return; + if (vector == cdata->vector && cdata->dst_cpu == cpu) { + raw_spin_lock(&vector_lock); + cdata->move_in_progress = 0; + per_cpu(vector_irq, cdata->prev_cpu)[cdata->prev_vector] = 0; + raw_spin_unlock(&vector_lock); + } +} + +void handle_pci_msi_interrupt(unsigned long type, unsigned long vector, unsigned long pci_msi1_addr) +{ + int i, irq, piu_index, msi_index = 0; + int cpu, vector_index = 0; + unsigned long value = 0; + unsigned long int_pci_msi[3]; + unsigned long *ptr; + struct irq_data *irq_data; + struct sw64_msi_chip_data *cdata; + + if (is_guest_or_emul()) { + cpu = smp_processor_id(); + irq = per_cpu(vector_irq, cpu)[vector]; + handle_irq(irq); + return; + } + + ptr = (unsigned long *)pci_msi1_addr; + int_pci_msi[0] = *ptr; + int_pci_msi[1] = *(ptr + 1); + int_pci_msi[2] = *(ptr + 2); + + cpu = smp_processor_id(); + + for (i = 0; i < 4; i++) { + vector_index = i * 64; + while (vector != 0) { + msi_index = find_next_bit(&vector, 64, msi_index); + if (msi_index == 64) { + msi_index = 0; + continue; + } + + irq = per_cpu(vector_irq, cpu)[vector_index + msi_index]; + irq_data = irq_domain_get_irq_data(msi_default_domain->parent, irq); + cdata = irq_data_get_irq_chip_data(irq_data); + spin_lock(&cdata->cdata_lock); + irq_move_complete(cdata, cpu, vector_index + msi_index); + piu_index = cdata->msi_config_index; + value = cdata->msi_config | (1UL << 63); + write_piu_ior0(cdata->rc_node, cdata->rc_index, MSICONFIG0 + (piu_index << 7), value); + spin_unlock(&cdata->cdata_lock); + handle_irq(irq); + + vector = vector & (~(1UL << msi_index)); + } + + vector = int_pci_msi[i % 3]; + } +} diff --git a/drivers/irqchip/irq-sw64-intc-v2.c b/drivers/irqchip/irq-sw64-intc-v2.c new file mode 100644 index 0000000000000000000000000000000000000000..bc2c8ef3ed2fca25a4742c8f4232f27854bea948 --- /dev/null +++ b/drivers/irqchip/irq-sw64-intc-v2.c @@ -0,0 +1,89 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include + +static void fake_irq_mask(struct irq_data *data) +{ +} + +static void fake_irq_unmask(struct irq_data *data) +{ +} + +static struct irq_chip onchip_intc = { + .name = "SW fake Intc", + .irq_mask = fake_irq_mask, + .irq_unmask = fake_irq_unmask, +}; + +static int sw64_intc_domain_map(struct irq_domain *d, unsigned int irq, + irq_hw_number_t hw) +{ + + irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + return 0; +} + +static const struct irq_domain_ops sw64_intc_domain_ops = { + .xlate = irq_domain_xlate_onecell, + .map = sw64_intc_domain_map, +}; + +#ifdef CONFIG_OF +static struct irq_domain *root_domain; + +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) +{ + + int node = 0; + int hwirq = 0, nirq = 8; + + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_linear(intc, 8, + 
&sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + for (hwirq = 0 ; hwirq < nirq ; hwirq++) + irq_create_mapping(root_domain, hwirq); + + /*enable MCU_DVC_INT_EN*/ + sw64_io_write(node, MCU_DVC_INT_EN, 0xff); + + return 0; +} + +IRQCHIP_DECLARE(sw64_intc, "sw64,sw6_irq_controller", init_onchip_IRQ); + +static int __init +init_onchip_vt_IRQ(struct device_node *intc, struct device_node *parent) +{ + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); + + root_domain = irq_domain_add_legacy(intc, 16, 0, 0, + &sw64_intc_domain_ops, NULL); + + if (!root_domain) + panic("root irq domain not avail\n"); + + /* with this we don't need to export root_domain */ + irq_set_default_host(root_domain); + + return 0; +} + +IRQCHIP_DECLARE(sw64_vt_intc, "sw64,sw6_irq_vt_controller", init_onchip_vt_IRQ); +#endif diff --git a/drivers/irqchip/irq-sw64-lpc-intc.c b/drivers/irqchip/irq-sw64-lpc-intc.c new file mode 100644 index 0000000000000000000000000000000000000000..1cbf8747824232bb62bce64432320ea50ad50451 --- /dev/null +++ b/drivers/irqchip/irq-sw64-lpc-intc.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#define LPC_NR_IRQS 16 +#define LPC_IRQ 0x4 +#define LPC_IRQ_MASK 0x8 + +struct lpc_intc_data { + struct irq_domain *domain; + struct irq_chip_generic *gc; +}; + +static void lpc_irq_mask_ack(struct irq_data *data) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); + struct irq_chip_type *ct = irq_data_get_chip_type(data); + unsigned int mask = data->mask; + + irq_gc_lock(gc); + *ct->mask_cache |= mask; + irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + +static void lpc_irq_handler(struct irq_desc *desc) +{ + struct lpc_intc_data *b = irq_desc_get_handler_data(desc); + struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int irq; + u32 status; + + chained_irq_enter(chip, desc); + + status = irq_reg_readl(b->gc, LPC_IRQ); + + if (status == 0) { + raw_spin_lock(&desc->lock); + handle_bad_irq(desc); + raw_spin_unlock(&desc->lock); + goto out; + } + + while (status) { + irq = __ffs(status); + status &= ~BIT(irq); + generic_handle_irq(irq_find_mapping(b->domain, irq)); + } + +out: + chained_irq_exit(chip, desc); +} + +static int __init lpc_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + unsigned int set = IRQ_NOPROBE | IRQ_LEVEL; + struct lpc_intc_data *data; + struct irq_chip_type *ct; + int parent_irq, ret; + void __iomem *base; + int hwirq = 0; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + base = of_iomap(np, 0); + if (!base) { + pr_err("failed to remap lpc intc registers\n"); + ret = -ENOMEM; + goto out_free; + } + + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { + pr_err("failed to find parent interrupt\n"); + ret = -EINVAL; + goto out_unmap; + } + + data->domain = irq_domain_add_linear(np, LPC_NR_IRQS, + &irq_generic_chip_ops, NULL); + if (!data->domain) { + ret = -ENOMEM; + goto out_unmap; + } + + /* Allocate a single Generic IRQ chip for this node */ + ret = irq_alloc_domain_generic_chips(data->domain, 16, 1, np->name, + handle_level_irq, 0, set, + IRQ_GC_INIT_MASK_CACHE); + if (ret) { + pr_err("failed to allocate generic irq chip\n"); + goto out_free_domain; + } + + /* Set the IRQ chaining logic 
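: the parent line runs lpc_irq_handler(), which demultiplexes the LPC_IRQ status bits into this domain 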
*/ + irq_set_chained_handler_and_data(parent_irq, + lpc_irq_handler, data); + + data->gc = irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + + ct = data->gc->chip_types; + + ct->regs.ack = LPC_IRQ; + ct->regs.mask = LPC_IRQ_MASK; + ct->chip.irq_mask = irq_gc_mask_set_bit; + ct->chip.irq_unmask = irq_gc_mask_clr_bit; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = lpc_irq_mask_ack; + + for (hwirq = 0 ; hwirq < 16 ; hwirq++) + irq_create_mapping(data->domain, hwirq); + + /* Enable LPC interrupts */ + writel(0xffffebdd, base + LPC_IRQ_MASK); + + return 0; + +out_free_domain: + irq_domain_remove(data->domain); +out_unmap: + iounmap(base); +out_free: + kfree(data); + return ret; +} +IRQCHIP_DECLARE(sw_lpc_intc, "sw64,lpc_intc", lpc_intc_of_init); diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 90ce58fd629e5f32dd6cb207e60544b2718ee5c7..7b33746742bbff0e2e1594f949658f7c2e58ce75 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -604,6 +604,22 @@ config LPC_SCH LPC bridge function of the Intel SCH provides support for System Management Bus and General Purpose I/O. +config LPC_CHIP3 + tristate "CHIP3 LPC" + depends on UNCORE_XUELANG + select MFD_CORE + help + LPC bridge function of the chip3 provides support for + System Management Bus and General Purpose I/O. + +config SUNWAY_SUPERIO_AST2400 + tristate "SUNWAY SUPERIO AST2400" + depends on SW64 + select MFD_CORE + help + Nuvoton AST2400 Super I/O chip platform driver written + for SUNWAY LPC controller. + config INTEL_SOC_PMIC bool "Support for Crystal Cove PMIC" depends on HAS_IOMEM && I2C=y && GPIOLIB && COMMON_CLK diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index c66f07edcd0e6273ef1a9eac702bc674eb1cbb32..700b3600eb794a055cbc6ff7c11053f39fb22e69 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -269,6 +269,9 @@ obj-$(CONFIG_MFD_KHADAS_MCU) += khadas-mcu.o obj-$(CONFIG_MFD_ACER_A500_EC) += acer-ec-a500.o obj-$(CONFIG_MFD_QCOM_PM8008) += qcom-pm8008.o +obj-$(CONFIG_LPC_CHIP3) += lpc_sunway_chip3.o +obj-$(CONFIG_SUNWAY_SUPERIO_AST2400) += sunway_ast2400.o + obj-$(CONFIG_SGI_MFD_IOC3) += ioc3.o obj-$(CONFIG_MFD_SIMPLE_MFD_I2C) += simple-mfd-i2c.o obj-$(CONFIG_MFD_SMPRO) += smpro-core.o diff --git a/drivers/mfd/lpc_sunway_chip3.c b/drivers/mfd/lpc_sunway_chip3.c new file mode 100644 index 0000000000000000000000000000000000000000..1bcf40d6a6f7b6d7a6d87574386fccb57f7dd1b8 --- /dev/null +++ b/drivers/mfd/lpc_sunway_chip3.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * lpc_sunway_chip3.c - LPC interface for SUNWAY CHIP3 + * + * LPC bridge function contains many other functional units, + * such as Interrupt controllers, Timers, Power Management, + * System Management, GPIO, RTC, and LPC Configuration + * Registers. 
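+ * The bridge is exposed as an MFD device: lpc_chip3_probe() registers + * cells for the Super I/O, the memory-mapped flash and the firmware-hub + * flash regions.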
+ * + * Copyright (c) 2014 JN + * Author: Weiqiang Su + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +enum features { + LPC_USE_MSI = (1 << 0), + LPC_USE_INTX = (1 << 1), +}; + +enum { + LPC_HST_BAR = 0, + LPC_MEM_BAR = 2, + LPC_FWH_BAR = 4, +}; + +enum { + LPC_CTL = 0x0, + LPC_IRQ = 0x4, + LPC_IRQ_MASK = 0x8, + LPC_STAT = 0xc, + LPC_ERR_INF = 0x10, + LPC_MEM_HADDR = 0x14, + LPC_FWH_IDSEL_R1 = 0x18, + LPC_FWH_IDSEL_R2 = 0x1c, + LPC_FWH_IDSEL_R3 = 0x20, + LPC_FWH_IDSEL_R4 = 0x24, + LPC_FWH_IDSEL_R5 = 0x28, + LPC_FWH_DEC_EN1 = 0x2c, + LPC_FWH_DEC_EN2 = 0x30, + LPC_DMA_CTL = 0x34, + LPC_CH_STAT = 0x38, + LPC_CH0_ADDR = 0x3c, + LPC_CH1_ADDR = 0x40, + LPC_CH2_ADDR = 0x44, + LPC_CH3_ADDR = 0x48, + LPC_CH0_LENG = 0x4c, + LPC_CH1_LENG = 0x50, + LPC_CH2_LENG = 0x54, + LPC_CH3_LENG = 0x58, + LPC_CH0_MODE = 0x5c, + LPC_CH1_MODE = 0x60, + LPC_CH2_MODE = 0x64, + LPC_CH3_MODE = 0x68, + LPC_CH_MASK = 0x6c, + LPC_DMA_SWRST = 0x70, +}; + +struct lpc_chip3_adapter { + void __iomem *hst_regs; + struct device *dev; + int irq; + unsigned int features; +}; + +static struct resource superio_chip3_resources[] = { + { + .flags = IORESOURCE_IO, + } +}; + +static struct resource mem_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct resource fw_flash_resource = { + .flags = IORESOURCE_MEM, +}; + +static struct physmap_flash_data mem_flash_data = { + .width = 1, +}; + +static struct physmap_flash_data fw_flash_data = { + .width = 1, +}; + +static struct mfd_cell lpc_chip3_cells[] = { + { + .name = "sunway_superio_ast2400", + .id = 0, + .num_resources = ARRAY_SIZE(superio_chip3_resources), + .resources = superio_chip3_resources, + }, + { + .name = "chip3-flash", + .id = 0, + .num_resources = 1, + .resources = &mem_flash_resource, + .platform_data = &mem_flash_data, + .pdata_size = sizeof(mem_flash_data), + }, + { + .name = "chip3_fwh-flash", + .id = 0, + .num_resources = 1, + .resources = &fw_flash_resource, + .platform_data = &fw_flash_data, + .pdata_size = sizeof(fw_flash_data), + } +}; + +static inline void lpc_writel(void *address, int reg_base, int value) +{ + unsigned long addr = (unsigned long)address + reg_base; + + writel(value, (void *)addr); +} + +static inline int lpc_readl(void *address, int reg_base) +{ + unsigned long addr = (unsigned long)address + reg_base; + int value = readl((void *)addr); + + return value; +} + +static void lpc_enable(struct lpc_chip3_adapter *lpc_adapter) +{ + unsigned int value; + + value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL); + value |= 0x1600; + + /* LPC host enable */ + lpc_writel(lpc_adapter->hst_regs, LPC_CTL, value); +} + +static void lpc_mem_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + mem_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x2UL << 28)); + mem_flash_resource.end = mem_flash_resource.start + SZ_256M - 1; + + writel(0x1f, lpc_adapter->hst_regs + LPC_MEM_HADDR); +} + +static void lpc_fw_flash_init(struct platform_device *pdev, + struct lpc_chip3_adapter *lpc_adapter) +{ + fw_flash_resource.start = + (((unsigned long)(lpc_adapter->hst_regs) & (~(0xfUL << 28))) | (0x3UL << 28)); + fw_flash_resource.end = fw_flash_resource.start + SZ_256M - 1; + + writel(0xff0f, lpc_adapter->hst_regs + LPC_FWH_DEC_EN1); + writel(0xffff11ff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R5); + writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R4); + writel(0xffffffff, lpc_adapter->hst_regs + 
LPC_FWH_IDSEL_R3);
+	writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R2);
+	writel(0xffffffff, lpc_adapter->hst_regs + LPC_FWH_IDSEL_R1);
+}
+
+static int lpc_chip3_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct lpc_chip3_adapter *lpc_adapter;
+	struct resource *mem;
+
+	lpc_adapter = kzalloc(sizeof(*lpc_adapter), GFP_KERNEL);
+	if (lpc_adapter == NULL) {
+		dev_err(&pdev->dev, "%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	platform_set_drvdata(pdev, lpc_adapter);
+	/* Get basic io resource and map it */
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!mem) {
+		dev_err(&pdev->dev, "no mem resource\n");
+		kfree(lpc_adapter);
+		return -EINVAL;
+	}
+
+	lpc_adapter->hst_regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(lpc_adapter->hst_regs)) {
+		dev_err(&pdev->dev, "lpc region map failed\n");
+		ret = PTR_ERR(lpc_adapter->hst_regs);
+		kfree(lpc_adapter);
+		return ret;
+	}
+
+	lpc_adapter->dev = &pdev->dev;
+	lpc_adapter->features = 0;
+
+	lpc_enable(lpc_adapter);
+
+	lpc_mem_flash_init(pdev, lpc_adapter);
+	lpc_fw_flash_init(pdev, lpc_adapter);
+
+	ret = mfd_add_devices(&pdev->dev, 0,
+			      lpc_chip3_cells, ARRAY_SIZE(lpc_chip3_cells),
+			      NULL, 0, NULL);
+	if (ret)
+		goto out_dev;
+
+	dev_info(lpc_adapter->dev, "probe succeeded\n");
+
+	return 0;
+
+out_dev:
+	dev_err(lpc_adapter->dev, "probe failed\n");
+
+	mfd_remove_devices(&pdev->dev);
+	kfree(lpc_adapter);
+
+	return ret;
+}
+
+static int lpc_chip3_remove(struct platform_device *pdev)
+{
+	struct lpc_chip3_adapter *lpc_adapter = platform_get_drvdata(pdev);
+
+	mfd_remove_devices(&pdev->dev);
+	/* hst_regs is a devm_ mapping; no explicit iounmap() is needed */
+	kfree(lpc_adapter);
+
+	return 0;
+}
+
+static const struct of_device_id chip3_lpc_of_match[] = {
+	{ .compatible = "sunway,chip3_lpc", },
+	{ /* end of table */ }
+};
+
+MODULE_DEVICE_TABLE(of, chip3_lpc_of_match);
+
+#ifdef CONFIG_PM_SLEEP
+unsigned int lpc_irq_ctrl_value;
+unsigned int lpc_irq_irq_value;
+unsigned int lpc_irq_mask_value;
+
+/**
+ * chip3_lpc_platform_suspend - Suspend a chip3_lpc platform device
+ * @dev: the platform device to suspend
+ *
+ * This function saves the LPC controller register values so that
+ * chip3_lpc_platform_resume() can restore them when the machine
+ * wakes up.
+ */
+int chip3_lpc_platform_suspend(struct device *dev)
+{
+	struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev);
+
+	lpc_irq_ctrl_value = lpc_readl(lpc_adapter->hst_regs, LPC_CTL);
+	lpc_irq_irq_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ);
+	lpc_irq_mask_value = lpc_readl(lpc_adapter->hst_regs, LPC_IRQ_MASK);
+
+	return 0;
+}
+
+/**
+ * chip3_lpc_platform_resume - Resume a chip3_lpc platform device
+ * @dev: the platform device to resume
+ *
+ * This function restores the register values saved at suspend time.
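+ *
+ * Return: always 0.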
+ */
+int chip3_lpc_platform_resume(struct device *dev)
+{
+	struct lpc_chip3_adapter *lpc_adapter = dev_get_drvdata(dev);
+
+	lpc_writel(lpc_adapter->hst_regs, LPC_CTL, lpc_irq_ctrl_value);
+	lpc_writel(lpc_adapter->hst_regs, LPC_IRQ, lpc_irq_irq_value);
+	lpc_writel(lpc_adapter->hst_regs, LPC_IRQ_MASK, lpc_irq_mask_value);
+
+	return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(chip3_lpc_pm_ops, chip3_lpc_platform_suspend,
+			 chip3_lpc_platform_resume);
+#endif
+
+static struct platform_driver chip3_lpc_platform_driver = {
+	.driver = {
+		.name = "chip3_lpc",
+		.of_match_table = chip3_lpc_of_match,
+#ifdef CONFIG_PM_SLEEP
+		.pm = &chip3_lpc_pm_ops,
+#endif
+	},
+	.remove = lpc_chip3_remove,
+};
+
+static int __init chip3_lpc_drvinit(void)
+{
+	return platform_driver_probe(&chip3_lpc_platform_driver,
+				     lpc_chip3_probe);
+}
+
+/*
+ * The LPC controller must be configured before the serial drivers:
+ * the LPC bridge and the AST2400 behind it have to be initialized
+ * well before the serial init functions run.
+ */
+subsys_initcall_sync(chip3_lpc_drvinit);
+
+static void __exit chip3_lpc_drvexit(void)
+{
+	platform_driver_unregister(&chip3_lpc_platform_driver);
+}
+
+module_exit(chip3_lpc_drvexit);
+
+MODULE_AUTHOR("Weiqiang Su ");
+MODULE_DESCRIPTION("LPC Interface for CHIP3");
+MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/sunway_ast2400.c b/drivers/mfd/sunway_ast2400.c
new file mode 100644
index 0000000000000000000000000000000000000000..fbea07813643fea6b19ed0c9380ea0e422551b37
--- /dev/null
+++ b/drivers/mfd/sunway_ast2400.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * linux/drivers/mfd/sunway_ast2400.c
+ *
+ * Copyright (C) 2014 - 2015 JN
+ * Author: Weiqiang Su
+ *
+ * Nuvoton AST2400 Super I/O chip platform driver written for
+ * the SUNWAY LPC controller.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+static int superio_uart0_irq;
+static int superio_uart1_irq;
+
+static void pnp_enable(device_t dev)
+{
+	pnp_enter_conf_mode(dev);
+	pnp_set_logical_device(dev);
+	pnp_set_enable(dev, 1);
+	pnp_exit_conf_mode(dev);
+}
+
+const struct pnp_mode_ops pnp_conf_mode_8787_aa = {
+	.enter_conf_mode = pnp_enter_conf_mode_a5a5,
+	.exit_conf_mode = pnp_exit_conf_mode_aa,
+};
+
+static struct device_operations ops = {
+	.enable = pnp_enable,
+	.ops_pnp_mode = &pnp_conf_mode_8787_aa,
+};
+
+static struct pnp_info pnp_dev_info[] = {
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_FDC},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_PP },
+	{ true,  {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP1},
+	{ true,  {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SP2},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_KBC},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIR},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_ACPI},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_HWM_FPLED},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_VID},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_CIRWKUP},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO_PP_OD},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_SVID},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_DSLP},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA_LDN},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_WDT1},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOBASE},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO0},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO1},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO2},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO3},
+	{
false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO4},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO5},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO6},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO7},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO8},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIO9},
+	{ false, {SUPERIO_PNP_PORT, 0, &ops}, AST2400_GPIOA},
+};
+
+static void superio_com1_init(struct pnp_device *device)
+{
+	pnp_enter_conf_mode(device);
+	pnp_set_logical_device(device);
+	pnp_set_enable(device, 1);
+
+	/* I/O base 0x3f8 (0x60/0x61 hold the high/low byte) */
+	pnp_write_config(device, 0x60, 0x3);
+	pnp_write_config(device, 0x61, 0xf8);
+
+	/* IRQ number and IRQ type */
+	pnp_write_config(device, 0x70, superio_uart0_irq);
+	pnp_write_config(device, 0x71, 0x1);
+
+	pnp_write_config(device, 0xf0, 0x0);
+
+	pnp_exit_conf_mode(device);
+}
+
+static void superio_com2_init(struct pnp_device *device)
+{
+	pnp_enter_conf_mode(device);
+	pnp_set_logical_device(device);
+	pnp_set_enable(device, 1);
+
+	/* I/O base 0x2f8 */
+	pnp_write_config(device, 0x60, 0x2);
+	pnp_write_config(device, 0x61, 0xf8);
+
+	pnp_write_config(device, 0x70, superio_uart1_irq);
+	pnp_write_config(device, 0x71, 0x1);
+
+	pnp_write_config(device, 0xf0, 0x0);
+
+	pnp_exit_conf_mode(device);
+}
+
+static void pnp_enable_devices(superio_device_t superio_device,
+			       struct device_operations *ops,
+			       unsigned int functions, struct pnp_info *info)
+{
+	unsigned int i;
+	struct pnp_info *each_info;
+	struct pnp_device *each_device;
+
+	/* Setup the ops and resources on the newly allocated devices. */
+	for (i = 0; i < functions; i++) {
+		each_info = info + i;
+		each_device = &each_info->pnp_device;
+
+		/* Skip logical devices this Super I/O doesn't enable. */
+		if (each_info->enabled == false)
+			continue;
+
+		each_device->device = each_info->function;
+		each_device->ops = ops;
+		each_device->port = superio_device->superio_ast2400_efir;
+
+		switch (each_device->device) {
+		case AST2400_SP1:
+			each_device->ops->init = superio_com1_init;
+			break;
+		case AST2400_SP2:
+			each_device->ops->init = superio_com2_init;
+			break;
+		}
+
+		if (each_device->ops->init)
+			each_device->ops->init(each_device);
+	}
+}
+
+static void superio_enable_devices(superio_device_t superio_device)
+{
+	pnp_enable_devices(superio_device, &ops,
+			   ARRAY_SIZE(pnp_dev_info), pnp_dev_info);
+}
+
+static int superio_ast2400_probe(struct platform_device *pdev)
+{
+	int err = 0;
+	superio_device_t superio_device;
+	struct resource *res;
+	resource_size_t physaddr = 0;
+
+	/* allocate space for device info */
+	superio_device = kzalloc(sizeof(struct superio_ast2400_device), GFP_KERNEL);
+	if (superio_device == NULL) {
+		err = -ENOMEM;
+		return err;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IO, 1);
+	if (res) {
+		physaddr = res->start;
+		dev_info(&pdev->dev, "using I/O region %pR\n", res);
+	}
+
+	superio_device->dev = &pdev->dev;
+	superio_device->enabled = 1;
+	superio_device->superio_ast2400_efir = physaddr + SUPERIO_PNP_PORT;
+	superio_device->superio_ast2400_efdr = physaddr + SUPERIO_PNP_PORT + 1;
+	superio_uart0_irq = platform_get_irq_byname(pdev, "uart0_irq");
+	superio_uart1_irq = platform_get_irq_byname(pdev, "uart1_irq");
+
+	superio_enable_devices(superio_device);
+
+	platform_set_drvdata(pdev, superio_device);
+
+	dev_info(superio_device->dev, "probe succeeded\n");
+
+	return 0;
+}
+
+static int superio_ast2400_remove(struct platform_device *pdev)
+{
+	superio_device_t superio_device = platform_get_drvdata(pdev);
+
+	platform_set_drvdata(pdev, NULL);
+
+	kfree(superio_device);
+
+	return 0;
+}
+
+static struct
platform_driver superio_nuvoton_ast2400_driver = {
+	.probe = superio_ast2400_probe,
+	.remove = superio_ast2400_remove,
+	.driver = {
+		.name = "sunway_superio_ast2400"
+	},
+};
+
+static int __init superio_nuvoton_ast2400_init(void)
+{
+	return platform_driver_register(&superio_nuvoton_ast2400_driver);
+}
+
+subsys_initcall_sync(superio_nuvoton_ast2400_init);
+
+static void __exit superio_nuvoton_ast2400_exit(void)
+{
+	platform_driver_unregister(&superio_nuvoton_ast2400_driver);
+}
+
+module_exit(superio_nuvoton_ast2400_exit);
+
+MODULE_DESCRIPTION("Nuvoton AST2400 Super I/O driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
+MODULE_AUTHOR("Weiqiang Su");
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index cadd4a820c03364ae1c3afd32278dcdcace8cb61..1e9def44eb09ca5eb13abb78f4dac2ca88a244a9 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -376,6 +376,14 @@ config HMC6352
 	  This driver provides support for the Honeywell HMC6352
 	  compass, providing configuration and heading data via sysfs.
 
+config SUNWAY_GED
+	tristate "Sunway generic event device for memory hotplug"
+	depends on SW64
+	depends on MEMORY_HOTPLUG
+	help
+	  This driver provides support for the Sunway generic event
+	  device, which signals memory hotplug add and remove events
+	  to the kernel.
+
 config DS1682
 	tristate "Dallas DS1682 Total Elapsed Time Recorder with Alarm"
 	depends on I2C
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f2a4d1ff65d46a2a014b6e40ed737d26a68a25d0..ccf5456e1d880373d1776276275a8658e374af44 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -35,6 +35,7 @@ obj-$(CONFIG_SENSORS_TSL2550)	+= tsl2550.o
 obj-$(CONFIG_DS1682)		+= ds1682.o
 obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
+obj-$(CONFIG_SUNWAY_GED)	+= sunway-ged.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
 obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index 88b91ad8e5413a6a6c361084ae85a51925bb8776..6cd73f2a487f34bd781c5c45818122a7422860ec 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -130,7 +130,8 @@ static int hw_break_val2;
 static int cont_instead_of_sstep;
 static unsigned long cont_thread_id;
 static unsigned long sstep_thread_id;
-#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC)
+#if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) \
+	|| defined(CONFIG_SW64)
 static int arch_needs_sstep_emulation = 1;
 #else
 static int arch_needs_sstep_emulation;
diff --git a/drivers/misc/sunway-ged.c b/drivers/misc/sunway-ged.c
new file mode 100644
index 0000000000000000000000000000000000000000..b4e4ca31585257961b54120024bdf049af9ae8bd
--- /dev/null
+++ b/drivers/misc/sunway-ged.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Generic Event Device, modeled after the ACPI GED.
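+ *
+ * The device exposes a small MMIO block: a memory-range descriptor
+ * (start address, length and slot) plus a status word that reports
+ * hotplug add/remove events; see the OFFSET_* defines below.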
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define OFFSET_START_ADDR 0 +#define OFFSET_LENGTH 8 +#define OFFSET_STATUS 16 +#define OFFSET_SLOT 24 + +/* Memory hotplug event */ +#define SUNWAY_MEMHOTPLUG_ADD 0x1 +#define SUNWAY_MEMHOTPLUG_REMOVE 0x2 + +struct sunway_memory_device { + struct sunway_ged_device *device; + unsigned int state; /* State of the memory device */ + struct list_head list; + + u64 start_addr; /* Memory Range start physical addr */ + u64 length; /* Memory Range length */ + u64 slot; /* Memory Range slot */ + unsigned int enabled:1; +}; + +struct sunway_ged_device { + struct device *dev; + void __iomem *membase; + void *driver_data; + spinlock_t lock; + struct list_head dev_list; +}; + +static int sunway_memory_enable_device(struct sunway_memory_device *mem_device) +{ + int num_enabled = 0; + int result = 0; + + if (mem_device->enabled) { /* just sanity check...*/ + num_enabled++; + goto out; + } + + /* + * If the memory block size is zero, please ignore it. + * Don't try to do the following memory hotplug flowchart. + */ + if (!mem_device->length) + goto out; + + lock_device_hotplug(); + /* suppose node = 0, fix me! */ + result = __add_memory(0, mem_device->start_addr, mem_device->length); + unlock_device_hotplug(); + /* + * If the memory block has been used by the kernel, add_memory() + * returns -EEXIST. If add_memory() returns the other error, it + * means that this memory block is not used by the kernel. + */ + if (result && result != -EEXIST) + goto out; + + mem_device->enabled = 1; + + /* + * Add num_enable even if add_memory() returns -EEXIST, so the + * device is bound to this driver. + */ + num_enabled++; +out: + if (!num_enabled) { + dev_err(mem_device->device->dev, "add_memory failed\n"); + return -EINVAL; + } + + return 0; +} + +static int sunway_memory_get_meminfo(struct sunway_memory_device *mem_device) +{ + struct sunway_ged_device *geddev; + + if (!mem_device) + return -EINVAL; + + if (mem_device->enabled) + return 0; + + geddev = mem_device->device; + + mem_device->start_addr = readq(geddev->membase + OFFSET_START_ADDR); + mem_device->length = readq(geddev->membase + OFFSET_LENGTH); + + return 0; +} + +static void sunway_memory_device_remove(struct sunway_ged_device *device) +{ + struct sunway_memory_device *mem_dev, *n; + unsigned long start_addr, length, slot; + + if (!device) + return; + + start_addr = readq(device->membase + OFFSET_START_ADDR); + length = readq(device->membase + OFFSET_LENGTH); + slot = readq(device->membase + OFFSET_SLOT); + + list_for_each_entry_safe(mem_dev, n, &device->dev_list, list) { + if (!mem_dev->enabled) + continue; + + if ((start_addr == mem_dev->start_addr) && + (length == mem_dev->length)) { + /* suppose node = 0, fix me! 
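+			 * The target NUMA node is hard-coded to 0 here, as in
+			 * sunway_memory_enable_device() above; deriving the
+			 * node from start_addr would be the likely fix.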
+			 */
+			remove_memory(0, start_addr, length);
+			list_del(&mem_dev->list);
+			kfree(mem_dev);
+		}
+	}
+
+	writeq(slot, device->membase + OFFSET_SLOT);
+}
+
+static int sunway_memory_device_add(struct sunway_ged_device *device)
+{
+	struct sunway_memory_device *mem_device;
+	int result;
+
+	if (!device)
+		return -EINVAL;
+
+	mem_device = kzalloc(sizeof(struct sunway_memory_device), GFP_KERNEL);
+	if (!mem_device)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&mem_device->list);
+	mem_device->device = device;
+
+	/* Get the range from the IO */
+	mem_device->start_addr = readq(device->membase + OFFSET_START_ADDR);
+	mem_device->length = readq(device->membase + OFFSET_LENGTH);
+	mem_device->slot = readq(device->membase + OFFSET_SLOT);
+
+	result = sunway_memory_enable_device(mem_device);
+	if (result) {
+		dev_err(device->dev, "sunway_memory_enable_device() error\n");
+		sunway_memory_device_remove(device);
+
+		return result;
+	}
+
+	list_add_tail(&mem_device->list, &device->dev_list);
+	dev_dbg(device->dev, "Memory device configured\n");
+
+	hcall(HCALL_MEMHOTPLUG, mem_device->start_addr, 0, 0);
+
+	return 1;
+}
+
+static irqreturn_t sunwayged_ist(int irq, void *data)
+{
+	struct sunway_ged_device *sunwayged_dev = data;
+	unsigned int status;
+
+	status = readl(sunwayged_dev->membase + OFFSET_STATUS);
+
+	/* Dispatch on the status register: add or remove memory */
+	if (status & SUNWAY_MEMHOTPLUG_ADD)
+		sunway_memory_device_add(sunwayged_dev);
+
+	if (status & SUNWAY_MEMHOTPLUG_REMOVE)
+		sunway_memory_device_remove(sunwayged_dev);
+
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t sunwayged_irq_handler(int irq, void *data)
+{
+	return IRQ_WAKE_THREAD;
+}
+
+static int sunwayged_probe(struct platform_device *pdev)
+{
+	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	int irq = platform_get_irq(pdev, 0);
+	struct sunway_ged_device *geddev;
+	struct device *dev = &pdev->dev;
+	int irqflags;
+
+	if (!regs) {
+		dev_err(dev, "no registers defined\n");
+		return -EINVAL;
+	}
+
+	geddev = devm_kzalloc(&pdev->dev, sizeof(*geddev), GFP_KERNEL);
+	if (!geddev)
+		return -ENOMEM;
+
+	spin_lock_init(&geddev->lock);
+	geddev->membase = devm_ioremap(&pdev->dev,
+				       regs->start, resource_size(regs));
+	if (!geddev->membase)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&geddev->dev_list);
+	geddev->dev = &pdev->dev;
+	irqflags = IRQF_SHARED;
+
+	if (request_threaded_irq(irq, sunwayged_irq_handler, sunwayged_ist,
+				 irqflags, "SUNWAY:Ged", geddev)) {
+		dev_err(dev, "failed to setup event handler for irq %u\n", irq);
+
+		return -EINVAL;
+	}
+
+	platform_set_drvdata(pdev, geddev);
+
+	return 0;
+}
+
+static int sunwayged_remove(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static const struct of_device_id sunwayged_of_match[] = {
+	{ .compatible = "sw6,sunway-ged", },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, sunwayged_of_match);
+
+static struct platform_driver sunwayged_platform_driver = {
+	.driver = {
+		.name = "sunway-ged",
+		.of_match_table = sunwayged_of_match,
+	},
+	.probe = sunwayged_probe,
+	.remove = sunwayged_remove,
+};
+module_platform_driver(sunwayged_platform_driver);
+
+MODULE_AUTHOR("Lu Feifei");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Sunway GED driver");
diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
index 69e84ff7f2e5214e584c8b09722a069264acc0b0..41d18df97c8576596e602f0749c179f29b5d77fc 100644
--- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
+++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c
@@ -362,7 +362,7 @@ int nfp_abm_ctrl_find_addrs(struct
nfp_abm *abm) const struct nfp_rtsym *sym; int res; - abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp); + abm->pf_id = nfp_get_pf_id(pf); /* Check if Qdisc offloads are supported */ res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1); diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index 5d3df28c648ffc2d1925fa84af04899af9742677..d4acaa15629d5869545ad13bfca23d003032f348 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -451,7 +451,7 @@ static int nfp_abm_init(struct nfp_app *app) nfp_err(pf->cpp, "ABM NIC requires ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index f469950c726573fd1f3a40a162b9f0f03a3bc6a5..3d928dfba114f40655b728dbd25118380ae437eb 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -70,7 +70,7 @@ nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) nfp_err(pf->cpp, "No ETH table\n"); return -EINVAL; } - if (pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->max_data_vnics != pf->eth_tbl->count && !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 83eaa5ae3cd4f5adbf04475811ca91da82ebf4f2..88e8ae25f0cc67cab04db5dbe2e04e25a5888150 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -378,10 +378,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, enum nfp_flower_cmsg_port_vnic_type vnic_type, enum nfp_repr_type repr_type, unsigned int cnt) { - u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp); struct nfp_flower_priv *priv = app->priv; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + u8 nfp_pcie = nfp_get_pf_id(app->pf); enum nfp_port_type port_type; struct nfp_repr *nfp_repr; struct nfp_reprs *reprs; @@ -428,10 +428,10 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app, goto err_reprs_clean; } if (repr_type == NFP_REPR_TYPE_PF) { - port->pf_id = i; + port->pf_id = app->pf->multi_pf.id; port->vnic = priv->nn->dp.ctrl_bar; } else { - port->pf_id = 0; + port->pf_id = app->pf->multi_pf.id; port->vf_id = i; port->vnic = app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ; @@ -496,24 +496,27 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) struct nfp_eth_table *eth_tbl = app->pf->eth_tbl; atomic_t *replies = &priv->reify_replies; struct nfp_flower_repr_priv *repr_priv; + int err, reify_cnt, phy_reprs_num; struct nfp_repr *nfp_repr; struct sk_buff *ctrl_skb; struct nfp_reprs *reprs; - int err, reify_cnt; unsigned int i; ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count); if (!ctrl_skb) return -ENOMEM; + phy_reprs_num = app->pf->multi_pf.en ? 
app->pf->max_data_vnics : + eth_tbl->count; reprs = nfp_reprs_alloc(eth_tbl->max_index + 1); if (!reprs) { err = -ENOMEM; goto err_free_ctrl_skb; } - for (i = 0; i < eth_tbl->count; i++) { - unsigned int phys_port = eth_tbl->ports[i].index; + for (i = 0; i < phy_reprs_num; i++) { + int idx = app->pf->multi_pf.en ? app->pf->multi_pf.id : i; + unsigned int phys_port = eth_tbl->ports[idx].index; struct net_device *repr; struct nfp_port *port; u32 cmsg_port_id; @@ -542,7 +545,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) nfp_repr_free(repr); goto err_reprs_clean; } - err = nfp_port_init_phy_port(app->pf, app, port, i); + err = nfp_port_init_phy_port(app->pf, app, port, idx); if (err) { kfree(repr_priv); nfp_port_free(port); @@ -609,7 +612,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv) static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) { - if (id > 0) { + if (id > 0 && !app->pf->multi_pf.en) { nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n"); goto err_invalid_port; } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 71301dbd8fb5ee462a29c04ac215f91712816b16..0fae86d8abe03e27311d40eaae50423ad32261b1 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -69,6 +69,13 @@ static const struct pci_device_id nfp_pci_device_ids[] = { }; MODULE_DEVICE_TABLE(pci, nfp_pci_device_ids); +u8 nfp_get_pf_id(struct nfp_pf *pf) +{ + return nfp_cppcore_pcie_unit(pf->cpp) * + pf->dev_info->pf_num_per_unit + + pf->multi_pf.id; +} + int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val) { @@ -76,7 +83,7 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, int err = 0; u64 val; - snprintf(name, sizeof(name), format, nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), format, nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -90,15 +97,22 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, } u8 __iomem * -nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { char pf_symbol[256]; - snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, - nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(pf_symbol, sizeof(pf_symbol), sym_fmt, nfp_get_pf_id(pf)); - return nfp_rtsym_map(pf->rtbl, pf_symbol, name, min_size, area); + return nfp_rtsym_map_offset(pf->rtbl, pf_symbol, name, offset, min_size, area); +} + +u8 __iomem * +nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_pf_map_rtsym_offset(pf, name, sym_fmt, 0, min_size, area); } /* Callers should hold the devlink instance lock */ @@ -218,11 +232,49 @@ static int nfp_pf_board_state_wait(struct nfp_pf *pf) return 0; } +static unsigned int nfp_pf_get_limit_vfs(struct nfp_pf *pf, + unsigned int limit_vfs_rtsym) +{ + u16 pos, offset, total; + + if (!pf->multi_pf.en || !limit_vfs_rtsym) + return limit_vfs_rtsym; + + pos = pci_find_ext_capability(pf->pdev, PCI_EXT_CAP_ID_SRIOV); + if (!pos) + return 0; + + /* Management firmware ensures that SR-IOV capability registers + * are initialized correctly. 
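+	 *
+	 * Hypothetical example for an NFP3800 (pf_num_per_unit = 4):
+	 * PF1 reads PCI_SRIOV_VF_OFFSET = 3 and PCI_SRIOV_TOTAL_VF = 8;
+	 * offset becomes 3 + 1 - 4 = 0, so vf_fid = 0 and, as long as
+	 * limit_vfs_rtsym >= 8, all 8 VFs are granted to this PF.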
+ */ + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_VF_OFFSET, &offset); + pci_read_config_word(pf->pdev, pos + PCI_SRIOV_TOTAL_VF, &total); + if (!total) + return 0; + + /* Offset of first VF is relative to its PF. */ + offset += pf->multi_pf.id; + if (offset < pf->dev_info->pf_num_per_unit) + return 0; + + /* For 3800, VF is numbered from max PF count. */ + offset -= pf->dev_info->pf_num_per_unit; + if (offset >= limit_vfs_rtsym) + return 0; + + pf->multi_pf.vf_fid = offset; + if (offset + total > limit_vfs_rtsym) + return limit_vfs_rtsym - offset; + + return total; +} + static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) { + unsigned int limit_vfs_rtsym; int err; - pf->limit_vfs = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); + limit_vfs_rtsym = nfp_rtsym_read_le(pf->rtbl, "nfd_vf_cfg_max_vfs", &err); if (err) { /* For backwards compatibility if symbol not found allow all */ pf->limit_vfs = ~0; @@ -233,9 +285,13 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf) return err; } - err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); - if (err) - nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + pf->limit_vfs = nfp_pf_get_limit_vfs(pf, limit_vfs_rtsym); + if (pci_sriov_get_totalvfs(pf->pdev) != pf->limit_vfs) { + err = pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs); + if (err) + nfp_warn(pf->cpp, "Failed to set VF count in sysfs: %d\n", err); + } + return 0; } @@ -404,7 +460,7 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) if (fw) return fw; - /* Finally try the card type and media */ + /* Then try the card type */ if (!pf->eth_tbl) { dev_err(&pdev->dev, "Error: can't identify media config\n"); return NULL; @@ -418,6 +474,12 @@ nfp_net_fw_find(struct pci_dev *pdev, struct nfp_pf *pf) return NULL; } + sprintf(fw_name, "netronome/%s.nffw", fw_model); + fw = nfp_net_fw_request(pdev, pf, fw_name); + if (fw) + return fw; + + /* Finally try the card type and media */ spc = ARRAY_SIZE(fw_name); spc -= snprintf(fw_name, spc, "netronome/nic_%s", fw_model); @@ -469,6 +531,118 @@ nfp_get_fw_policy_value(struct pci_dev *pdev, struct nfp_nsp *nsp, return err; } +static u8 __iomem * +nfp_get_beat_addr(struct nfp_pf *pf, int pf_id) +{ + /* Each PF has corresponding qword to beat: + * offset | usage + * 0 | magic number + * 8 | beat qword of pf0 + * 16 | beat qword of pf1 + */ + return pf->multi_pf.beat_addr + ((pf_id + 1) << 3); +} + +static void +nfp_nsp_beat_timer(struct timer_list *t) +{ + struct nfp_pf *pf = from_timer(pf, t, multi_pf.beat_timer); + + writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id)); + /* Beat once per second. */ + mod_timer(&pf->multi_pf.beat_timer, jiffies + HZ); +} + +/** + * nfp_nsp_keepalive_start() - Start keepalive mechanism if needed + * @pf: NFP PF Device structure + * + * Return 0 if no error, errno otherwise + */ +static int +nfp_nsp_keepalive_start(struct nfp_pf *pf) +{ + struct nfp_resource *res; + u8 __iomem *base; + int err = 0; + u64 addr; + u32 cpp; + + if (!pf->multi_pf.en) + return 0; + + res = nfp_resource_acquire(pf->cpp, NFP_KEEPALIVE); + if (IS_ERR(res)) + return PTR_ERR(res); + + cpp = nfp_resource_cpp_id(res); + addr = nfp_resource_address(res); + + /* Allocate a fixed area for keepalive. 
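+	 * Layout of the area (see nfp_get_beat_addr()): qword 0 holds
+	 * NFP_KEEPALIVE_MAGIC, qword N + 1 holds the beat stamp of
+	 * PF N, refreshed with jiffies once per second by
+	 * nfp_nsp_beat_timer().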
*/ + base = nfp_cpp_map_area(pf->cpp, "keepalive", cpp, addr, + nfp_resource_size(res), &pf->multi_pf.beat_area); + if (IS_ERR(base)) { + nfp_err(pf->cpp, "Failed to map area for keepalive\n"); + err = PTR_ERR(base); + goto res_release; + } + + pf->multi_pf.beat_addr = base; + timer_setup(&pf->multi_pf.beat_timer, nfp_nsp_beat_timer, 0); + mod_timer(&pf->multi_pf.beat_timer, jiffies); + +res_release: + nfp_resource_release(res); + return err; +} + +static void +nfp_nsp_keepalive_stop(struct nfp_pf *pf) +{ + if (pf->multi_pf.beat_area) { + del_timer_sync(&pf->multi_pf.beat_timer); + nfp_cpp_area_release_free(pf->multi_pf.beat_area); + } +} + +static u64 +nfp_get_sibling_beat(struct nfp_pf *pf) +{ + unsigned int i = 0; + u64 beat = 0; + + if (!pf->multi_pf.beat_addr) + return 0; + + for (; i < pf->dev_info->pf_num_per_unit; i++) { + if (i == pf->multi_pf.id) + continue; + + beat += readq(nfp_get_beat_addr(pf, i)); + } + + return beat; +} + +static bool +nfp_skip_fw_load(struct nfp_pf *pf, struct nfp_nsp *nsp) +{ + unsigned long timeout = jiffies + HZ * 3; + u64 beat = nfp_get_sibling_beat(pf); + + if (!pf->multi_pf.en || nfp_nsp_fw_loaded(nsp) <= 0) + return false; + + while (time_is_after_jiffies(timeout)) { + if (beat != nfp_get_sibling_beat(pf)) + return true; + + msleep(500); + } + + return false; +} + /** * nfp_fw_load() - Load the firmware image * @pdev: PCI Device structure @@ -528,6 +702,13 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) if (err) return err; + err = nfp_nsp_keepalive_start(pf); + if (err) + return err; + + if (nfp_skip_fw_load(pf, nsp)) + return true; + fw = nfp_net_fw_find(pdev, pf); do_reset = reset == NFP_NSP_DRV_RESET_ALWAYS || (fw && reset == NFP_NSP_DRV_RESET_DISK); @@ -556,7 +737,6 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) fw_loaded = true; } else if (policy != NFP_NSP_APP_FW_LOAD_DISK && nfp_nsp_has_stored_fw_load(nsp)) { - /* Don't propagate this error to stick with legacy driver * behavior, failure will be detected later during init. */ @@ -577,7 +757,9 @@ nfp_fw_load(struct pci_dev *pdev, struct nfp_pf *pf, struct nfp_nsp *nsp) * dependent on it, which could be the case if there are multiple * devices that could load firmware. */ - if (fw_loaded && ifcs == 1) + if (err < 0) + nfp_nsp_keepalive_stop(pf); + else if (fw_loaded && ifcs == 1) pf->unload_fw_on_remove = true; return err < 0 ? err : fw_loaded; @@ -629,6 +811,15 @@ static int nfp_nsp_init(struct pci_dev *pdev, struct nfp_pf *pf) if (err < 0) goto exit_close_nsp; + if (pf->multi_pf.en && pf->multi_pf.id) { + err = nfp_nsp_device_activate(nsp); + if (err < 0 && err != -EOPNOTSUPP) { + dev_err(&pdev->dev, + "Failed to activate the NFP device: %d\n", err); + goto exit_close_nsp; + } + } + nfp_nsp_init_ports(pdev, pf, nsp); pf->nspi = __nfp_nsp_identify(nsp); @@ -657,6 +848,12 @@ static void nfp_fw_unload(struct nfp_pf *pf) struct nfp_nsp *nsp; int err; + if (pf->multi_pf.en && pf->multi_pf.beat_addr) { + /* NSP will unload firmware when no active PF exists. 
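+		 * Writing NFP_KEEPALIVE_MAGIC over the start of the beat
+		 * area marks this PF as gone and leaves the actual unload
+		 * to the management firmware.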
*/ + writeq(NFP_KEEPALIVE_MAGIC, pf->multi_pf.beat_addr); + return; + } + nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) { nfp_err(pf->cpp, "Reset failed, can't open NSP\n"); @@ -674,10 +871,8 @@ static void nfp_fw_unload(struct nfp_pf *pf) static int nfp_pf_find_rtsyms(struct nfp_pf *pf) { + unsigned int pf_id = nfp_get_pf_id(pf); char pf_symbol[256]; - unsigned int pf_id; - - pf_id = nfp_cppcore_pcie_unit(pf->cpp); /* Optional per-PCI PF mailbox */ snprintf(pf_symbol, sizeof(pf_symbol), NFP_MBOX_SYM_NAME, pf_id); @@ -703,7 +898,7 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) int err = 0; u64 val; - snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_cppcore_pcie_unit(pf->cpp)); + snprintf(name, sizeof(name), "_pf%u_net_app_cap", nfp_get_pf_id(pf)); val = nfp_rtsym_read_le(pf->rtbl, name, &err); if (err) { @@ -753,6 +948,18 @@ static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf) nfp_nsp_close(nsp); } +static u8 nfp_init_pf_id(struct pci_dev *pdev) +{ + int vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + u8 id = 0; + + if (!vndr) + return PCI_FUNC(pdev->devfn); + + pci_read_config_byte(pdev, vndr + NFP_VNDR_PF_ID_OFFSET, &id); + return id; +} + static int nfp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) { @@ -803,15 +1010,22 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_priv_unset; } - pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info); + pf->multi_pf.en = pdev->multifunction; + pf->multi_pf.id = nfp_init_pf_id(pdev); + dev_info(&pdev->dev, "%s-PF detected\n", pf->multi_pf.en ? "Multi" : "Single"); + + pf->cpp = nfp_cpp_from_nfp6000_pcie(pdev, dev_info, pf); if (IS_ERR(pf->cpp)) { err = PTR_ERR(pf->cpp); goto err_disable_msix; } - err = nfp_resource_table_init(pf->cpp); - if (err) - goto err_cpp_free; + /* Only PF0 has the right to reclaim locked resources. */ + if (!pf->multi_pf.id) { + err = nfp_resource_table_init(pf->cpp); + if (err) + goto err_cpp_free; + } pf->hwinfo = nfp_hwinfo_read(pf->cpp); @@ -874,6 +1088,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, nfp_mip_close(pf->mip); if (pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); kfree(pf->eth_tbl); kfree(pf->nspi); vfree(pf->dumpspec); @@ -896,12 +1111,14 @@ static int nfp_pci_probe(struct pci_dev *pdev, static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) { + bool keep_device_active; struct nfp_pf *pf; pf = pci_get_drvdata(pdev); if (!pf) return; + keep_device_active = pf->multi_pf.en && !pf->multi_pf.id; nfp_hwmon_unregister(pf); nfp_pcie_sriov_disable(pdev); @@ -914,6 +1131,7 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) if (unload_fw && pf->unload_fw_on_remove) nfp_fw_unload(pf); + nfp_nsp_keepalive_stop(pf); destroy_workqueue(pf->wq); pci_set_drvdata(pdev, NULL); kfree(pf->hwinfo); @@ -923,7 +1141,13 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) kfree(pf->nspi); devlink_free(priv_to_devlink(pf)); pci_release_regions(pdev); - pci_disable_device(pdev); + + /* In multiple pfs case, we need to keep master flag of pf 0 + * to ensure vfs of other pfs work normally because of + * hardware limitation. 
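+	 * keep_device_active is true only for PF0 of a multi-PF device,
+	 * so PF0 alone skips pci_disable_device() here and keeps bus
+	 * mastering enabled on behalf of the other PFs' VFs.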
+	 */
+	if (!keep_device_active)
+		pci_disable_device(pdev);
 }
 
 static void nfp_pci_remove(struct pci_dev *pdev)
@@ -936,12 +1160,65 @@ static void nfp_pci_shutdown(struct pci_dev *pdev)
 	__nfp_pci_shutdown(pdev, false);
 }
 
+void nfp_pci_error_reset_prepare(struct pci_dev *dev)
+{
+	struct nfp_pf *pf = pci_get_drvdata(dev);
+
+	if (pf) {
+		struct nfp_net *nn;
+
+		if (pf->multi_pf.en && pf->multi_pf.beat_addr) {
+			/* Pause the heartbeat timer so it cannot fire during FLR */
+			del_timer_sync(&pf->multi_pf.beat_timer);
+			/* Stamp the keepalive once more so the firmware keeps
+			 * this PF alive across back-to-back FLRs.
+			 */
+			writeq(jiffies, nfp_get_beat_addr(pf, pf->multi_pf.id));
+		}
+
+		list_for_each_entry(nn, &pf->vnics, vnic_list) {
+			if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) {
+				struct net_device *netdev = nn->dp.netdev;
+
+				netdev->netdev_ops->ndo_stop(netdev);
+			}
+		}
+	}
+}
+
+void nfp_pci_error_reset_done(struct pci_dev *dev)
+{
+	struct nfp_pf *pf = pci_get_drvdata(dev);
+
+	if (pf) {
+		struct nfp_net *nn;
+
+		list_for_each_entry(nn, &pf->vnics, vnic_list) {
+			if (nn->dp.netdev && nn->dp.netdev->flags & IFF_UP) {
+				struct net_device *netdev = nn->dp.netdev;
+
+				rtnl_lock();
+				netdev->netdev_ops->ndo_open(netdev);
+				rtnl_unlock();
+			}
+		}
+		if (pf->multi_pf.en && pf->multi_pf.beat_addr)
+			add_timer(&pf->multi_pf.beat_timer);
+	}
+}
+
+static const struct pci_error_handlers nfp_pci_err_handler = {
+	.reset_prepare	= nfp_pci_error_reset_prepare,
+	.reset_done	= nfp_pci_error_reset_done,
+};
+
 static struct pci_driver nfp_pci_driver = {
 	.name			= nfp_driver_name,
 	.id_table		= nfp_pci_device_ids,
 	.probe			= nfp_pci_probe,
 	.remove			= nfp_pci_remove,
 	.shutdown		= nfp_pci_shutdown,
+	.err_handler		= &nfp_pci_err_handler,
 	.sriov_configure	= nfp_pcie_sriov_configure,
 };
 
@@ -994,6 +1271,13 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
 MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
+MODULE_FIRMWARE("netronome/AMDA0161-1001.nffw");
+MODULE_FIRMWARE("netronome/AMDA2000-1103.nffw");
+MODULE_FIRMWARE("netronome/AMDA2000-1104.nffw");
+MODULE_FIRMWARE("netronome/AMDA2001-1103.nffw");
+MODULE_FIRMWARE("netronome/AMDA2001-1104.nffw");
+MODULE_FIRMWARE("netronome/AMDA2002-1113.nffw");
+MODULE_FIRMWARE("netronome/AMDA2002-1114.nffw");
 
 MODULE_AUTHOR("Corigine, Inc. ");
 MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index 14a751bfe1fe15f3648e9fd5fb7c934bad77a00b..5a01c66ddce9520dc16a7da49892c5df39c2a0f5 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -84,6 +84,13 @@ struct nfp_dumpspec {
  * @port_refresh_work:	Work entry for taking netdevs out
  * @shared_bufs:	Array of shared buffer structures if FW has any SBs
  * @num_shared_bufs:	Number of elements in @shared_bufs
+ * @multi_pf:		State used in a multi-PF setup
+ * @multi_pf.en:	True if this is a multi-PF setup
+ * @multi_pf.id:	PF index
+ * @multi_pf.vf_fid:	ID of the first VF that belongs to this PF
+ * @multi_pf.beat_timer: Timer driving the keepalive beat
+ * @multi_pf.beat_area:	Pointer to the CPP area used for the keepalive beat
+ * @multi_pf.beat_addr:	Pointer to mapped beat address used for keepalive
  *
  * Fields which may change after probe are protected by devlink instance lock.
*/ @@ -141,6 +148,15 @@ struct nfp_pf { struct nfp_shared_buf *shared_bufs; unsigned int num_shared_bufs; + + struct { + bool en; + u8 id; + u8 vf_fid; + struct timer_list beat_timer; + struct nfp_cpp_area *beat_area; + u8 __iomem *beat_addr; + } multi_pf; }; extern struct pci_driver nfp_netvf_pci_driver; @@ -163,6 +179,10 @@ int nfp_pf_rtsym_read_optional(struct nfp_pf *pf, const char *format, unsigned int default_val); int nfp_net_pf_get_app_id(struct nfp_pf *pf); u8 __iomem * +nfp_pf_map_rtsym_offset(struct nfp_pf *pf, const char *name, const char *sym_fmt, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt, unsigned int min_size, struct nfp_cpp_area **area); int nfp_mbox_cmd(struct nfp_pf *pf, u32 cmd, void *in_data, u64 in_length, @@ -194,4 +214,5 @@ void nfp_devlink_params_unregister(struct nfp_pf *pf); unsigned int nfp_net_lr2speed(unsigned int linkrate); unsigned int nfp_net_speed2lr(unsigned int speed); +u8 nfp_get_pf_id(struct nfp_pf *pf); #endif /* NFP_MAIN_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index de0a5d5ded305b7ed40e984e2c7fc8f0635d3367..9f527edd2f3b1b85d07793edd95e0739cb3798d2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -2704,6 +2704,11 @@ int nfp_net_init(struct nfp_net *nn) if (nn->cap_w1 & NFP_NET_CFG_CTRL_MCAST_FILTER) nn->dp.ctrl_w1 |= NFP_NET_CFG_CTRL_MCAST_FILTER; + /* Multi-PF is already enabled during pre-init, preserve control bit */ + if (nn->cap_w1 & NFP_NET_CFG_CTRL_MULTI_PF) + nn->dp.ctrl_w1 |= (nn_readl(nn, NFP_NET_CFG_CTRL_WORD1) & + NFP_NET_CFG_CTRL_MULTI_PF); + /* Stash the re-configuration queue away. First odd queue in TX Bar */ nn->qcp_cfg = nn->tx_bar + NFP_QCP_QUEUE_ADDR_SZ; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index 3e63f6d6a563d1261a09471947d5fe5bf24f51f4..d6b127f13ed35edc46f10f19e6b10da4d3922e58 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -268,6 +268,7 @@ #define NFP_NET_CFG_CTRL_PKT_TYPE (0x1 << 0) /* Pkttype offload */ #define NFP_NET_CFG_CTRL_IPSEC (0x1 << 1) /* IPsec offload */ #define NFP_NET_CFG_CTRL_MCAST_FILTER (0x1 << 2) /* Multicast Filter */ +#define NFP_NET_CFG_CTRL_MULTI_PF (0x1 << 5) /* Multi PF */ #define NFP_NET_CFG_CTRL_FREELIST_EN (0x1 << 6) /* Freelist enable flag bit */ #define NFP_NET_CFG_CAP_WORD1 0x00a4 diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index cbe4972ba104ba0e4368d8c901618fa574dbf738..5df99c60c3b226584eec89fb10640439e4b30bff 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -141,7 +141,7 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id) { int err; - nn->id = id; + nn->id = pf->multi_pf.en ? pf->multi_pf.id : id; if (nn->port) { err = nfp_devlink_port_register(pf->app, nn->port); @@ -184,7 +184,7 @@ nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar, for (i = 0; i < pf->max_data_vnics; i++) { nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar, - stride, i); + stride, pf->multi_pf.en ? 
pf->multi_pf.id : i); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_free_prev; @@ -293,6 +293,16 @@ static int nfp_net_pf_init_vnics(struct nfp_pf *pf) return err; } +static void nfp_net_pf_clean_vnics(struct nfp_pf *pf) +{ + struct nfp_net *nn; + + list_for_each_entry(nn, &pf->vnics, vnic_list) { + if (nfp_net_is_data_vnic(nn)) + nfp_net_pf_clean_vnic(pf, nn); + } +} + static int nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride) { @@ -463,9 +473,10 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) } } - pf->vf_cfg_mem = nfp_pf_map_rtsym(pf, "net.vfcfg", "_pf%d_net_vf_bar", - NFP_NET_CFG_BAR_SZ * pf->limit_vfs, - &pf->vf_cfg_bar); + pf->vf_cfg_mem = nfp_pf_map_rtsym_offset(pf, "net.vfcfg", "_pf%d_net_vf_bar", + NFP_NET_CFG_BAR_SZ * pf->multi_pf.vf_fid, + NFP_NET_CFG_BAR_SZ * pf->limit_vfs, + &pf->vf_cfg_bar); if (IS_ERR(pf->vf_cfg_mem)) { if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) { err = PTR_ERR(pf->vf_cfg_mem); @@ -474,7 +485,8 @@ static int nfp_net_pci_map_mem(struct nfp_pf *pf) pf->vf_cfg_mem = NULL; } - min_size = NFP_NET_VF_CFG_SZ * pf->limit_vfs + NFP_NET_VF_CFG_MB_SZ; + min_size = NFP_NET_VF_CFG_SZ * (pf->limit_vfs + pf->multi_pf.vf_fid) + + NFP_NET_VF_CFG_MB_SZ; pf->vfcfg_tbl2 = nfp_pf_map_rtsym(pf, "net.vfcfg_tbl2", "_pf%d_net_vf_cfg2", min_size, &pf->vfcfg_tbl2_area); @@ -684,15 +696,100 @@ int nfp_net_refresh_eth_port(struct nfp_port *port) return ret; } +static int nfp_net_pre_init(struct nfp_pf *pf, int *stride) +{ + struct nfp_net_fw_version fw_ver; + struct nfp_cpp_area *area; + u8 __iomem *ctrl_bar; + int err = 0; + + ctrl_bar = nfp_pf_map_rtsym(pf, NULL, "_pf%d_net_bar0", NFP_PF_CSR_SLICE_SIZE, &area); + if (IS_ERR(ctrl_bar)) { + nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n"); + return pf->fw_loaded ? PTR_ERR(ctrl_bar) : 1; + } + + nfp_net_get_fw_version(&fw_ver, ctrl_bar); + if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK || + fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) { + nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + + /* Determine stride */ + if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) { + *stride = 2; + nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n"); + } else { + switch (fw_ver.major) { + case 1 ... 5: + *stride = 4; + break; + default: + nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n", + fw_ver.extend, fw_ver.class, + fw_ver.major, fw_ver.minor); + err = -EINVAL; + goto end; + } + } + + if (!pf->multi_pf.en) + goto end; + + /* Enable multi-PF. */ + if (readl(ctrl_bar + NFP_NET_CFG_CAP_WORD1) & NFP_NET_CFG_CTRL_MULTI_PF) { + unsigned long long addr; + u32 cfg_q, cpp_id, ret; + unsigned long timeout; + + writel(NFP_NET_CFG_CTRL_MULTI_PF, ctrl_bar + NFP_NET_CFG_CTRL_WORD1); + writel(NFP_NET_CFG_UPDATE_GEN, ctrl_bar + NFP_NET_CFG_UPDATE); + + /* Config queue is next to txq. 
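+		 * Ring its ADD_WPTR doorbell once so the firmware picks up
+		 * the NFP_NET_CFG_UPDATE_GEN request written above, then
+		 * poll NFP_NET_CFG_UPDATE until the firmware clears it.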
+		 */
+		cfg_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ) + 1;
+		addr = nfp_qcp_queue_offset(pf->dev_info, cfg_q) + NFP_QCP_QUEUE_ADD_WPTR;
+		cpp_id = NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW, 0, 0);
+		err = nfp_cpp_writel(pf->cpp, cpp_id, addr, 1);
+		if (err)
+			goto end;
+
+		timeout = jiffies + HZ * NFP_NET_POLL_TIMEOUT;
+		while ((ret = readl(ctrl_bar + NFP_NET_CFG_UPDATE))) {
+			if (ret & NFP_NET_CFG_UPDATE_ERR) {
+				nfp_err(pf->cpp, "Enable multi-PF failed\n");
+				err = -EIO;
+				break;
+			}
+
+			usleep_range(250, 500);
+			if (time_is_before_eq_jiffies(timeout)) {
+				nfp_err(pf->cpp, "Enable multi-PF timeout\n");
+				err = -ETIMEDOUT;
+				break;
+			}
+		}
+	} else {
+		nfp_err(pf->cpp, "Loaded firmware doesn't support multi-PF\n");
+		err = -EINVAL;
+	}
+
+end:
+	nfp_cpp_area_release_free(area);
+	return err;
+}
+
 /*
  * PCI device functions
  */
 int nfp_net_pci_probe(struct nfp_pf *pf)
 {
 	struct devlink *devlink = priv_to_devlink(pf);
-	struct nfp_net_fw_version fw_ver;
 	u8 __iomem *ctrl_bar, *qc_bar;
-	int stride;
+	int stride = 0;
 	int err;
 
 	INIT_WORK(&pf->port_refresh_work, nfp_net_refresh_vnics);
@@ -703,9 +800,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		return -EINVAL;
 	}
 
+	err = nfp_net_pre_init(pf, &stride);
+	if (err)
+		return err;
+
 	pf->max_data_vnics = nfp_net_pf_get_num_ports(pf);
 	if ((int)pf->max_data_vnics < 0)
 		return pf->max_data_vnics;
+	if (pf->multi_pf.en && pf->max_data_vnics != 1) {
+		nfp_err(pf->cpp, "Only one data vNIC per PF is supported in a multi-PF setup, please update FW.\n");
+		return -EPERM;
+	}
 
 	err = nfp_net_pci_map_mem(pf);
 	if (err)
@@ -718,34 +823,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		goto err_unmap;
 	}
 
-	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
-	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
-	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
-		nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
-			fw_ver.extend, fw_ver.class,
-			fw_ver.major, fw_ver.minor);
-		err = -EINVAL;
-		goto err_unmap;
-	}
-
-	/* Determine stride */
-	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
-		stride = 2;
-		nfp_warn(pf->cpp, "OBSOLETE Firmware detected - VF isolation not available\n");
-	} else {
-		switch (fw_ver.major) {
-		case 1 ... 5:
-			stride = 4;
-			break;
-		default:
-			nfp_err(pf->cpp, "Unsupported Firmware ABI %d.%d.%d.%d\n",
-				fw_ver.extend, fw_ver.class,
-				fw_ver.major, fw_ver.minor);
-			err = -EINVAL;
-			goto err_unmap;
-		}
-	}
-
 	err = nfp_net_pf_app_init(pf, qc_bar, stride);
 	if (err)
 		goto err_unmap;
@@ -778,11 +855,17 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 	if (err)
 		goto err_stop_app;
 
+	err = nfp_net_pf_init_sriov(pf);
+	if (err)
+		goto err_clean_vnics;
+
 	devl_unlock(devlink);
 	devlink_register(devlink);
 
 	return 0;
 
+err_clean_vnics:
+	nfp_net_pf_clean_vnics(pf);
 err_stop_app:
 	nfp_net_pf_app_stop(pf);
 err_free_irqs:
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
index 6eeeb0fda91fac4d494f1b60794f1591360f41bd..67aea9445aa29cbbbbf376b31d1d5925269d0777 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c
@@ -14,6 +14,9 @@
 #include "nfp_net.h"
 #include "nfp_net_sriov.h"
 
+/* Configuration that must precede VF creation.
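+ * It is applied before any VF exists, so nfp_net_sriov_check()
+ * skips the usual VF-id range check for it.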
*/ +#define NFP_NET_VF_PRE_CONFIG NFP_NET_VF_CFG_MB_CAP_SPLIT + static int nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { @@ -29,6 +32,10 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool return -EOPNOTSUPP; } + /* No need to check vf for the pre-configurations. */ + if (cap & NFP_NET_VF_PRE_CONFIG) + return 0; + if (vf < 0 || vf >= app->pf->num_vfs) { if (warn) nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); @@ -65,7 +72,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) { struct nfp_app *app = nfp_app_from_netdev(netdev); unsigned int vf_offset; - int err; + int err, abs_vf; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) @@ -78,13 +85,14 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) return -EINVAL; } + abs_vf = vf + app->pf->multi_pf.vf_fid; /* Write MAC to VF entry in VF config symbol */ - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + abs_vf * NFP_NET_VF_CFG_SZ; writel(get_unaligned_be32(mac), app->pf->vfcfg_tbl2 + vf_offset); writew(get_unaligned_be16(mac + 4), app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); - err = nfp_net_sriov_update(app, vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); + err = nfp_net_sriov_update(app, abs_vf, NFP_NET_VF_CFG_MB_UPD_MAC, "MAC"); if (!err) nfp_info(app->pf->cpp, "MAC %pM set on VF %d, reload the VF driver to make this change effective.\n", @@ -138,6 +146,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, if (vlan_tag && is_proto_sup) vlan_tag |= FIELD_PREP(NFP_NET_VF_CFG_VLAN_PROT, ntohs(vlan_proto)); + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; writel(vlan_tag, app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_VLAN); @@ -162,6 +171,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, return -EINVAL; } + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; ratevalue = FIELD_PREP(NFP_NET_VF_CFG_MAX_RATE, max_tx_rate ? 
max_tx_rate : @@ -188,6 +198,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) return err; /* Write spoof check control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -212,6 +223,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) return err; /* Write trust control bit to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -246,6 +258,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, } /* Write link state to VF entry in VF config symbol */ + vf += app->pf->multi_pf.vf_fid; vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ + NFP_NET_VF_CFG_CTRL; vf_ctrl = readb(app->pf->vfcfg_tbl2 + vf_offset); @@ -271,7 +284,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, if (err) return err; - vf_offset = NFP_NET_VF_CFG_MB_SZ + vf * NFP_NET_VF_CFG_SZ; + vf_offset = NFP_NET_VF_CFG_MB_SZ + (vf + app->pf->multi_pf.vf_fid) * NFP_NET_VF_CFG_SZ; mac_hi = readl(app->pf->vfcfg_tbl2 + vf_offset); mac_lo = readw(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_MAC_LO); @@ -309,3 +322,21 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, return 0; } + +int nfp_net_pf_init_sriov(struct nfp_pf *pf) +{ + int err; + + if (!pf->multi_pf.en || !pf->limit_vfs) + return 0; + + err = nfp_net_sriov_check(pf->app, 0, NFP_NET_VF_CFG_MB_CAP_SPLIT, "split", true); + if (err) + return err; + + writeb(pf->limit_vfs, pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_VF_CNT); + + /* Reuse NFP_NET_VF_CFG_MB_VF_NUM to pass vf_fid to FW. 
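+	 * nfp_net_sriov_update() carries a VF id as its second argument,
+	 * so the vf_fid passed here tells the firmware the first VF id
+	 * of this PF's split, alongside the count written above.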
*/ + return nfp_net_sriov_update(pf->app, pf->multi_pf.vf_fid, + NFP_NET_VF_CFG_MB_UPD_SPLIT, "split"); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h index 2d445fa199dc8884d94f8f8f9d3d7dab2dfe2c1b..8de9590188199c1ae11d56a6923c0ba527ce3fab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.h @@ -21,6 +21,7 @@ #define NFP_NET_VF_CFG_MB_CAP_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_CAP_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_CAP_SPLIT (0x1 << 8) #define NFP_NET_VF_CFG_MB_RET 0x2 #define NFP_NET_VF_CFG_MB_UPD 0x4 #define NFP_NET_VF_CFG_MB_UPD_MAC (0x1 << 0) @@ -30,6 +31,8 @@ #define NFP_NET_VF_CFG_MB_UPD_TRUST (0x1 << 4) #define NFP_NET_VF_CFG_MB_UPD_VLAN_PROTO (0x1 << 5) #define NFP_NET_VF_CFG_MB_UPD_RATE (0x1 << 6) +#define NFP_NET_VF_CFG_MB_UPD_SPLIT (0x1 << 8) +#define NFP_NET_VF_CFG_MB_VF_CNT 0x6 #define NFP_NET_VF_CFG_MB_VF_NUM 0x7 /* VF config entry @@ -68,4 +71,6 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int nfp_app_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); +int nfp_net_pf_init_sriov(struct nfp_pf *pf); + #endif /* _NFP_NET_SRIOV_H_ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_port.c b/drivers/net/ethernet/netronome/nfp/nfp_port.c index 54640bcb70fbabff03bb91873710021fdc4b7c4b..dadd6844c3855c709fa79b4274a2e29f642ac2fd 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_port.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_port.c @@ -189,7 +189,8 @@ int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app, port->eth_port = &pf->eth_tbl->ports[id]; port->eth_id = pf->eth_tbl->ports[id].index; - port->netdev->dev_port = id; + if (!pf->multi_pf.en) + port->netdev->dev_port = id; if (pf->mac_stats_mem) port->eth_stats = pf->mac_stats_mem + port->eth_id * NFP_MAC_STATS_SIZE; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h index db94b0bddc925136748b7af2bcd0828f4f83eaba..89a131cffc48a500040eba32b9f05bb168cc5458 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -64,6 +64,10 @@ int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, /* MAC Statistics Accumulator */ #define NFP_RESOURCE_MAC_STATISTICS "mac.stat" +/* Keepalive */ +#define NFP_KEEPALIVE "nfp.beat" +#define NFP_KEEPALIVE_MAGIC 0x6e66702e62656174ULL /* ASCII of "nfp.beat" */ + int nfp_resource_table_init(struct nfp_cpp *cpp); struct nfp_resource * diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c index 33b4c28563162eeab3938da414b32cfd480c13d7..0d881e37de20ae37a75d03722536cf2087ec39ab 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -29,6 +29,7 @@ #include "nfp_cpp.h" #include "nfp_dev.h" +#include "../nfp_main.h" #include "nfp6000/nfp6000.h" @@ -532,7 +533,8 @@ static int bar_cmp(const void *aptr, const void *bptr) * BAR1.0-BAR1.7: -- * BAR2.0-BAR2.7: -- */ -static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface, + struct nfp_pf *pf) { const u32 barcfg_msix_general = NFP_PCIE_BAR_PCIE2CPP_MapType( @@ -609,7 +611,7 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 
interface) bar->iomem = ioremap(nfp_bar_resource_start(bar), nfp_bar_resource_len(bar)); if (bar->iomem) { - int pf; + int pf_id; msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); atomic_inc(&bar->refcnt); @@ -622,8 +624,8 @@ static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) switch (nfp->pdev->device) { case PCI_DEVICE_ID_NFP3800: - pf = nfp->pdev->devfn & 7; - nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + pf_id = pf->multi_pf.id; + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf_id); break; case PCI_DEVICE_ID_NFP4000: case PCI_DEVICE_ID_NFP5000: @@ -1307,7 +1309,8 @@ static const struct nfp_cpp_operations nfp6000_pcie_ops = { * Return: NFP CPP handle */ struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info) +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf) { struct nfp6000_pcie *nfp; u16 interface; @@ -1351,7 +1354,7 @@ nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_i goto err_free_nfp; } - err = enable_bars(nfp, interface); + err = enable_bars(nfp, interface, pf); if (err) goto err_free_nfp; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h index 097660b673dbf251dd32d631007be3332a43feba..e992f5c910132f0ae23001c1a29945f0511728d2 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -11,7 +11,14 @@ #include "nfp_cpp.h" +/* Vendor specific register layout */ +#define NFP_VNDR_HEADER_OFFSET 0x0 +#define NFP_VNDR_PF_ID_OFFSET 0x4 + +struct nfp_pf; + struct nfp_cpp * -nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info); +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info, + struct nfp_pf *pf); #endif /* NFP6000_PCIE_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c index 0725b51c2a95cb9b46f39bdf4551d411b6e6d742..8a7c5de0de772e6ef57e7400782fe4fde1298972 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -19,6 +19,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0a00, .pcie_expl_offset = 0xd000, .qc_area_sz = 0x100000, + .pf_num_per_unit = 4, }, [NFP_DEV_NFP3800_VF] = { .dma_mask = DMA_BIT_MASK(48), @@ -38,6 +39,7 @@ const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { .pcie_cfg_expbar_offset = 0x0400, .pcie_expl_offset = 0x1000, .qc_area_sz = 0x80000, + .pf_num_per_unit = 1, }, [NFP_DEV_NFP6000_VF] = { .dma_mask = DMA_BIT_MASK(40), diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h index e4d38178de0fd6bebff3767226eb5fcb0d2ba058..d948c9c4a09a07acd51341f3cfc08e92cff84034 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -35,6 +35,7 @@ struct nfp_dev_info { u32 pcie_cfg_expbar_offset; u32 pcie_expl_offset; u32 qc_area_sz; + u8 pf_num_per_unit; }; extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT]; diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h index 49a4d3f56b56a8cec0501ded5e362fc154686651..4042352f83b08e6eae3054fbb66c9bfdc860ef4a 100644 --- 
a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h @@ -101,6 +101,10 @@ u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, u64 value); u8 __iomem * +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area); +u8 __iomem * nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, unsigned int min_size, struct nfp_cpp_area **area); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c index 7136bc48530ba08cf431339481bb352e34e8a9b6..55d799d420aa37c8e88b9390032b2208e81622b8 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -37,7 +37,8 @@ #define NSP_COMMAND 0x08 #define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) -#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_CODE_MJ_VER GENMASK_ULL(31, 28) +#define NSP_COMMAND_CODE GENMASK_ULL(27, 16) #define NSP_COMMAND_DMA_BUF BIT_ULL(1) #define NSP_COMMAND_START BIT_ULL(0) @@ -58,7 +59,7 @@ #define NFP_CAP_CMD_DMA_SG 0x28 #define NSP_MAGIC 0xab10 -#define NSP_MAJOR 0 +#define NSP_MAJOR 1 #define NSP_MINOR 8 #define NSP_CODE_MAJOR GENMASK(15, 12) @@ -101,6 +102,7 @@ enum nfp_nsp_cmd { SPCODE_VERSIONS = 21, /* Report FW versions */ SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */ + SPCODE_DEV_ACTIVATE = 29, /* Activate hardware for the multiple-PF case */ }; struct nfp_nsp_dma_buf { @@ -247,14 +249,14 @@ static int nfp_nsp_check(struct nfp_nsp *state) state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); - if (state->ver.major != NSP_MAJOR) { + if (state->ver.major > NSP_MAJOR) { nfp_err(cpp, "Unsupported ABI %hu.%hu\n", state->ver.major, state->ver.minor); return -EINVAL; } if (state->ver.minor < NSP_MINOR) { - nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n", - NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR); + nfp_err(cpp, "ABI too old to support NIC operation (x.%u < x.%u), please update the management FW on the flash\n", + state->ver.minor, NSP_MINOR); return -EINVAL; } @@ -380,6 +382,7 @@ __nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg) err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command, FIELD_PREP(NSP_COMMAND_OPTION, arg->option) | + FIELD_PREP(NSP_COMMAND_CODE_MJ_VER, state->ver.major) | FIELD_PREP(NSP_COMMAND_CODE, arg->code) | FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) | FIELD_PREP(NSP_COMMAND_START, 1)); @@ -730,6 +733,17 @@ int nfp_nsp_device_soft_reset(struct nfp_nsp *state) return nfp_nsp_command(state, SPCODE_SOFT_RESET); } +int nfp_nsp_device_activate(struct nfp_nsp *state) +{ + /* Older ABI versions did support this feature; however, it has only + * been reliable since ABI 38.
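+ * (nfp_nsp_get_abi_ver_minor() reports the minor ABI version of the + * NSP management firmware resident on the flash.)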
+ */ + if (nfp_nsp_get_abi_ver_minor(state) < 38) + return -EOPNOTSUPP; + + return nfp_nsp_command(state, SPCODE_DEV_ACTIVATE); +} + int nfp_nsp_mac_reinit(struct nfp_nsp *state) { return nfp_nsp_command(state, SPCODE_MAC_INIT); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h index 6e044ac049172e3713ee70c09d0e94a110115cd7..f34b996b0749656cb7e97df198199fcc327ccaca 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -17,6 +17,7 @@ u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state); u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state); int nfp_nsp_wait(struct nfp_nsp *state); int nfp_nsp_device_soft_reset(struct nfp_nsp *state); +int nfp_nsp_device_activate(struct nfp_nsp *state); int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw); int nfp_nsp_mac_reinit(struct nfp_nsp *state); diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c index 2260c2403a83a177df578bfe5bcad47981cc339f..97a4417a1c1b0df00ae8946609789f3dffe6dbb5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -520,8 +520,9 @@ int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, } u8 __iomem * -nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, - unsigned int min_size, struct nfp_cpp_area **area) +nfp_rtsym_map_offset(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int offset, unsigned int min_size, + struct nfp_cpp_area **area) { const struct nfp_rtsym *sym; u8 __iomem *mem; @@ -540,12 +541,12 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return (u8 __iomem *)ERR_PTR(err); } - if (sym->size < min_size) { + if (sym->size < min_size + offset) { nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); return (u8 __iomem *)ERR_PTR(-EINVAL); } - mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr + offset, sym->size - offset, area); if (IS_ERR(mem)) { nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", name, PTR_ERR(mem)); @@ -554,3 +555,10 @@ nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, return mem; } + +u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area) +{ + return nfp_rtsym_map_offset(rtbl, name, id, 0, min_size, area); +} diff --git a/drivers/net/ethernet/netronome/nfp/nic/main.c b/drivers/net/ethernet/netronome/nfp/nic/main.c index 9dd5afe37f6e44ca0c4043db58514a0964246e89..e7a2d01bcbff1c2832dbab6206f4b75fe24b1579 100644 --- a/drivers/net/ethernet/netronome/nfp/nic/main.c +++ b/drivers/net/ethernet/netronome/nfp/nic/main.c @@ -12,7 +12,8 @@ static int nfp_nic_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; - if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count) { + if (pf->eth_tbl && pf->max_data_vnics != pf->eth_tbl->count && + !pf->multi_pf.en) { nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n", pf->max_data_vnics, pf->eth_tbl->count); return -EINVAL; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 
69b9c71f0ededed8e8d7b0d92acd0949b4c1f911..f41f8a576e452d9df9f7d4dcdd4a1d0c19f60bf8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1941,13 +1941,18 @@ static void __free_dma_rx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, dma_conf->dma_rx_size * sizeof(struct dma_extended_desc), rx_q->dma_erx, rx_q->dma_rx_phy); + rx_q->dma_rx = NULL; + rx_q->dma_erx = NULL; if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) xdp_rxq_info_unreg(&rx_q->xdp_rxq); kfree(rx_q->buf_pool); + rx_q->buf_pool = NULL; + if (rx_q->page_pool) page_pool_destroy(rx_q->page_pool); + rx_q->page_pool = NULL; } static void free_dma_rx_desc_resources(struct stmmac_priv *priv, @@ -1993,8 +1998,14 @@ static void __free_dma_tx_desc_resources(struct stmmac_priv *priv, dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); + tx_q->dma_etx = NULL; + tx_q->dma_entx = NULL; + tx_q->dma_tx = NULL; + kfree(tx_q->tx_skbuff_dma); + tx_q->tx_skbuff_dma = NULL; kfree(tx_q->tx_skbuff); + tx_q->tx_skbuff = NULL; } static void free_dma_tx_desc_resources(struct stmmac_priv *priv, diff --git a/drivers/net/ethernet/wangxun/Kconfig b/drivers/net/ethernet/wangxun/Kconfig index 23cd610bd3766c2b2785e94f1faa9da54990ec00..85cdbdd44fec70d1b20e348c38368f047107eec6 100644 --- a/drivers/net/ethernet/wangxun/Kconfig +++ b/drivers/net/ethernet/wangxun/Kconfig @@ -26,7 +26,7 @@ config NGBE tristate "Wangxun(R) GbE PCI Express adapters support" depends on PCI select LIBWX - select PHYLIB + select PHYLINK help This driver supports Wangxun(R) GbE PCI Express family of adapters. diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index 93cb6f2294e72e235abbc90ed47aaaf91bfd2ff3..f3c7e19dff5c46c881d6df063c10508f1fcc1529 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -3,9 +3,172 @@ #include #include +#include #include "wx_type.h" #include "wx_ethtool.h" +#include "wx_hw.h" +#include "wx_lib.h" + +struct wx_stats { + char stat_string[ETH_GSTRING_LEN]; + size_t sizeof_stat; + off_t stat_offset; +}; + +#define WX_STAT(str, m) { \ + .stat_string = str, \ + .sizeof_stat = sizeof(((struct wx *)0)->m), \ + .stat_offset = offsetof(struct wx, m) } + +static const struct wx_stats wx_gstrings_stats[] = { + WX_STAT("rx_dma_pkts", stats.gprc), + WX_STAT("tx_dma_pkts", stats.gptc), + WX_STAT("rx_dma_bytes", stats.gorc), + WX_STAT("tx_dma_bytes", stats.gotc), + WX_STAT("rx_total_pkts", stats.tpr), + WX_STAT("tx_total_pkts", stats.tpt), + WX_STAT("rx_long_length_count", stats.roc), + WX_STAT("rx_short_length_count", stats.ruc), + WX_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), + WX_STAT("os2bmc_tx_by_bmc", stats.b2ospc), + WX_STAT("os2bmc_tx_by_host", stats.o2bspc), + WX_STAT("os2bmc_rx_by_host", stats.b2ogprc), + WX_STAT("rx_no_dma_resources", stats.rdmdrop), + WX_STAT("tx_busy", tx_busy), + WX_STAT("non_eop_descs", non_eop_descs), + WX_STAT("tx_restart_queue", restart_queue), + WX_STAT("rx_csum_offload_good_count", hw_csum_rx_good), + WX_STAT("rx_csum_offload_errors", hw_csum_rx_error), + WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), +}; + +/* The driver allocates num_tx_queues and num_rx_queues symmetrically, so + * we define num_rx_queues to evaluate to num_tx_queues. This is + * used because we do not have a good way to get the max number of + * Rx queues with CONFIG_RPS disabled.
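+ * For example, with 8 Tx queues this gives + * WX_QUEUE_STATS_LEN = (8 + 8) * 2 = 32 u64 entries, since + * struct wx_queue_stats carries two u64 counters (packets and bytes).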
+ */ +#define WX_NUM_RX_QUEUES netdev->num_tx_queues +#define WX_NUM_TX_QUEUES netdev->num_tx_queues + +#define WX_QUEUE_STATS_LEN ( \ + (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ + (sizeof(struct wx_queue_stats) / sizeof(u64))) +#define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) + +int wx_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return WX_STATS_LEN; + default: + return -EOPNOTSUPP; + } +} +EXPORT_SYMBOL(wx_get_sset_count); + +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) +{ + u8 *p = data; + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) + ethtool_sprintf(&p, wx_gstrings_stats[i].stat_string); + for (i = 0; i < netdev->num_tx_queues; i++) { + ethtool_sprintf(&p, "tx_queue_%u_packets", i); + ethtool_sprintf(&p, "tx_queue_%u_bytes", i); + } + for (i = 0; i < WX_NUM_RX_QUEUES; i++) { + ethtool_sprintf(&p, "rx_queue_%u_packets", i); + ethtool_sprintf(&p, "rx_queue_%u_bytes", i); + } + break; + } +} +EXPORT_SYMBOL(wx_get_strings); + +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_ring *ring; + unsigned int start; + int i, j; + char *p; + + wx_update_stats(wx); + + for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) { + p = (char *)wx + wx_gstrings_stats[i].stat_offset; + data[i] = (wx_gstrings_stats[i].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } + + for (j = 0; j < netdev->num_tx_queues; j++) { + ring = wx->tx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } + for (j = 0; j < WX_NUM_RX_QUEUES; j++) { + ring = wx->rx_ring[j]; + if (!ring) { + data[i++] = 0; + data[i++] = 0; + continue; + } + + do { + start = u64_stats_fetch_begin(&ring->syncp); + data[i] = ring->stats.packets; + data[i + 1] = ring->stats.bytes; + } while (u64_stats_fetch_retry(&ring->syncp, start)); + i += 2; + } +} +EXPORT_SYMBOL(wx_get_ethtool_stats); + +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + mac_stats->MulticastFramesXmittedOK = hwstats->mptc; + mac_stats->BroadcastFramesXmittedOK = hwstats->bptc; + mac_stats->MulticastFramesReceivedOK = hwstats->mprc; + mac_stats->BroadcastFramesReceivedOK = hwstats->bprc; +} +EXPORT_SYMBOL(wx_get_mac_stats); + +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats) +{ + struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; + + wx_update_stats(wx); + + hwstats = &wx->stats; + stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc; + stats->rx_pause_frames = hwstats->lxonoffrxc; +} +EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { @@ -14,5 +177,190 @@ void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); + if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { + 
info->n_stats = WX_STATS_LEN - + (WX_NUM_TX_QUEUES - wx->num_tx_queues) * + (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; + } else { + info->n_stats = WX_STATS_LEN; + } } EXPORT_SYMBOL(wx_get_drvinfo); + +int wx_nway_reset(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_nway_reset(wx->phylink); +} +EXPORT_SYMBOL(wx_nway_reset); + +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_get(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_get_link_ksettings); + +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_ksettings_set(wx->phylink, cmd); +} +EXPORT_SYMBOL(wx_set_link_ksettings); + +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + phylink_ethtool_get_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_get_pauseparam); + +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +{ + struct wx *wx = netdev_priv(netdev); + + return phylink_ethtool_set_pauseparam(wx->phylink, pause); +} +EXPORT_SYMBOL(wx_set_pauseparam); + +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ring->rx_max_pending = WX_MAX_RXD; + ring->tx_max_pending = WX_MAX_TXD; + ring->rx_mini_max_pending = 0; + ring->rx_jumbo_max_pending = 0; + ring->rx_pending = wx->rx_ring_count; + ring->tx_pending = wx->tx_ring_count; + ring->rx_mini_pending = 0; + ring->rx_jumbo_pending = 0; +} +EXPORT_SYMBOL(wx_get_ringparam); + +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + + ec->tx_max_coalesced_frames_irq = wx->tx_work_limit; + /* only valid if in constant ITR mode */ + if (wx->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = wx->rx_itr_setting; + else + ec->rx_coalesce_usecs = wx->rx_itr_setting >> 2; + + /* if in mixed tx/rx queues per vector mode, report only rx settings */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + return 0; + + /* only valid if in constant ITR mode */ + if (wx->tx_itr_setting <= 1) + ec->tx_coalesce_usecs = wx->tx_itr_setting; + else + ec->tx_coalesce_usecs = wx->tx_itr_setting >> 2; + + return 0; +} +EXPORT_SYMBOL(wx_get_coalesce); + +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u16 tx_itr_param, rx_itr_param; + struct wx_q_vector *q_vector; + u16 max_eitr; + int i; + + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) { + /* reject Tx specific changes in case of mixed RxTx vectors */ + if (ec->tx_coalesce_usecs) + return -EOPNOTSUPP; + } + + if (ec->tx_max_coalesced_frames_irq) + wx->tx_work_limit = ec->tx_max_coalesced_frames_irq; + + if (wx->mac.type == wx_mac_sp) + max_eitr = WX_SP_MAX_EITR; + else + max_eitr = WX_EM_MAX_EITR; + + if ((ec->rx_coalesce_usecs > (max_eitr >> 2)) || + (ec->tx_coalesce_usecs > (max_eitr >> 2))) + return -EINVAL; + + if (ec->rx_coalesce_usecs > 1) + wx->rx_itr_setting = ec->rx_coalesce_usecs << 2; + else 
+ wx->rx_itr_setting = ec->rx_coalesce_usecs; + + if (wx->rx_itr_setting == 1) + rx_itr_param = WX_20K_ITR; + else + rx_itr_param = wx->rx_itr_setting; + + if (ec->tx_coalesce_usecs > 1) + wx->tx_itr_setting = ec->tx_coalesce_usecs << 2; + else + wx->tx_itr_setting = ec->tx_coalesce_usecs; + + if (wx->tx_itr_setting == 1) { + if (wx->mac.type == wx_mac_sp) + tx_itr_param = WX_12K_ITR; + else + tx_itr_param = WX_20K_ITR; + } else { + tx_itr_param = wx->tx_itr_setting; + } + + /* mixed Rx/Tx */ + if (wx->q_vector[0]->tx.count && wx->q_vector[0]->rx.count) + wx->tx_itr_setting = wx->rx_itr_setting; + + for (i = 0; i < wx->num_q_vectors; i++) { + q_vector = wx->q_vector[i]; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; + else + /* rx only or mixed */ + q_vector->itr = rx_itr_param; + wx_write_eitr(q_vector); + } + + return 0; +} +EXPORT_SYMBOL(wx_set_coalesce); + +u32 wx_get_msglevel(struct net_device *netdev) +{ + struct wx *wx = netdev_priv(netdev); + + return wx->msg_enable; +} +EXPORT_SYMBOL(wx_get_msglevel); + +void wx_set_msglevel(struct net_device *netdev, u32 data) +{ + struct wx *wx = netdev_priv(netdev); + + wx->msg_enable = data; +} +EXPORT_SYMBOL(wx_set_msglevel); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h index e85538c69454070bbccd60a5e1f549e75677442b..d79157532d3d952346f4b52031e2ebcd5eafd5de 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.h @@ -4,5 +4,36 @@ #ifndef _WX_ETHTOOL_H_ #define _WX_ETHTOOL_H_ +int wx_get_sset_count(struct net_device *netdev, int sset); +void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data); +void wx_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data); +void wx_get_mac_stats(struct net_device *netdev, + struct ethtool_eth_mac_stats *mac_stats); +void wx_get_pause_stats(struct net_device *netdev, + struct ethtool_pause_stats *stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info); +int wx_nway_reset(struct net_device *netdev); +int wx_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd); +int wx_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd); +void wx_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +int wx_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause); +void wx_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack); +int wx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int wx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +u32 wx_get_msglevel(struct net_device *netdev); +void wx_set_msglevel(struct net_device *netdev, u32 data); #endif /* _WX_ETHTOOL_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index 52130df26aee5360e3379a1ef154e20e12eb76e9..d11f7d8db1944b298edb70b3e94cfdedf9d1f2dc 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -12,6 +12,98 @@ #include "wx_lib.h" #include "wx_hw.h" +static int wx_phy_read_reg_mdi(struct mii_bus 
*bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) { + wx_err(wx, "Mdio read c22 command did not complete.\n"); + return ret; + } + + return (u16)rd32(wx, WX_MSCC); +} + +static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + u32 command, val; + int ret; + + /* setup and write the address cycle command */ + command = WX_MSCA_RA(regnum) | + WX_MSCA_PA(phy_addr) | + WX_MSCA_DA(devnum); + wr32(wx, WX_MSCA, command); + + command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; + if (wx->mac.type == wx_mac_em) + command |= WX_MDIO_CLK(6); + wr32(wx, WX_MSCC, command); + + /* wait to complete */ + ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, + 100000, false, wx, WX_MSCC); + if (ret) + wx_err(wx, "Mdio write c22 command did not complete.\n"); + + return ret; +} + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22); + +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF); + return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22); + +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum); +} +EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45); + +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value) +{ + struct wx *wx = bus->priv; + + wr32(wx, WX_MDIO_CLAUSE_SELECT, 0); + return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value); +} +EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45); + static void wx_intr_disable(struct wx *wx, u64 qmask) { u32 mask; @@ -1066,6 +1158,81 @@ static void wx_set_rxpba(struct wx *wx) wr32(wx, WX_TDM_PB_THRE(0), txpbthresh); } +#define WX_ETH_FRAMING 20 + +/** + * wx_hpbthresh - calculate high water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_hpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + int link, tc, kb, marker; + u32 dv_id, rx_pba; + + /* Calculate max LAN frame size */ + link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING; + tc = link; + + /* Calculate delay value for device */ + dv_id = WX_DV(link, tc); + + /* Delay value is calculated in bit times convert to KB */ + kb = WX_BT2KB(dv_id); + rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT; + + marker = rx_pba - kb; + + /* It is possible that the packet buffer is not large enough + * to provide required headroom. In this case throw an error + * to the user and do the best we can.
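+ * For reference: with a 1500 byte MTU, link = 1500 + ETH_HLEN (14) + + * ETH_FCS_LEN (4) + WX_ETH_FRAMING (20) = 1538 bytes, and the marker + * computed above is rx_pba - WX_BT2KB(WX_DV(1538, 1538)) in KB.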
+ */ + if (marker < 0) { + dev_warn(&wx->pdev->dev, + "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n"); + marker = tc + 1; + } + + return marker; +} + +/** + * wx_lpbthresh - calculate low water mark for flow control + * + * @wx: board private structure to calculate for + **/ +static int wx_lpbthresh(struct wx *wx) +{ + struct net_device *dev = wx->netdev; + u32 dv_id; + int tc; + + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + + /* Calculate delay value for device */ + dv_id = WX_LOW_DV(tc); + + /* Delay value is calculated in bit times convert to KB */ + return WX_BT2KB(dv_id); +} + +/** + * wx_pbthresh_setup - calculate and setup high low water marks + * + * @wx: board private structure to calculate for + **/ +static void wx_pbthresh_setup(struct wx *wx) +{ + wx->fc.high_water = wx_hpbthresh(wx); + wx->fc.low_water = wx_lpbthresh(wx); + + /* Low water marks must not be larger than high water marks */ + if (wx->fc.low_water > wx->fc.high_water) + wx->fc.low_water = 0; +} + static void wx_configure_port(struct wx *wx) { u32 value, i; @@ -1492,6 +1659,7 @@ static void wx_configure_isb(struct wx *wx) void wx_configure(struct wx *wx) { wx_set_rxpba(wx); + wx_pbthresh_setup(wx); wx_configure_port(wx); wx_set_rx_mode(wx->netdev); @@ -1911,6 +2079,201 @@ int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid) } EXPORT_SYMBOL(wx_vlan_rx_kill_vid); +static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl |= WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring) +{ + u16 reg_idx = ring->reg_idx; + u32 srrctl; + + srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx)); + srrctl &= ~WX_PX_RR_CFG_DROP_EN; + + wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl); +} + +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause) +{ + u16 pause_time = WX_DEFAULT_FCPAUSE; + u32 mflcn_reg, fccfg_reg, reg; + u32 fcrtl, fcrth; + int i; + + /* Low water mark of zero causes XOFF floods */ + if (tx_pause && wx->fc.high_water) { + if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) { + wx_err(wx, "Invalid water mark configuration\n"); + return -EINVAL; + } + } + + /* Disable any previous flow control settings */ + mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL); + mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE; + + fccfg_reg = rd32(wx, WX_RDB_RFCC); + fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X; + + if (rx_pause) + mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE; + if (tx_pause) + fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X; + + /* Set 802.3x based flow control settings. */ + wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg); + wr32(wx, WX_RDB_RFCC, fccfg_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ + if (tx_pause && wx->fc.high_water) { + fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE; + wr32(wx, WX_RDB_RFCL, fcrtl); + fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE; + } else { + wr32(wx, WX_RDB_RFCL, 0); + /* In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark + * to the Rx packet buffer size - 24KB. This allows + * the Tx switch to function even under heavy Rx + * workloads. 
+ */ + fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576; + } + + wr32(wx, WX_RDB_RFCH, fcrth); + + /* Configure pause time */ + reg = pause_time * 0x00010001; + wr32(wx, WX_RDB_RFCV, reg); + + /* Configure flow control refresh threshold value */ + wr32(wx, WX_RDB_RFCRT, pause_time / 2); + + /* We should set the drop enable bit if: + * Number of Rx queues > 1 and flow control is disabled + * + * This allows us to avoid head of line blocking for security + * and performance reasons. + */ + if (wx->num_rx_queues > 1 && !tx_pause) { + for (i = 0; i < wx->num_rx_queues; i++) + wx_enable_rx_drop(wx, wx->rx_ring[i]); + } else { + for (i = 0; i < wx->num_rx_queues; i++) + wx_disable_rx_drop(wx, wx->rx_ring[i]); + } + + return 0; +} +EXPORT_SYMBOL(wx_fc_enable); + +/** + * wx_update_stats - Update the board statistics counters. + * @wx: board private structure + **/ +void wx_update_stats(struct wx *wx) +{ + struct wx_hw_stats *hwstats = &wx->stats; + + u64 non_eop_descs = 0, alloc_rx_buff_failed = 0; + u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0; + u64 restart_queue = 0, tx_busy = 0; + u32 i; + + /* gather some stats to the wx struct that are per queue */ + for (i = 0; i < wx->num_rx_queues; i++) { + struct wx_ring *rx_ring = wx->rx_ring[i]; + + non_eop_descs += rx_ring->rx_stats.non_eop_descs; + alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed; + hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt; + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + } + wx->non_eop_descs = non_eop_descs; + wx->alloc_rx_buff_failed = alloc_rx_buff_failed; + wx->hw_csum_rx_error = hw_csum_rx_error; + wx->hw_csum_rx_good = hw_csum_rx_good; + + for (i = 0; i < wx->num_tx_queues; i++) { + struct wx_ring *tx_ring = wx->tx_ring[i]; + + restart_queue += tx_ring->tx_stats.restart_queue; + tx_busy += tx_ring->tx_stats.tx_busy; + } + wx->restart_queue = restart_queue; + wx->tx_busy = tx_busy; + + hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT); + hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT); + hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB); + hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB); + hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC); + hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC); + hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC); + hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT); + hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT); + hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT); + hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); + hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + + for (i = 0; i < wx->mac.max_rx_queues; i++) + hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); +} +EXPORT_SYMBOL(wx_update_stats); + +/** + * wx_clear_hw_cntrs - Generic clear hardware counters + * @wx: board private structure + * + * Clears all hardware statistics counters by reading them from the hardware + * Statistics counters are clear on read. 
+ **/ +void wx_clear_hw_cntrs(struct wx *wx) +{ + u16 i = 0; + + for (i = 0; i < wx->mac.max_rx_queues; i++) + wr32(wx, WX_PX_MPRC(i), 0); + + rd32(wx, WX_RDM_PKT_CNT); + rd32(wx, WX_TDM_PKT_CNT); + rd64(wx, WX_RDM_BYTE_CNT_LSB); + rd32(wx, WX_TDM_BYTE_CNT_LSB); + rd32(wx, WX_RDM_DRP_PKT); + rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD); + rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD); + rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L); + rd64(wx, WX_RX_MC_FRAMES_GOOD_L); + rd64(wx, WX_TX_MC_FRAMES_GOOD_L); + rd64(wx, WX_RX_BC_FRAMES_GOOD_L); + rd64(wx, WX_TX_BC_FRAMES_GOOD_L); + rd64(wx, WX_RX_CRC_ERROR_FRAMES_L); + rd64(wx, WX_RX_LEN_ERROR_FRAMES_L); + rd32(wx, WX_RDB_LXONTXC); + rd32(wx, WX_RDB_LXOFFTXC); + rd32(wx, WX_MAC_LXONOFFRXC); +} +EXPORT_SYMBOL(wx_clear_hw_cntrs); + /** * wx_start_hw - Prepare hardware for Tx/Rx * @wx: pointer to hardware structure diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 0b3447bc6f2fec53fac9ccd5144430380b17d46b..9e219fa717a225b4e91e688d8f5a98b5daec606b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -4,6 +4,13 @@ #ifndef _WX_HW_H_ #define _WX_HW_H_ +#include + +int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum); +int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value); +int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum); +int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, + int devnum, int regnum, u16 value); void wx_intr_enable(struct wx *wx, u64 qmask); void wx_irq_disable(struct wx *wx); int wx_check_flash_load(struct wx *wx, u32 check_bit); @@ -34,5 +41,8 @@ int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count); int wx_sw_init(struct wx *wx); int wx_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid); int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid); +int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause); +void wx_update_stats(struct wx *wx); +void wx_clear_hw_cntrs(struct wx *wx); #endif /* _WX_HW_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 21505920136c6e97451d6eb8c2817b129965ba28..0481f646a3031fc4dd673c3ec596a7edc4e1d1a4 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -160,60 +160,6 @@ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); } -static bool wx_can_reuse_rx_page(struct wx_rx_buffer *rx_buffer, - int rx_buffer_pgcnt) -{ - unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; - struct page *page = rx_buffer->page; - - /* avoid re-using remote and pfmemalloc pages */ - if (!dev_page_is_reusable(page)) - return false; - -#if (PAGE_SIZE < 8192) - /* if we are only owner of page we can reuse it */ - if (unlikely((rx_buffer_pgcnt - pagecnt_bias) > 1)) - return false; -#endif - - /* If we have drained the page fragment pool we need to update - * the pagecnt_bias and page count so that we fully restock the - * number of references the driver holds. 
- */ - if (unlikely(pagecnt_bias == 1)) { - page_ref_add(page, USHRT_MAX - 1); - rx_buffer->pagecnt_bias = USHRT_MAX; - } - - return true; -} - -/** - * wx_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on - * @old_buff: donor buffer to have page reused - * - * Synchronizes page for reuse by the adapter - **/ -static void wx_reuse_rx_page(struct wx_ring *rx_ring, - struct wx_rx_buffer *old_buff) -{ - u16 nta = rx_ring->next_to_alloc; - struct wx_rx_buffer *new_buff; - - new_buff = &rx_ring->rx_buffer_info[nta]; - - /* update, and store next to alloc */ - nta++; - rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; - - /* transfer page from old buffer to new buffer */ - new_buff->page = old_buff->page; - new_buff->page_dma = old_buff->page_dma; - new_buff->page_offset = old_buff->page_offset; - new_buff->pagecnt_bias = old_buff->pagecnt_bias; -} - static void wx_dma_sync_frag(struct wx_ring *rx_ring, struct wx_rx_buffer *rx_buffer) { @@ -270,8 +216,6 @@ static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, size, DMA_FROM_DEVICE); skip_sync: - rx_buffer->pagecnt_bias--; - return rx_buffer; } @@ -280,19 +224,9 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring, struct sk_buff *skb, int rx_buffer_pgcnt) { - if (wx_can_reuse_rx_page(rx_buffer, rx_buffer_pgcnt)) { - /* hand second half of page back to the ring */ - wx_reuse_rx_page(rx_ring, rx_buffer); - } else { - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) - /* the page has been released from the ring */ - WX_CB(skb)->page_released = true; - else - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); - } + if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) + /* the page has been released from the ring */ + WX_CB(skb)->page_released = true; /* clear contents of rx_buffer */ rx_buffer->page = NULL; @@ -335,11 +269,12 @@ static struct sk_buff *wx_build_skb(struct wx_ring *rx_ring, if (size <= WX_RXBUFFER_256) { memcpy(__skb_put(skb, size), page_addr, ALIGN(size, sizeof(long))); - rx_buffer->pagecnt_bias++; - + page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, true); return skb; } + skb_mark_for_recycle(skb); + if (!wx_test_staterr(rx_desc, WX_RXD_STAT_EOP)) WX_CB(skb)->dma = rx_buffer->dma; @@ -382,8 +317,6 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, bi->page_dma = dma; bi->page = page; bi->page_offset = 0; - page_ref_add(page, USHRT_MAX - 1); - bi->pagecnt_bias = USHRT_MAX; return true; } @@ -488,6 +421,7 @@ static bool wx_is_non_eop(struct wx_ring *rx_ring, return false; rx_ring->rx_buffer_info[ntc].skb = skb; + rx_ring->rx_stats.non_eop_descs++; return true; } @@ -721,7 +655,7 @@ static int wx_clean_rx_irq(struct wx_q_vector *q_vector, /* exit if we failed to retrieve a buffer */ if (!skb) { - rx_buffer->pagecnt_bias++; + rx_ring->rx_stats.alloc_rx_buff_failed++; break; } @@ -877,9 +811,11 @@ static bool wx_clean_tx_irq(struct wx_q_vector *q_vector, if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) && - netif_running(tx_ring->netdev)) + netif_running(tx_ring->netdev)) { netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; + } } return !!budget; @@ -956,6 +892,7 @@ static int wx_maybe_stop_tx(struct wx_ring *tx_ring, u16 size) /* A reprieve! 
- use start_queue because it doesn't call schedule */ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); + ++tx_ring->tx_stats.restart_queue; return 0; } @@ -1533,8 +1470,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)-> frags[f])); - if (wx_maybe_stop_tx(tx_ring, count + 3)) + if (wx_maybe_stop_tx(tx_ring, count + 3)) { + tx_ring->tx_stats.tx_busy++; return NETDEV_TX_BUSY; + } /* record the location of the first descriptor for this packet */ first = &tx_ring->tx_buffer_info[tx_ring->next_to_use]; @@ -2143,7 +2082,7 @@ static void wx_set_ivar(struct wx *wx, s8 direction, * when it needs to update EITR registers at runtime. Hardware * specific quirks/differences are taken care of here. */ -static void wx_write_eitr(struct wx_q_vector *q_vector) +void wx_write_eitr(struct wx_q_vector *q_vector) { struct wx *wx = q_vector->wx; int v_idx = q_vector->v_idx; @@ -2241,8 +2180,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) /* free resources associated with mapping */ page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - __page_frag_cache_drain(rx_buffer->page, - rx_buffer->pagecnt_bias); i++; rx_buffer++; @@ -2665,8 +2602,11 @@ void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats) { struct wx *wx = netdev_priv(netdev); + struct wx_hw_stats *hwstats; int i; + wx_update_stats(wx); + rcu_read_lock(); for (i = 0; i < wx->num_rx_queues; i++) { struct wx_ring *ring = READ_ONCE(wx->rx_ring[i]); @@ -2702,6 +2642,12 @@ void wx_get_stats64(struct net_device *netdev, } rcu_read_unlock(); + + hwstats = &wx->stats; + stats->rx_errors = hwstats->crcerrs + hwstats->rlec; + stats->multicast = hwstats->qmprc; + stats->rx_length_errors = hwstats->rlec; + stats->rx_crc_errors = hwstats->crcerrs; } EXPORT_SYMBOL(wx_get_stats64); @@ -2725,4 +2671,71 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) } EXPORT_SYMBOL(wx_set_features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring) +{ + int i, err = 0; + + /* Setup new Tx resources and free the old Tx resources in that order. + * We can then assign the new resources to the rings via a memcpy. + * The advantage to this approach is that we are guaranteed to still + * have resources even in the case of an allocation failure. 
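+ * The caller must size the temp_ring scratch array for + * max(num_tx_queues, num_rx_queues) entries; ngbe_set_ringparam() + * below allocates exactly that with kvmalloc_array().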
+ */ + if (new_tx_count != wx->tx_ring_count) { + for (i = 0; i < wx->num_tx_queues; i++) { + memcpy(&temp_ring[i], wx->tx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_tx_count; + err = wx_setup_tx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new tx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_tx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_tx_queues; i++) { + wx_free_tx_resources(wx->tx_ring[i]); + + memcpy(wx->tx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->tx_ring_count = new_tx_count; + } + + /* Repeat the process for the Rx rings if needed */ + if (new_rx_count != wx->rx_ring_count) { + for (i = 0; i < wx->num_rx_queues; i++) { + memcpy(&temp_ring[i], wx->rx_ring[i], + sizeof(struct wx_ring)); + + temp_ring[i].count = new_rx_count; + err = wx_setup_rx_resources(&temp_ring[i]); + if (err) { + wx_err(wx, "setup new rx resources failed, keep using the old config\n"); + while (i) { + i--; + wx_free_rx_resources(&temp_ring[i]); + } + return; + } + } + + for (i = 0; i < wx->num_rx_queues; i++) { + wx_free_rx_resources(wx->rx_ring[i]); + memcpy(wx->rx_ring[i], &temp_ring[i], + sizeof(struct wx_ring)); + } + + wx->rx_ring_count = new_rx_count; + } +} +EXPORT_SYMBOL(wx_set_ring); + +MODULE_DESCRIPTION("Common library for Wangxun(R) Ethernet drivers."); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index df1f4a5951f06ccb44e583ac6a3301838e999832..ec909e876720ca05c709ffb59060d3a4e9546426 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -21,6 +21,7 @@ void wx_free_irq(struct wx *wx); int wx_setup_isb_resources(struct wx *wx); void wx_free_isb_resources(struct wx *wx); u32 wx_misc_isb(struct wx *wx, enum wx_isb_idx idx); +void wx_write_eitr(struct wx_q_vector *q_vector); void wx_configure_vectors(struct wx *wx); void wx_clean_all_rx_rings(struct wx *wx); void wx_clean_all_tx_rings(struct wx *wx); @@ -29,5 +30,7 @@ int wx_setup_resources(struct wx *wx); void wx_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats); int wx_set_features(struct net_device *netdev, netdev_features_t features); +void wx_set_ring(struct wx *wx, u32 new_tx_count, + u32 new_rx_count, struct wx_ring *temp_ring); #endif /* _NGBE_LIB_H_ */ diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index c5cbd177ef62754c25ffa7347a990cb9293c5642..17cdffe388d063eb9dfcb0589a8465b4d7748f72 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -7,6 +7,7 @@ #include #include #include +#include #include #define WX_NCSI_SUP 0x8000 @@ -59,6 +60,25 @@ #define WX_TS_ALARM_ST_DALARM BIT(1) #define WX_TS_ALARM_ST_ALARM BIT(0) +/* statistic */ +#define WX_TX_FRAME_CNT_GOOD_BAD_L 0x1181C +#define WX_TX_BC_FRAMES_GOOD_L 0x11824 +#define WX_TX_MC_FRAMES_GOOD_L 0x1182C +#define WX_RX_FRAME_CNT_GOOD_BAD_L 0x11900 +#define WX_RX_BC_FRAMES_GOOD_L 0x11918 +#define WX_RX_MC_FRAMES_GOOD_L 0x11920 +#define WX_RX_CRC_ERROR_FRAMES_L 0x11928 +#define WX_RX_LEN_ERROR_FRAMES_L 0x11978 +#define WX_RX_UNDERSIZE_FRAMES_GOOD 0x11938 +#define WX_RX_OVERSIZE_FRAMES_GOOD 0x1193C +#define WX_MAC_LXONOFFRXC 0x11E0C + +/*********************** Receive DMA registers **************************/ +#define WX_RDM_DRP_PKT 0x12500 +#define WX_RDM_PKT_CNT 0x12504 +#define WX_RDM_BYTE_CNT_LSB 0x12508 
+#define WX_RDM_BMC2OS_CNT 0x12510 + /************************* Port Registers ************************************/ /* port cfg Registers */ #define WX_CFG_PORT_CTL 0x14400 @@ -94,6 +114,9 @@ #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ #define WX_TDM_PB_THRE(_i) (0x18020 + ((_i) * 4)) #define WX_TDM_RP_IDX 0x1820C +#define WX_TDM_PKT_CNT 0x18308 +#define WX_TDM_BYTE_CNT_LSB 0x1830C +#define WX_TDM_OS2BMC_CNT 0x18314 #define WX_TDM_RP_RATE 0x18404 /***************************** RDB registers *********************************/ @@ -106,6 +129,17 @@ /* statistic */ #define WX_RDB_PFCMACDAL 0x19210 #define WX_RDB_PFCMACDAH 0x19214 +#define WX_RDB_LXOFFTXC 0x19218 +#define WX_RDB_LXONTXC 0x1921C +/* Flow Control Registers */ +#define WX_RDB_RFCV 0x19200 +#define WX_RDB_RFCL 0x19220 +#define WX_RDB_RFCL_XONE BIT(31) +#define WX_RDB_RFCH 0x19260 +#define WX_RDB_RFCH_XOFFE BIT(31) +#define WX_RDB_RFCRT 0x192A0 +#define WX_RDB_RFCC 0x192A4 +#define WX_RDB_RFCC_RFCE_802_3X BIT(3) /* ring assignment */ #define WX_RDB_PL_CFG(_i) (0x19300 + ((_i) * 4)) #define WX_RDB_PL_CFG_L4HDR BIT(1) @@ -218,6 +252,8 @@ #define WX_MNG_MBOX_CTL 0x1E044 #define WX_MNG_MBOX_CTL_SWRDY BIT(0) #define WX_MNG_MBOX_CTL_FWRDY BIT(2) +#define WX_MNG_BMC2OS_CNT 0x1E090 +#define WX_MNG_OS2BMC_CNT 0x1E094 /************************************* ETH MAC *****************************/ #define WX_MAC_TX_CFG 0x11000 @@ -251,6 +287,7 @@ enum WX_MSCA_CMD_value { #define WX_MSCC_SADDR BIT(18) #define WX_MSCC_BUSY BIT(22) #define WX_MDIO_CLK(v) FIELD_PREP(GENMASK(21, 19), v) +#define WX_MDIO_CLAUSE_SELECT 0x11220 #define WX_MMC_CONTROL 0x11800 #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ @@ -278,6 +315,7 @@ enum WX_MSCA_CMD_value { #define WX_PX_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ #define WX_7K_ITR 595 #define WX_12K_ITR 336 +#define WX_20K_ITR 200 #define WX_SP_MAX_EITR 0x00000FF8U #define WX_EM_MAX_EITR 0x00007FFCU @@ -300,8 +338,10 @@ enum WX_MSCA_CMD_value { #define WX_PX_RR_WP(_i) (0x01008 + ((_i) * 0x40)) #define WX_PX_RR_RP(_i) (0x0100C + ((_i) * 0x40)) #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) +#define WX_PX_MPRC(_i) (0x01020 + ((_i) * 0x40)) /* PX_RR_CFG bit definitions */ #define WX_PX_RR_CFG_VLAN BIT(31) +#define WX_PX_RR_CFG_DROP_EN BIT(30) #define WX_PX_RR_CFG_SPLIT_MODE BIT(26) #define WX_PX_RR_CFG_RR_THER_SHIFT 16 #define WX_PX_RR_CFG_RR_HDR_SZ GENMASK(15, 12) @@ -339,8 +379,46 @@ enum WX_MSCA_CMD_value { #define WX_MAC_STATE_MODIFIED 0x2 #define WX_MAC_STATE_IN_USE 0x4 +/* BitTimes (BT) conversion */ +#define WX_BT2KB(BT) (((BT) + (8 * 1024 - 1)) / (8 * 1024)) +#define WX_B2BT(BT) ((BT) * 8) + +/* Calculate Delay to respond to PFC */ +#define WX_PFC_D 672 +/* Calculate Cable Delay */ +#define WX_CABLE_DC 5556 /* Delay Copper */ +/* Calculate Delay incurred from higher layer */ +#define WX_HD 6144 + +/* Calculate Interface Delay */ +#define WX_PHY_D 12800 +#define WX_MAC_D 4096 +#define WX_XAUI_D (2 * 1024) +#define WX_ID (WX_MAC_D + WX_XAUI_D + WX_PHY_D) +/* Calculate PCI Bus delay for low thresholds */ +#define WX_PCI_DELAY 10000 + +/* Calculate delay value in bit times */ +#define WX_DV(_max_frame_link, _max_frame_tc) \ + ((36 * (WX_B2BT(_max_frame_link) + WX_PFC_D + \ + (2 * WX_CABLE_DC) + (2 * WX_ID) + WX_HD) / 25 + 1) + \ + 2 * WX_B2BT(_max_frame_tc)) + +/* Calculate low threshold delay values */ +#define WX_LOW_DV(_max_frame_tc) \ + (2 * (2 * WX_B2BT(_max_frame_tc) + (36 * WX_PCI_DELAY / 25) + 1)) + +/* flow control */ +#define WX_DEFAULT_FCPAUSE 0xFFFF + #define 
WX_MAX_RXD 8192 #define WX_MAX_TXD 8192 +#define WX_MIN_RXD 128 +#define WX_MIN_TXD 128 + +/* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +#define WX_REQ_RX_DESCRIPTOR_MULTIPLE 8 +#define WX_REQ_TX_DESCRIPTOR_MULTIPLE 8 #define WX_MAX_JUMBO_FRAME_SIZE 9432 /* max payload 9414 */ #define VMDQ_P(p) p @@ -759,7 +837,6 @@ struct wx_rx_buffer { dma_addr_t page_dma; struct page *page; unsigned int page_offset; - u16 pagecnt_bias; }; struct wx_queue_stats { @@ -767,9 +844,16 @@ struct wx_queue_stats { u64 bytes; }; +struct wx_tx_queue_stats { + u64 restart_queue; + u64 tx_busy; +}; + struct wx_rx_queue_stats { + u64 non_eop_descs; u64 csum_good_cnt; u64 csum_err; + u64 alloc_rx_buff_failed; }; /* iterator for handling rings in ring container */ @@ -813,6 +897,7 @@ struct wx_ring { struct wx_queue_stats stats; struct u64_stats_sync syncp; union { + struct wx_tx_queue_stats tx_stats; struct wx_rx_queue_stats rx_stats; }; } ____cacheline_internodealigned_in_smp; @@ -844,6 +929,38 @@ enum wx_isb_idx { WX_ISB_MAX }; +struct wx_fc_info { + u32 high_water; /* Flow Ctrl High-water */ + u32 low_water; /* Flow Ctrl Low-water */ +}; + +/* Statistics counters collected by the MAC */ +struct wx_hw_stats { + u64 gprc; + u64 gptc; + u64 gorc; + u64 gotc; + u64 tpr; + u64 tpt; + u64 bprc; + u64 bptc; + u64 mprc; + u64 mptc; + u64 roc; + u64 ruc; + u64 lxonoffrxc; + u64 lxontxc; + u64 lxofftxc; + u64 o2bgptc; + u64 b2ospc; + u64 o2bspc; + u64 b2ogprc; + u64 rdmdrop; + u64 crcerrs; + u64 rlec; + u64 qmprc; +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; @@ -857,6 +974,7 @@ struct wx { enum sp_media_type media_type; struct wx_eeprom_info eeprom; struct wx_addr_filter_info addr_ctrl; + struct wx_fc_info fc; struct wx_mac_addr *mac_table; u16 device_id; u16 vendor_id; @@ -877,6 +995,8 @@ struct wx { int speed; int duplex; struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; bool wol_hw_supported; bool ncsi_enabled; @@ -919,6 +1039,14 @@ struct wx { u32 wol; u16 bd_number; + + struct wx_hw_stats stats; + u64 tx_busy; + u64 non_eop_descs; + u64 restart_queue; + u64 hw_csum_rx_good; + u64 hw_csum_rx_error; + u64 alloc_rx_buff_failed; }; #define WX_INTR_ALL (~0ULL) @@ -952,6 +1080,17 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) wr32(wx, reg, val); } +static inline u64 +rd64(struct wx *wx, u32 reg) +{ + u64 lsb, msb; + + lsb = rd32(wx, reg); + msb = rd32(wx, reg + 4); + + return (lsb | msb << 32); +} + /* On some domestic CPU platforms, sometimes IO is not synchronized with * flushing memory, here use readl() to flush PCI read and write. */ @@ -963,4 +1102,9 @@ wr32m(struct wx *wx, u32 reg, u32 mask, u32 field) #define wx_dbg(wx, fmt, arg...) 
\ dev_dbg(&(wx)->pdev->dev, fmt, ##arg) +static inline struct wx *phylink_to_wx(struct phylink_config *config) +{ + return container_of(config, struct wx, phylink_config); +} + #endif /* _WX_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index ec0e869e9aacfe4708d082f05e9ac7f693c8bed6..5800bd8c8696324571d290960005e28962b26179 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -7,7 +7,10 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" #include "ngbe_ethtool.h" +#include "ngbe_type.h" static void ngbe_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -41,14 +44,77 @@ static int ngbe_set_wol(struct net_device *netdev, return 0; } +static int ngbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) +{ + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; + + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); + + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); + + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; + + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + ngbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + wx_configure(wx); + ngbe_up(wx); + + return 0; +} + static const struct ethtool_ops ngbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, .get_link = ethtool_op_get_link, - .get_link_ksettings = phy_ethtool_get_link_ksettings, - .set_link_ksettings = phy_ethtool_set_link_ksettings, - .nway_reset = phy_ethtool_nway_reset, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .nway_reset = wx_nway_reset, .get_wol = ngbe_get_wol, .set_wol = ngbe_set_wol, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = ngbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, + .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void ngbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c index 6562a2de95277c73758e9a8a6651ad2c0c5c9803..6459bc1d7c2249e6f623170b772b6c6229557716 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c +++ 
b/drivers/net/ethernet/wangxun/ngbe/ngbe_hw.c @@ -85,6 +85,8 @@ int ngbe_reset_hw(struct wx *wx) } ngbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + /* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index a4d63d2f3c5bbebce79e6e2f0a07713ee7d558dc..96d80c595cb8621e82bb0b3a497f36231562f20c 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -330,17 +330,19 @@ static void ngbe_disable_device(struct wx *wx) wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH); } + + wx_update_stats(wx); } -static void ngbe_down(struct wx *wx) +void ngbe_down(struct wx *wx) { - phy_stop(wx->phydev); + phylink_stop(wx->phylink); ngbe_disable_device(wx); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } -static void ngbe_up(struct wx *wx) +void ngbe_up(struct wx *wx) { wx_configure_vectors(wx); @@ -357,7 +359,7 @@ static void ngbe_up(struct wx *wx) if (wx->gpio_ctrl) ngbe_sfp_modules_txrx_powerctl(wx, true); - phy_start(wx->phydev); + phylink_start(wx->phylink); } /** @@ -386,7 +388,7 @@ static int ngbe_open(struct net_device *netdev) if (err) goto err_free_resources; - err = ngbe_phy_connect(wx); + err = phylink_connect_phy(wx->phylink, wx->phydev); if (err) goto err_free_irq; @@ -402,7 +404,7 @@ static int ngbe_open(struct net_device *netdev) return 0; err_dis_phy: - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); err_free_irq: wx_free_irq(wx); err_free_resources: @@ -428,7 +430,7 @@ static int ngbe_close(struct net_device *netdev) ngbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); - phy_disconnect(wx->phydev); + phylink_disconnect_phy(wx->phylink); wx_control_hw(wx, false); return 0; @@ -580,6 +582,7 @@ static int ngbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - @@ -675,14 +678,10 @@ static int ngbe_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, wx); - netif_info(wx, probe, netdev, - "PHY: %s, PBA No: Wang Xun GbE Family Controller\n", - wx->mac_type == em_mac_type_mdi ? 
"Internal" : "External"); - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_register: + phylink_destroy(wx->phylink); wx_control_hw(wx, false); err_clear_interrupt_scheme: wx_clear_interrupt_scheme(wx); @@ -712,6 +711,7 @@ static void ngbe_remove(struct pci_dev *pdev) netdev = wx->netdev; unregister_netdev(netdev); + phylink_destroy(wx->phylink); pci_release_selected_regions(pdev, pci_select_bars(pdev, IORESOURCE_MEM)); diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c index 591f5b7b6da6580795d6640f339f05c9d117da8a..ec54b18c5fe73b942cdeb4c362168c06e7b16a28 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.c @@ -29,117 +29,6 @@ static int ngbe_phy_write_reg_internal(struct mii_bus *bus, int phy_addr, int re return 0; } -static int ngbe_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c22 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value) -{ - u32 command, val, device_type = 0; - struct wx *wx = bus->priv; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0xF); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(device_type); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c22 command did not complete.\n"); - - return ret; -} - -static int ngbe_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int ngbe_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - wr32(wx, NGBE_MDIO_CLAUSE_SELECT, 0x0); - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - command = value | - WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | - WX_MSCC_BUSY | - WX_MDIO_CLK(6); - wr32(wx, WX_MSCC, command); 
- - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) { struct wx *wx = bus->priv; @@ -148,7 +37,7 @@ static int ngbe_phy_read_reg_c22(struct mii_bus *bus, int phy_addr, int regnum) if (wx->mac_type == em_mac_type_mdi) phy_data = ngbe_phy_read_reg_internal(bus, phy_addr, regnum); else - phy_data = ngbe_phy_read_reg_mdi_c22(bus, phy_addr, regnum); + phy_data = wx_phy_read_reg_mdi_c22(bus, phy_addr, regnum); return phy_data; } @@ -162,27 +51,33 @@ static int ngbe_phy_write_reg_c22(struct mii_bus *bus, int phy_addr, if (wx->mac_type == em_mac_type_mdi) ret = ngbe_phy_write_reg_internal(bus, phy_addr, regnum, value); else - ret = ngbe_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); + ret = wx_phy_write_reg_mdi_c22(bus, phy_addr, regnum, value); return ret; } -static void ngbe_handle_link_change(struct net_device *dev) +static void ngbe_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) { - struct wx *wx = netdev_priv(dev); - struct phy_device *phydev; +} + +static void ngbe_mac_link_down(struct phylink_config *config, + unsigned int mode, phy_interface_t interface) +{ +} + +static void ngbe_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct wx *wx = phylink_to_wx(config); u32 lan_speed, reg; - phydev = wx->phydev; - if (!(wx->link != phydev->link || - wx->speed != phydev->speed || - wx->duplex != phydev->duplex)) - return; + wx_fc_enable(wx, tx_pause, rx_pause); - wx->link = phydev->link; - wx->speed = phydev->speed; - wx->duplex = phydev->duplex; - switch (phydev->speed) { + switch (speed) { case SPEED_10: lan_speed = 0; break; @@ -194,54 +89,51 @@ static void ngbe_handle_link_change(struct net_device *dev) lan_speed = 2; break; } + wr32m(wx, NGBE_CFG_LAN_SPEED, 0x3, lan_speed); - if (phydev->link) { - reg = rd32(wx, WX_MAC_TX_CFG); - reg &= ~WX_MAC_TX_CFG_SPEED_MASK; - reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; - wr32(wx, WX_MAC_TX_CFG, reg); - /* Re configure MAC RX */ - reg = rd32(wx, WX_MAC_RX_CFG); - wr32(wx, WX_MAC_RX_CFG, reg); - wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); - reg = rd32(wx, WX_MAC_WDG_TIMEOUT); - wr32(wx, WX_MAC_WDG_TIMEOUT, reg); - } - phy_print_status(phydev); + reg = rd32(wx, WX_MAC_TX_CFG); + reg &= ~WX_MAC_TX_CFG_SPEED_MASK; + reg |= WX_MAC_TX_CFG_SPEED_1G | WX_MAC_TX_CFG_TE; + wr32(wx, WX_MAC_TX_CFG, reg); + + /* Re configure MAC Rx */ + reg = rd32(wx, WX_MAC_RX_CFG); + wr32(wx, WX_MAC_RX_CFG, reg); + wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); + reg = rd32(wx, WX_MAC_WDG_TIMEOUT); + wr32(wx, WX_MAC_WDG_TIMEOUT, reg); } -int ngbe_phy_connect(struct wx *wx) +static const struct phylink_mac_ops ngbe_mac_ops = { + .mac_config = ngbe_mac_config, + .mac_link_down = ngbe_mac_link_down, + .mac_link_up = ngbe_mac_link_up, +}; + +static int ngbe_phylink_init(struct wx *wx) { - int ret; + struct phylink_config *config; + phy_interface_t phy_mode; + struct phylink *phylink; - ret = phy_connect_direct(wx->netdev, - wx->phydev, - ngbe_handle_link_change, - PHY_INTERFACE_MODE_RGMII_ID); - if (ret) { - wx_err(wx, "PHY connect failed.\n"); - return ret; - } + config = &wx->phylink_config; + config->dev = &wx->netdev->dev; + config->type = 
PHYLINK_NETDEV; + config->mac_capabilities = MAC_1000FD | MAC_100FD | MAC_10FD | + MAC_SYM_PAUSE | MAC_ASYM_PAUSE; + config->mac_managed_pm = true; - return 0; -} + phy_mode = PHY_INTERFACE_MODE_RGMII_ID; + __set_bit(PHY_INTERFACE_MODE_RGMII_ID, config->supported_interfaces); -static void ngbe_phy_fixup(struct wx *wx) -{ - struct phy_device *phydev = wx->phydev; - struct ethtool_eee eee; - - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); - phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); - - phydev->mac_managed_pm = true; - if (wx->mac_type != em_mac_type_mdi) - return; - /* disable EEE, internal phy does not support eee */ - memset(&eee, 0, sizeof(eee)); - phy_ethtool_set_eee(phydev, &eee); + phylink = phylink_create(config, NULL, phy_mode, &ngbe_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + wx->phylink = phylink; + + return 0; } int ngbe_mdio_init(struct wx *wx) @@ -262,8 +154,8 @@ int ngbe_mdio_init(struct wx *wx) mii_bus->priv = wx; if (wx->mac_type == em_mac_type_rgmii) { - mii_bus->read_c45 = ngbe_phy_read_reg_mdi_c45; - mii_bus->write_c45 = ngbe_phy_write_reg_mdi_c45; + mii_bus->read_c45 = wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = wx_phy_write_reg_mdi_c45; } snprintf(mii_bus->id, MII_BUS_ID_SIZE, "ngbe-%x", pci_dev_id(pdev)); @@ -276,11 +168,16 @@ int ngbe_mdio_init(struct wx *wx) return -ENODEV; phy_attached_info(wx->phydev); - ngbe_phy_fixup(wx); wx->link = 0; wx->speed = 0; wx->duplex = 0; + ret = ngbe_phylink_init(wx); + if (ret) { + wx_err(wx, "failed to init phylink: %d\n", ret); + return ret; + } + return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h index 0a6400dd89c4c08da275e04bc6a25577045c77a3..f610b771888a5ed39283a1af7c452104ea505e03 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_mdio.h @@ -7,6 +7,5 @@ #ifndef _NGBE_MDIO_H_ #define _NGBE_MDIO_H_ -int ngbe_phy_connect(struct wx *wx); int ngbe_mdio_init(struct wx *wx); #endif /* _NGBE_MDIO_H_ */ diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index 72c8cd2d557513e24c510aa3fdc89dc01871e2eb..0a98080a197af8bbe8904aae462793d3de077325 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -59,9 +59,6 @@ #define NGBE_EEPROM_VERSION_L 0x1D #define NGBE_EEPROM_VERSION_H 0x1E -/* Media-dependent registers. 
*/ -#define NGBE_MDIO_CLAUSE_SELECT 0x11220 - /* GPIO Registers */ #define NGBE_GPIO_DR 0x14800 #define NGBE_GPIO_DDR 0x14804 @@ -133,4 +130,7 @@ extern char ngbe_driver_name[]; +void ngbe_down(struct wx *wx); +void ngbe_up(struct wx *wx); + #endif /* _NGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 859da112586a427043e2f608793d4d81226bcc63..fa83cac320d34447a8324dfef9fcde4b06e43086 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -7,38 +7,78 @@ #include "../libwx/wx_ethtool.h" #include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" #include "txgbe_type.h" #include "txgbe_ethtool.h" -static int txgbe_nway_reset(struct net_device *netdev) +static int txgbe_set_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring, + struct kernel_ethtool_ringparam *kernel_ring, + struct netlink_ext_ack *extack) { - struct txgbe *txgbe = netdev_to_txgbe(netdev); + struct wx *wx = netdev_priv(netdev); + u32 new_rx_count, new_tx_count; + struct wx_ring *temp_ring; + int i; - return phylink_ethtool_nway_reset(txgbe->phylink); -} + new_tx_count = clamp_t(u32, ring->tx_pending, WX_MIN_TXD, WX_MAX_TXD); + new_tx_count = ALIGN(new_tx_count, WX_REQ_TX_DESCRIPTOR_MULTIPLE); -static int txgbe_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); + new_rx_count = clamp_t(u32, ring->rx_pending, WX_MIN_RXD, WX_MAX_RXD); + new_rx_count = ALIGN(new_rx_count, WX_REQ_RX_DESCRIPTOR_MULTIPLE); - return phylink_ethtool_ksettings_get(txgbe->phylink, cmd); -} + if (new_tx_count == wx->tx_ring_count && + new_rx_count == wx->rx_ring_count) + return 0; -static int txgbe_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *cmd) -{ - struct txgbe *txgbe = netdev_to_txgbe(netdev); + if (!netif_running(wx->netdev)) { + for (i = 0; i < wx->num_tx_queues; i++) + wx->tx_ring[i]->count = new_tx_count; + for (i = 0; i < wx->num_rx_queues; i++) + wx->rx_ring[i]->count = new_rx_count; + wx->tx_ring_count = new_tx_count; + wx->rx_ring_count = new_rx_count; + + return 0; + } + + /* allocate temporary buffer to store rings in */ + i = max_t(int, wx->num_tx_queues, wx->num_rx_queues); + temp_ring = kvmalloc_array(i, sizeof(struct wx_ring), GFP_KERNEL); + if (!temp_ring) + return -ENOMEM; + + txgbe_down(wx); + + wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring); + kvfree(temp_ring); + + txgbe_up(wx); - return phylink_ethtool_ksettings_set(txgbe->phylink, cmd); + return 0; } static const struct ethtool_ops txgbe_ethtool_ops = { + .supported_coalesce_params = ETHTOOL_COALESCE_USECS | + ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_drvinfo = wx_get_drvinfo, - .nway_reset = txgbe_nway_reset, + .nway_reset = wx_nway_reset, .get_link = ethtool_op_get_link, - .get_link_ksettings = txgbe_get_link_ksettings, - .set_link_ksettings = txgbe_set_link_ksettings, + .get_link_ksettings = wx_get_link_ksettings, + .set_link_ksettings = wx_set_link_ksettings, + .get_sset_count = wx_get_sset_count, + .get_strings = wx_get_strings, + .get_ethtool_stats = wx_get_ethtool_stats, + .get_eth_mac_stats = wx_get_mac_stats, + .get_pause_stats = wx_get_pause_stats, + .get_pauseparam = wx_get_pauseparam, + .set_pauseparam = wx_set_pauseparam, + .get_ringparam = wx_get_ringparam, + .set_ringparam = txgbe_set_ringparam, + .get_coalesce = wx_get_coalesce, + .set_coalesce = wx_set_coalesce, 
+ .get_msglevel = wx_get_msglevel, + .set_msglevel = wx_set_msglevel, }; void txgbe_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c index 37274525027002a42d21fa36ea6db39b03f5cb68..d6b2b3c781b6f2b25f6349c020af38e75c77b359 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c @@ -70,114 +70,6 @@ static void txgbe_init_thermal_sensor_thresh(struct wx *wx) wr32(wx, WX_TS_DALARM_THRE, 614); } -/** - * txgbe_read_pba_string - Reads part number string from EEPROM - * @wx: pointer to hardware structure - * @pba_num: stores the part number string from the EEPROM - * @pba_num_size: part number string buffer length - * - * Reads the part number string from the EEPROM. - **/ -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size) -{ - u16 pba_ptr, offset, length, data; - int ret_val; - - if (!pba_num) { - wx_err(wx, "PBA string buffer was null\n"); - return -EINVAL; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM0_PTR, - &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - ret_val = wx_read_ee_hostif(wx, - wx->eeprom.sw_region_offset + TXGBE_PBANUM1_PTR, - &pba_ptr); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - /* if data is not ptr guard the PBA must be in legacy format which - * means pba_ptr is actually our second data word for the PBA number - * and we can decode it into an ascii string - */ - if (data != TXGBE_PBANUM_PTR_GUARD) { - wx_err(wx, "NVM PBA number is not stored as string\n"); - - /* we will need 11 characters to store the PBA */ - if (pba_num_size < 11) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* extract hex string from data and pba_ptr */ - pba_num[0] = (data >> 12) & 0xF; - pba_num[1] = (data >> 8) & 0xF; - pba_num[2] = (data >> 4) & 0xF; - pba_num[3] = data & 0xF; - pba_num[4] = (pba_ptr >> 12) & 0xF; - pba_num[5] = (pba_ptr >> 8) & 0xF; - pba_num[6] = '-'; - pba_num[7] = 0; - pba_num[8] = (pba_ptr >> 4) & 0xF; - pba_num[9] = pba_ptr & 0xF; - - /* put a null character on the end of our string */ - pba_num[10] = '\0'; - - /* switch all the data but the '-' to hex char */ - for (offset = 0; offset < 10; offset++) { - if (pba_num[offset] < 0xA) - pba_num[offset] += '0'; - else if (pba_num[offset] < 0x10) - pba_num[offset] += 'A' - 0xA; - } - - return 0; - } - - ret_val = wx_read_ee_hostif(wx, pba_ptr, &length); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - - if (length == 0xFFFF || length == 0) { - wx_err(wx, "NVM PBA number section invalid length\n"); - return -EINVAL; - } - - /* check if pba_num buffer is big enough */ - if (pba_num_size < (((u32)length * 2) - 1)) { - wx_err(wx, "PBA string buffer too small\n"); - return -ENOMEM; - } - - /* trim pba length from start of string */ - pba_ptr++; - length--; - - for (offset = 0; offset < length; offset++) { - ret_val = wx_read_ee_hostif(wx, pba_ptr + offset, &data); - if (ret_val != 0) { - wx_err(wx, "NVM Read Error\n"); - return ret_val; - } - pba_num[offset * 2] = (u8)(data >> 8); - pba_num[(offset * 2) + 1] = (u8)(data & 0xFF); - } - pba_num[offset * 2] = '\0'; - - return 0; -} - /** * txgbe_calc_eeprom_checksum - Calculates and returns the checksum * @wx: pointer to hardware structure @@ -306,6 +198,8 @@ int txgbe_reset_hw(struct wx *wx) txgbe_reset_misc(wx); + wx_clear_hw_cntrs(wx); + 
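A note on the txgbe_set_ringparam() hunk above: descriptor rings cannot be resized while the hardware is using them, so on a running interface the handler follows a strict quiesce/swap/restart order; when the interface is down it only records the new counts. The live path, condensed from the code above (bounds checks and the netif_running() early path elided)::

	temp_ring = kvmalloc_array(max_t(int, wx->num_tx_queues,
					 wx->num_rx_queues),
				   sizeof(struct wx_ring), GFP_KERNEL);
	if (!temp_ring)
		return -ENOMEM;

	txgbe_down(wx);		/* stop traffic, tear down old rings */
	wx_set_ring(wx, new_tx_count, new_rx_count, temp_ring);
	kvfree(temp_ring);
	txgbe_up(wx);		/* reconfigure the hardware, restart queues */

This is also why txgbe_down() and txgbe_up() gain external declarations in the txgbe_type.h hunk below.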
/* Store the permanent mac address */ wx_get_mac_addr(wx, wx->mac.perm_addr); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h index abc729eb187ae8da87d25cd3f2d3c881199b5192..1f3ecf60e3c44d971e6bb12cc24324478cb108ca 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h @@ -6,7 +6,6 @@ int txgbe_disable_sec_tx_path(struct wx *wx); void txgbe_enable_sec_tx_path(struct wx *wx); -int txgbe_read_pba_string(struct wx *wx, u8 *pba_num, u32 pba_num_size); int txgbe_validate_eeprom_checksum(struct wx *wx, u16 *checksum_val); int txgbe_reset_hw(struct wx *wx); diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index d60c26ba0ba4c98815a9609550ea3046aabbe0eb..bcc47bc6264ab33c92bc0ae2cb75e65a16a44628 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -206,7 +206,6 @@ static int txgbe_request_irq(struct wx *wx) static void txgbe_up_complete(struct wx *wx) { struct net_device *netdev = wx->netdev; - struct txgbe *txgbe; wx_control_hw(wx, true); wx_configure_vectors(wx); @@ -215,8 +214,7 @@ static void txgbe_up_complete(struct wx *wx) smp_mb__before_atomic(); wx_napi_enable_all(wx); - txgbe = netdev_to_txgbe(netdev); - phylink_start(txgbe->phylink); + phylink_start(wx->phylink); /* clear any pending interrupts, may auto mask */ rd32(wx, WX_PX_IC(0)); @@ -286,20 +284,26 @@ static void txgbe_disable_device(struct wx *wx) /* Disable the Tx DMA engine */ wr32m(wx, WX_TDM_CTL, WX_TDM_CTL_TE, 0); + + wx_update_stats(wx); } -static void txgbe_down(struct wx *wx) +void txgbe_down(struct wx *wx) { - struct txgbe *txgbe = netdev_to_txgbe(wx->netdev); - txgbe_disable_device(wx); txgbe_reset(wx); - phylink_stop(txgbe->phylink); + phylink_stop(wx->phylink); wx_clean_all_tx_rings(wx); wx_clean_all_rx_rings(wx); } +void txgbe_up(struct wx *wx) +{ + wx_configure(wx); + txgbe_up_complete(wx); +} + /** * txgbe_init_type_code - Initialize the shared code * @wx: pointer to hardware structure @@ -536,7 +540,6 @@ static int txgbe_probe(struct pci_dev *pdev, u16 eeprom_verh = 0, eeprom_verl = 0, offset = 0; u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; u16 build = 0, major = 0, patch = 0; - u8 part_str[TXGBE_PBANUM_LENGTH]; u32 etrack_id = 0; err = pci_enable_device_mem(pdev); @@ -637,6 +640,7 @@ static int txgbe_probe(struct pci_dev *pdev, netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; netdev->min_mtu = ETH_MIN_MTU; netdev->max_mtu = WX_MAX_JUMBO_FRAME_SIZE - @@ -734,13 +738,6 @@ static int txgbe_probe(struct pci_dev *pdev, else dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); - /* First try to read PBA as a string */ - err = txgbe_read_pba_string(wx, part_str, TXGBE_PBANUM_LENGTH); - if (err) - strncpy(part_str, "Unknown", TXGBE_PBANUM_LENGTH); - - netif_info(wx, probe, netdev, "%pM\n", netdev->dev_addr); - return 0; err_remove_phy: diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c index 4159c84035fdceb318e0617d455bccb132f51ea5..129cd5042180f7662e686cf346fcc5429e15021a 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c @@ -20,6 +20,8 @@ #include "txgbe_phy.h" #include "txgbe_hw.h" +#define TXGBE_I2C_CLK_DEV_NAME "i2c_dw" + static int txgbe_swnodes_register(struct txgbe *txgbe) { struct 
txgbe_nodes *nodes = &txgbe->nodes; @@ -159,7 +161,8 @@ static int txgbe_mdio_pcs_init(struct txgbe *txgbe) static struct phylink_pcs *txgbe_phylink_mac_select(struct phylink_config *config, phy_interface_t interface) { - struct txgbe *txgbe = netdev_to_txgbe(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); + struct txgbe *txgbe = wx->priv; if (interface == PHY_INTERFACE_MODE_10GBASER) return &txgbe->xpcs->pcs; @@ -175,7 +178,7 @@ static void txgbe_mac_config(struct phylink_config *config, unsigned int mode, static void txgbe_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); } @@ -186,9 +189,11 @@ static void txgbe_mac_link_up(struct phylink_config *config, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); u32 txcfg, wdg; + wx_fc_enable(wx, tx_pause, rx_pause); + txcfg = rd32(wx, WX_MAC_TX_CFG); txcfg &= ~WX_MAC_TX_CFG_SPEED_MASK; @@ -217,7 +222,7 @@ static void txgbe_mac_link_up(struct phylink_config *config, static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); wr32m(wx, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, 0); @@ -228,7 +233,7 @@ static int txgbe_mac_prepare(struct phylink_config *config, unsigned int mode, static int txgbe_mac_finish(struct phylink_config *config, unsigned int mode, phy_interface_t interface) { - struct wx *wx = netdev_priv(to_net_dev(config->dev)); + struct wx *wx = phylink_to_wx(config); txgbe_enable_sec_tx_path(wx); wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE); @@ -253,10 +258,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) phy_interface_t phy_mode; struct phylink *phylink; - config = devm_kzalloc(&wx->pdev->dev, sizeof(*config), GFP_KERNEL); - if (!config) - return -ENOMEM; - + config = &wx->phylink_config; config->dev = &wx->netdev->dev; config->type = PHYLINK_NETDEV; config->mac_capabilities = MAC_10000FD | MAC_1000FD | MAC_100FD | @@ -287,7 +289,7 @@ static int txgbe_phylink_init(struct txgbe *txgbe) } } - txgbe->phylink = phylink; + wx->phylink = phylink; return 0; } @@ -483,7 +485,7 @@ static void txgbe_irq_handler(struct irq_desc *desc) TXGBE_PX_MISC_ETH_AN)) { u32 reg = rd32(wx, TXGBE_CFG_PORT_ST); - phylink_mac_change(txgbe->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); + phylink_mac_change(wx->phylink, !!(reg & TXGBE_CFG_PORT_ST_LINK_UP)); } /* unmask interrupt */ @@ -551,8 +553,8 @@ static int txgbe_clock_register(struct txgbe *txgbe) char clk_name[32]; struct clk *clk; - snprintf(clk_name, sizeof(clk_name), "i2c_designware.%d", - pci_dev_id(pdev)); + snprintf(clk_name, sizeof(clk_name), "%s.%d", + TXGBE_I2C_CLK_DEV_NAME, pci_dev_id(pdev)); clk = clk_register_fixed_rate(NULL, clk_name, NULL, 0, 156250000); if (IS_ERR(clk)) @@ -614,7 +616,7 @@ static int txgbe_i2c_register(struct txgbe *txgbe) info.parent = &pdev->dev; info.fwnode = software_node_fwnode(txgbe->nodes.group[SWNODE_I2C]); - info.name = "i2c_designware"; + info.name = TXGBE_I2C_CLK_DEV_NAME; info.id = pci_dev_id(pdev); info.res = &DEFINE_RES_IRQ(pdev->irq); @@ -647,58 +649,6 @@ static int txgbe_sfp_register(struct txgbe *txgbe) return 0; } -static int txgbe_phy_read(struct 
mii_bus *bus, int phy_addr, - int devnum, int regnum) -{ - struct wx *wx = bus->priv; - u32 val, command; - int ret; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) { - wx_err(wx, "Mdio read c45 command did not complete.\n"); - return ret; - } - - return (u16)rd32(wx, WX_MSCC); -} - -static int txgbe_phy_write(struct mii_bus *bus, int phy_addr, - int devnum, int regnum, u16 value) -{ - struct wx *wx = bus->priv; - int ret, command; - u16 val; - - /* setup and write the address cycle command */ - command = WX_MSCA_RA(regnum) | - WX_MSCA_PA(phy_addr) | - WX_MSCA_DA(devnum); - wr32(wx, WX_MSCA, command); - - command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY; - wr32(wx, WX_MSCC, command); - - /* wait to complete */ - ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000, - 100000, false, wx, WX_MSCC); - if (ret) - wx_err(wx, "Mdio write c45 command did not complete.\n"); - - return ret; -} - static int txgbe_ext_phy_init(struct txgbe *txgbe) { struct phy_device *phydev; @@ -715,8 +665,8 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) return -ENOMEM; mii_bus->name = "txgbe_mii_bus"; - mii_bus->read_c45 = &txgbe_phy_read; - mii_bus->write_c45 = &txgbe_phy_write; + mii_bus->read_c45 = &wx_phy_read_reg_mdi_c45; + mii_bus->write_c45 = &wx_phy_write_reg_mdi_c45; mii_bus->parent = &pdev->dev; mii_bus->phy_mask = GENMASK(31, 1); mii_bus->priv = wx; @@ -753,6 +703,7 @@ static int txgbe_ext_phy_init(struct txgbe *txgbe) int txgbe_init_phy(struct txgbe *txgbe) { + struct wx *wx = txgbe->wx; int ret; if (txgbe->wx->media_type == sp_media_copper) @@ -760,43 +711,43 @@ int txgbe_init_phy(struct txgbe *txgbe) ret = txgbe_swnodes_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register software nodes\n"); + wx_err(wx, "failed to register software nodes\n"); return ret; } ret = txgbe_mdio_pcs_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init mdio pcs: %d\n", ret); + wx_err(wx, "failed to init mdio pcs: %d\n", ret); goto err_unregister_swnode; } ret = txgbe_phylink_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init phylink\n"); + wx_err(wx, "failed to init phylink\n"); goto err_destroy_xpcs; } ret = txgbe_gpio_init(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init gpio\n"); + wx_err(wx, "failed to init gpio\n"); goto err_destroy_phylink; } ret = txgbe_clock_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register clock: %d\n", ret); + wx_err(wx, "failed to register clock: %d\n", ret); goto err_destroy_phylink; } ret = txgbe_i2c_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to init i2c interface: %d\n", ret); + wx_err(wx, "failed to init i2c interface: %d\n", ret); goto err_unregister_clk; } ret = txgbe_sfp_register(txgbe); if (ret) { - wx_err(txgbe->wx, "failed to register sfp\n"); + wx_err(wx, "failed to register sfp\n"); goto err_unregister_i2c; } @@ -808,7 +759,7 @@ int txgbe_init_phy(struct txgbe *txgbe) clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); err_destroy_phylink: - phylink_destroy(txgbe->phylink); + phylink_destroy(wx->phylink); err_destroy_xpcs: xpcs_destroy(txgbe->xpcs); err_unregister_swnode: @@ -820,8 +771,8 @@ int txgbe_init_phy(struct txgbe *txgbe) void 
txgbe_remove_phy(struct txgbe *txgbe) { if (txgbe->wx->media_type == sp_media_copper) { - phylink_disconnect_phy(txgbe->phylink); - phylink_destroy(txgbe->phylink); + phylink_disconnect_phy(txgbe->wx->phylink); + phylink_destroy(txgbe->wx->phylink); return; } @@ -829,7 +780,7 @@ void txgbe_remove_phy(struct txgbe *txgbe) platform_device_unregister(txgbe->i2c_dev); clkdev_drop(txgbe->clock); clk_unregister(txgbe->clk); - phylink_destroy(txgbe->phylink); + phylink_destroy(txgbe->wx->phylink); xpcs_destroy(txgbe->xpcs); software_node_unregister_node_group(txgbe->nodes.group); } diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 51199c355f95ce4f21e2fbcb745474316f282e72..801fd0aed1ff5ced2f1ecd626f4d08fed188a3d6 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -88,9 +88,6 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 -/* Part Number String Length */ -#define TXGBE_PBANUM_LENGTH 32 - /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -98,9 +95,6 @@ #define TXGBE_EEPROM_VERSION_L 0x1D #define TXGBE_EEPROM_VERSION_H 0x1E #define TXGBE_ISCSI_BOOT_CONFIG 0x07 -#define TXGBE_PBANUM0_PTR 0x05 -#define TXGBE_PBANUM1_PTR 0x06 -#define TXGBE_PBANUM_PTR_GUARD 0xFAFA #define TXGBE_MAX_MSIX_VECTORS 64 #define TXGBE_MAX_FDIR_INDICES 63 @@ -135,12 +129,8 @@ extern char txgbe_driver_name[]; -static inline struct txgbe *netdev_to_txgbe(struct net_device *netdev) -{ - struct wx *wx = netdev_priv(netdev); - - return wx->priv; -} +void txgbe_down(struct wx *wx); +void txgbe_up(struct wx *wx); #define NODE_PROP(_NAME, _PROP) \ (const struct software_node) { \ @@ -181,7 +171,6 @@ struct txgbe { struct wx *wx; struct txgbe_nodes nodes; struct dw_xpcs *xpcs; - struct phylink *phylink; struct platform_device *sfp_dev; struct platform_device *i2c_dev; struct clk_lookup *clock; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index 6554a2e89d3612dcb8e232802269a34c42d28e83..6449056b57dd3032b0a1fbd991f4248be0801d84 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -598,3 +598,15 @@ int pci_write_config_dword(const struct pci_dev *dev, int where, return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val); } EXPORT_SYMBOL(pci_write_config_dword); + +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set) +{ + u32 val; + + pci_read_config_dword(dev, pos, &val); + val &= ~clear; + val |= set; + pci_write_config_dword(dev, pos, val); +} +EXPORT_SYMBOL(pci_clear_and_set_config_dword); diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig index c0c3f28249907460fa7394e94253839f66a06ede..2a2a3ccd66ad3599638a21fc30df3facb71d02d5 100644 --- a/drivers/pci/controller/Kconfig +++ b/drivers/pci/controller/Kconfig @@ -342,6 +342,10 @@ config PCIE_XILINX_CPM Say 'Y' here if you want kernel support for the Xilinx Versal CPM host bridge. 
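The pci_clear_and_set_config_dword() helper added to drivers/pci/access.c above is a plain read-modify-write on a 32-bit config register: the clear mask is applied first, then the set bits are ORed in. A hypothetical caller that rewrites one field while preserving the rest of the register (the offset and mask names are illustrative only)::

	#include <linux/bitfield.h>

	#define MY_VENDOR_REG		0x200		/* illustrative offset */
	#define MY_VENDOR_REG_MODE	GENMASK(3, 0)	/* illustrative field */

	pci_clear_and_set_config_dword(pdev, MY_VENDOR_REG,
				       MY_VENDOR_REG_MODE,
				       FIELD_PREP(MY_VENDOR_REG_MODE, 2));

Note that the read and the write are two separate config accesses, so the update is not atomic with respect to other accessors of the same register; callers serialize on their own where that matters.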
+config PCI_SW64 + bool + depends on SW64 && PCI + source "drivers/pci/controller/cadence/Kconfig" source "drivers/pci/controller/dwc/Kconfig" source "drivers/pci/controller/mobiveil/Kconfig" diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile index 37c8663de7fe1ff7c9c948cd39f4b6ce1a912f5b..9d161c053bc4b206611b82a574c65a0c2f83bcb3 100644 --- a/drivers/pci/controller/Makefile +++ b/drivers/pci/controller/Makefile @@ -39,6 +39,7 @@ obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o obj-$(CONFIG_PCIE_APPLE) += pcie-apple.o obj-$(CONFIG_PCIE_MT7621) += pcie-mt7621.o +obj-$(CONFIG_PCI_SW64) += pci-sunway.o # pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW obj-y += dwc/ diff --git a/drivers/pci/controller/pci-loongson.c b/drivers/pci/controller/pci-loongson.c index d45e7b8dc530d6162730ee677371992c36881501..df08dbf60c5c0abb26d16fd802d0397c3e9e14b9 100644 --- a/drivers/pci/controller/pci-loongson.c +++ b/drivers/pci/controller/pci-loongson.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "../pci.h" @@ -32,6 +33,7 @@ #define DEV_LS7A_CONF 0x7a10 #define DEV_LS7A_GNET 0x7a13 #define DEV_LS7A_EHCI 0x7a14 +#define DEV_LS7A_OHCI 0x7a24 #define DEV_LS7A_DC2 0x7a36 #define DEV_LS7A_HDMI 0x7a37 @@ -80,13 +82,63 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_LPC, system_bus_quirk); +static void loongson_d3_quirk(struct pci_dev *pdev) +{ + pdev->dev_flags |= PCI_DEV_FLAGS_NO_D3; + pdev->no_d1d2 = 1; +} +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT3, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT4, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT5, loongson_d3_quirk); +DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_LOONGSON, + DEV_LS7A_PCIE_PORT6, loongson_d3_quirk); + +/* + * Some Loongson PCIe ports have hardware limitations on their Maximum Read + * Request Size. They can't handle anything larger than this. Sane + * firmware will set proper MRRS at boot, so we only need no_inc_mrrs for + * bridges. However, some MIPS Loongson firmware doesn't set MRRS properly, + * so we have to enforce maximum safe MRRS, which is 256 bytes. + */ +#ifdef CONFIG_MIPS +static void loongson_set_min_mrrs_quirk(struct pci_dev *pdev) +{ + struct pci_bus *bus = pdev->bus; + struct pci_dev *bridge; + static const struct pci_device_id bridge_devids[] = { + { PCI_VDEVICE(LOONGSON, DEV_LS2K_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT0) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT1) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT2) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT3) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT4) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT5) }, + { PCI_VDEVICE(LOONGSON, DEV_LS7A_PCIE_PORT6) }, + { 0, }, + }; + + /* look for the matching bridge */ + while (!pci_is_root_bus(bus)) { + bridge = bus->self; + bus = bus->parent; + + if (pci_match_id(bridge_devids, bridge)) { + if (pcie_get_readrq(pdev) > 256) { + pci_info(pdev, "limiting MRRS to 256\n"); + pcie_set_readrq(pdev, 256); + } + break; + } + } +} +DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, loongson_set_min_mrrs_quirk); +#endif + static void loongson_mrrs_quirk(struct pci_dev *pdev) { - /* - * Some Loongson PCIe ports have h/w limitations of maximum read - * request size. They can't handle anything larger than this. 
So - * force this limit on any devices attached under these ports. - */ struct pci_host_bridge *bridge = pci_find_host_bridge(pdev->bus); bridge->no_inc_mrrs = 1; @@ -127,6 +179,98 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_HDMI, loongson_pci_pin_quirk); +static void loongson_ohci_quirk(struct pci_dev *dev) +{ + if (dev->revision == 0x2) + dev->resource[0].start += 0x1000; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, DEV_LS7A_OHCI, loongson_ohci_quirk); + +static void loongson_display_quirk(struct pci_dev *dev) +{ + u32 val; + u64 mask, size; + u64 max_size = 0; + int i, num; + struct pci_bus *bus = dev->bus; + + if (!dev->bus->number) { + if (!(dev->vendor == PCI_VENDOR_ID_LOONGSON && dev->device == 0x7a25)) + return; + } else { + while (!pci_is_root_bus(bus->parent)) + bus = bus->parent; + + /* ensure slot is 7a2000 */ + if (bus->self->vendor != PCI_VENDOR_ID_LOONGSON || bus->self->device < 0x7a39) + return; + } + max_size = 0; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + if (dev->resource[i].flags & IORESOURCE_MEM) { + size = dev->resource[i].end - dev->resource[i].start; + if (size > max_size) { + max_size = size; + num = i; + } + } + } + mask = ~(dev->resource[num].end - dev->resource[num].start); + val = (dev->resource[num].start >> (24 - 16)) | ((mask >> 24) & 0xffff); + writel(val, (volatile void *)0x80000efdfb000174UL); + writel(0x80000000, (volatile void *)0x80000efdfb000170UL); +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LOONGSON, 0x7a25, loongson_display_quirk); +DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, + PCI_BASE_CLASS_DISPLAY, 16, loongson_display_quirk); + +static void pci_fixup_aspeed(struct pci_dev *pdev) +{ + struct pci_dev *bridge; + struct pci_bus *bus; + struct pci_dev *vdevp = NULL; + u16 config; + + bus = pdev->bus; + bridge = bus->self; + + /* Is VGA routed to us? */ + if (bridge && (pci_is_bridge(bridge))) { + pci_read_config_word(bridge, PCI_BRIDGE_CONTROL, &config); + + /* Yes, this bridge is PCI bridge-to-bridge spec compliant, + * just return! + */ + if (config & PCI_BRIDGE_CTL_VGA) + return; + + dev_warn(&pdev->dev, "VGA bridge control is not enabled\n"); + } + + /* Just return if the system already have a default device */ + if (vga_default_device()) + return; + + /* No default vga device */ + while ((vdevp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, vdevp))) { + if (vdevp->vendor != 0x1a03) { + /* Have other vga devcie in the system, do nothing */ + dev_info(&pdev->dev, + "Another boot vga device: 0x%x:0x%x\n", + vdevp->vendor, vdevp->device); + return; + } + } + + vga_set_default_device(pdev); + + dev_info(&pdev->dev, + "Boot vga device set as 0x%x:0x%x\n", + pdev->vendor, pdev->device); +} +DECLARE_PCI_FIXUP_CLASS_FINAL(0x1a03, 0x2000, + PCI_CLASS_DISPLAY_VGA, 8, pci_fixup_aspeed); + static struct loongson_pci *pci_bus_to_loongson_pci(struct pci_bus *bus) { struct pci_config_window *cfg; @@ -206,6 +350,36 @@ static void __iomem *pci_loongson_map_bus(struct pci_bus *bus, return NULL; } +static int pci_loongson_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + void __iomem *addr; + + addr = bus->ops->map_bus(bus, devfn, where); + if (!addr) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + if (size == 1) + *val = readb(addr); + else if (size == 2) + *val = readw(addr); + else + *val = readl(addr); + /* + * fix some pcie card not scanning properly when bus number is + * inconsistent during firmware and kernel scan phases. 
+ */ + if (*val == 0x0 && where == PCI_VENDOR_ID) { + writel(*val, addr); + *val = readl(addr); + } + + + return PCIBIOS_SUCCESSFUL; +} + #ifdef CONFIG_OF static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) @@ -229,7 +403,7 @@ static int loongson_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) /* LS2K/LS7A accept 8/16/32-bit PCI config operations */ static struct pci_ops loongson_pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, }; @@ -272,6 +446,7 @@ static int loongson_pci_probe(struct platform_device *pdev) struct device_node *node = dev->of_node; struct pci_host_bridge *bridge; struct resource *regs; + unsigned int num = 0; if (!node) return -ENODEV; @@ -296,7 +471,9 @@ static int loongson_pci_probe(struct platform_device *pdev) } if (priv->data->flags & FLAG_CFG1) { - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (priv->cfg0_base) + num = 1; + regs = platform_get_resource(pdev, IORESOURCE_MEM, num); if (!regs) dev_info(dev, "missing mem resource for cfg1\n"); else { @@ -353,7 +530,7 @@ const struct pci_ecam_ops loongson_pci_ecam_ops = { .init = loongson_pci_ecam_init, .pci_ops = { .map_bus = pci_loongson_map_bus, - .read = pci_generic_config_read, + .read = pci_loongson_config_read, .write = pci_generic_config_write, } }; diff --git a/drivers/pci/controller/pci-sunway.c b/drivers/pci/controller/pci-sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..036994ffde381f09d502a0c36fc6210f1f3a271c --- /dev/null +++ b/drivers/pci/controller/pci-sunway.c @@ -0,0 +1,805 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +#include + +void set_devint_wken(int node) +{ + unsigned long val; + + /* enable INTD wakeup */ + val = 0x80; + sw64_io_write(node, DEVINT_WKEN, val); + sw64_io_write(node, DEVINTWK_INTEN, val); +} + +#ifdef CONFIG_UNCORE_JUNZHANG +void set_adr_int(int node) +{ + sw64_io_write(node, ADR_INT_CONFIG, (0x0 << 16 | 0x3f)); + sw64_io_write(node, ADR_CTL, 0xc); +} +#endif + +void set_pcieport_service_irq(int node, int index) +{ + if (IS_ENABLED(CONFIG_PCIE_PME)) + write_piu_ior0(node, index, PMEINTCONFIG, PME_ENABLE_INTD_CORE0); + + if (IS_ENABLED(CONFIG_PCIEAER)) + write_piu_ior0(node, index, AERERRINTCONFIG, AER_ENABLE_INTD_CORE0); +} + +int chip_pcie_configure(struct pci_controller *hose) +{ + struct pci_dev *dev; + struct pci_bus *bus, *top; + struct list_head *next; + unsigned int max_read_size, smallest_max_payload; + int max_payloadsize; + unsigned long rc_index, node; + unsigned long piuconfig0, value; + unsigned int pcie_caps_offset; + unsigned int rc_conf_value; + u16 devctl, new_values; + bool rc_ari_disabled = false, found = false; + unsigned char bus_max_num; + + node = hose->node; + rc_index = hose->index; + smallest_max_payload = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + smallest_max_payload &= PCI_EXP_DEVCAP_PAYLOAD; + bus_max_num = hose->busn_space->start; + + top = hose->bus; + bus = top; + next = top->devices.next; + + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + if (!found) { + if 
(pci_is_root_bus(dev->bus)) { + if (list_empty(&dev->subordinate->devices)) + rc_ari_disabled = true; + } else { + if (!pci_ari_enabled(dev->bus)) { + rc_ari_disabled = true; + found = true; + } + } + } + + if (bus->busn_res.end > bus_max_num) + bus_max_num = bus->busn_res.end; + + /* Query device PCIe capability register */ + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + max_payloadsize = dev->pcie_mpss; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + } + + if (rc_ari_disabled) { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value &= ~PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } else { + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL2); + rc_conf_value |= PCI_EXP_DEVCTL2_ARI; + write_rc_conf(node, rc_index, RC_EXP_DEVCTL2, rc_conf_value); + } + + rc_conf_value = read_rc_conf(node, rc_index, RC_EXP_DEVCAP); + rc_conf_value &= PCI_EXP_DEVCAP_PAYLOAD; + max_payloadsize = rc_conf_value; + if (max_payloadsize < smallest_max_payload) + smallest_max_payload = max_payloadsize; + + max_read_size = 0x2; /* Limit to 512B */ + value = read_rc_conf(node, rc_index, RC_EXP_DEVCTL); + value &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + value |= (max_read_size << 12) | (smallest_max_payload << 5); + write_rc_conf(node, rc_index, RC_EXP_DEVCTL, value); + new_values = (max_read_size << 12) | (smallest_max_payload << 5); + + piuconfig0 = read_piu_ior0(node, rc_index, PIUCONFIG0); + piuconfig0 &= ~(0x7fUL << 9); + if (smallest_max_payload == 0x2) { + piuconfig0 |= (0x20UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } else { + piuconfig0 |= (0x40UL << 9); + write_piu_ior0(node, rc_index, PIUCONFIG0, piuconfig0); + } + + pr_info("Node%ld RC%ld MPSS %luB, MRRS %luB, Piuconfig0 %#lx, ARI %s\n", + node, rc_index, (1UL << smallest_max_payload) << 7, + (1UL << max_read_size) << 7, piuconfig0, + rc_ari_disabled ? "disabled" : "enabled"); + + /* Now, set the max_payload_size for all devices to that value. 
*/ + bus = top; + next = top->devices.next; + for (;;) { + if (next == &bus->devices) { + /* end of this bus, go up or finish */ + if (bus == top) + break; + next = bus->self->bus_list.next; + bus = bus->self->bus; + continue; + } + dev = list_entry(next, struct pci_dev, bus_list); + if (dev->subordinate) { + /* this is a pci-pci bridge, do its devices next */ + next = dev->subordinate->devices.next; + bus = dev->subordinate; + } else + next = dev->bus_list.next; + + pcie_caps_offset = dev->pcie_cap; + if (pcie_caps_offset == 0) + continue; + + pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, &devctl); + devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ); + devctl |= new_values; + pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL, devctl); + } + + return bus_max_num; +} + +static int check_pci_linkup(unsigned long node, unsigned long index) +{ + unsigned long rc_debug; + + if (is_guest_or_emul()) { + if (node == 0 && index == 0) + return 0; + else + return 1; + } else { + rc_debug = read_piu_ior1(node, index, RCDEBUGINF1); + } + + return !(rc_debug == 0x111); +} + +static void set_rc_piu(unsigned long node, unsigned long index) +{ + unsigned int i __maybe_unused; + unsigned int value; + u32 rc_misc_ctrl; + + if (is_guest_or_emul()) + return; + + /* configure RC, set PCI-E root controller */ + write_rc_conf(node, index, RC_COMMAND, 0x00100007); + write_rc_conf(node, index, RC_PORT_LINK_CTL, 0x1f0020); + write_rc_conf(node, index, RC_EXP_DEVCTL, 0x2850); + write_rc_conf(node, index, RC_EXP_DEVCTL2, 0x6); + write_rc_conf(node, index, RC_ORDER_RULE_CTL, 0x0100); + + /* enable DBI_RO_WR_EN */ + rc_misc_ctrl = read_rc_conf(node, index, RC_MISC_CONTROL_1); + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl | 0x1); + + /* fix up DEVICE_ID_VENDOR_ID register */ + value = (PCI_DEVICE_ID_SW64_ROOT_BRIDGE << 16) | PCI_VENDOR_ID_JN; + write_rc_conf(node, index, RC_VENDOR_ID, value); + + /* set PCI-E root class code */ + value = read_rc_conf(node, index, RC_REVISION_ID); + write_rc_conf(node, index, RC_REVISION_ID, (PCI_CLASS_BRIDGE_HOST << 16) | value); + + /* disable DBI_RO_WR_EN */ + write_rc_conf(node, index, RC_MISC_CONTROL_1, rc_misc_ctrl); + + write_rc_conf(node, index, RC_PRIMARY_BUS, 0xffffff); + write_piu_ior0(node, index, PIUCONFIG0, PIUCONFIG0_INIT_VAL); + + write_piu_ior1(node, index, PIUCONFIG1, 0x2); + write_piu_ior1(node, index, ERRENABLE, -1); + + /* set DMA offset value PCITODMA_OFFSET */ + write_piu_ior0(node, index, EPDMABAR, PCITODMA_OFFSET); + if (IS_ENABLED(CONFIG_PCI_MSI)) { + write_piu_ior0(node, index, MSIADDR, MSIX_MSG_ADDR); +#ifdef CONFIG_UNCORE_XUELANG + for (i = 0; i < 256; i++) + write_piu_ior0(node, index, MSICONFIG0 + (i << 7), 0); +#endif + } +} + +static void set_intx(unsigned long node, unsigned long index, + unsigned long int_conf) +{ + if (is_guest_or_emul()) + return; + +#if defined(CONFIG_UNCORE_XUELANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x8UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x1UL << 10)); +#elif defined(CONFIG_UNCORE_JUNZHANG) + write_piu_ior0(node, index, INTACONFIG, int_conf | (0x1UL << 10)); + write_piu_ior0(node, index, INTBCONFIG, int_conf | (0x2UL << 10)); + write_piu_ior0(node, index, INTCCONFIG, int_conf | (0x4UL << 10)); + write_piu_ior0(node, index, INTDCONFIG, int_conf | (0x8UL << 10)); +#endif +} + +static unsigned long 
get_rc_enable(unsigned long node) +{ + unsigned long rc_enable; + + if (is_guest_or_emul()) + return 1; + + rc_enable = sw64_io_read(node, IO_START); + + return rc_enable; +} + +static int map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + + if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) + return hose->service_irq; + else + return hose->int_irq; +} + +static void hose_init(struct pci_controller *hose) +{ + unsigned long pci_io_base; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + pci_io_base = IO_BASE | (hose->node << IO_NODE_SHIFT) + | PCI_BASE | (hose->index << IO_RC_SHIFT); + + hose->dense_mem_base = pci_io_base; + hose->dense_io_base = pci_io_base | PCI_LEGACY_IO; + hose->ep_config_space_base = __va(pci_io_base | PCI_EP_CFG); + hose->rc_config_space_base = __va(pci_io_base | PCI_RC_CFG); + + hose->mem_space->start = pci_io_base + PCI_32BIT_MEMIO; + hose->mem_space->end = hose->mem_space->start + PCI_32BIT_MEMIO_SIZE - 1; + hose->mem_space->name = "pci memory space"; + hose->mem_space->flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, hose->mem_space) < 0) + pr_err("Failed to request MEM on hose %ld\n", hose->index); + hose->pre_mem_space->start = pci_io_base | PCI_64BIT_MEMIO; + hose->pre_mem_space->end = hose->pre_mem_space->start + PCI_64BIT_MEMIO_SIZE - 1; + hose->pre_mem_space->name = "pci pre mem space"; + hose->pre_mem_space->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64; + + if (request_resource(&iomem_resource, hose->pre_mem_space) < 0) + pr_err("Failed to request 64bit MEM on hose %ld\n", hose->index); + hose->io_space->start = pci_io_base | PCI_LEGACY_IO; + hose->io_space->end = hose->io_space->start + PCI_LEGACY_IO_SIZE - 1; + hose->io_space->name = "pci io space"; + hose->io_space->flags = IORESOURCE_IO; + + if (request_resource(&ioport_resource, hose->io_space) < 0) + pr_err("Failed to request IO on hose %ld\n", hose->index); + hose->busn_space->name = "PCI busn"; + hose->busn_space->start = 0xff; + hose->busn_space->end = 0xff; + hose->busn_space->flags = IORESOURCE_BUS; + hose->first_busno = hose->self_busno = hose->busn_space->start; + hose->last_busno = hose->busn_space->end; + + if (is_in_host()) { + if (IS_ENABLED(CONFIG_PCI_MSI)) + memset(hose->piu_msiconfig, 0, 256/8); + } +}; + +static struct sw64_pci_init_ops chip_pci_init_ops = { + .map_irq = map_irq, + .get_rc_enable = get_rc_enable, + .hose_init = hose_init, + .set_rc_piu = set_rc_piu, + .check_pci_linkup = check_pci_linkup, + .set_intx = set_intx, +}; + +void __init setup_chip_pci_ops(void) +{ + sw64_chip_init->pci_init = chip_pci_init_ops; +} + +static unsigned long rc_linkup; +static struct pci_controller *head, **tail = &head; + +static void pci_mark_rc_linkup(unsigned long node, unsigned long index) +{ + set_bit(node * 8 + index, &rc_linkup); +} + +static int pci_get_rc_linkup(unsigned long node, unsigned long index) +{ + return test_bit(node * 8 + index, &rc_linkup); +} + +/** + * Link the specified pci controller to list + */ +extern struct pci_controller *hose_head; +static void pci_link_controller(struct pci_controller *hose) +{ + if (unlikely(!hose)) + return; + + *tail = hose; + tail = &hose->next; + + if (!hose_head) + hose_head = head; +} + +struct pci_controller *bus_num_to_pci_controller(unsigned long bus_num) +{ + struct pci_controller *hose; + + for (hose = head; hose; hose = hose->next) { + if (bus_num >= hose->first_busno && bus_num <= hose->last_busno) + return hose; 
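In pci-sunway.c above, the link state of every root complex is packed into the single rc_linkup word: pci_mark_rc_linkup() and pci_get_rc_linkup() address bit node * 8 + index, which allows eight RC ports per node and, with a 64-bit unsigned long, at most eight nodes. A worked example of the mapping, restating the two helpers::

	unsigned long node = 2, index = 3;
	unsigned int bit = node * 8 + index;	/* 2 * 8 + 3 == bit 19 */

	set_bit(bit, &rc_linkup);		/* node 2, RC 3 linked up */
	WARN_ON(!test_bit(19, &rc_linkup));	/* same bit reads back set */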
+ } + + return NULL; +} + +struct pci_controller *pci_bus_to_pci_controller(const struct pci_bus *bus) +{ + struct pci_config_window *cfg = NULL; + + if (unlikely(!bus)) + return NULL; + + if (acpi_disabled) + return (struct pci_controller *)(bus->sysdata); + + cfg = (struct pci_config_window *)bus->sysdata; + return (struct pci_controller *)(cfg->priv); +} + +/** + * PCIe Root Complex read config space operations + */ +static int sw64_pcie_read_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + u32 data; + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = hose->rc_config_space_base; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc read addr:%px bus %d, devfn %#x, where %#x size=%d\t", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, size); + + if ((uintptr_t)where & (size - 1)) { + *val = 0; + return PCIBIOS_BAD_REGISTER_NUMBER; + } + + /** + * Workaround for the sw6a chipset, which only supports scanning + * with devfn = 0, while sw6b does not have this limit. + */ + if (unlikely(devfn > 0)) { + *val = ~0; + return PCIBIOS_DEVICE_NOT_FOUND; + } + + data = readl(cfg_iobase + ((where & ~3) << 5)); + + switch (size) { + case 1: + *val = (data >> (8 * (where & 0x3))) & 0xff; + break; + case 2: + *val = (data >> (8 * (where & 0x2))) & 0xffff; + break; + default: + *val = data; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("*val %#x\n ", *val); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * PCIe Root Complex write config space operations + */ +int sw64_pcie_write_rc_cfg(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + u32 data; + u32 shift = 8 * (where & 3); + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase = (void *)hose->rc_config_space_base; + + if ((uintptr_t)where & (size - 1)) + return PCIBIOS_BAD_REGISTER_NUMBER; + + switch (size) { + case 1: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xff << shift); + data |= (val & 0xff) << shift; + break; + case 2: + data = readl(cfg_iobase + ((where & ~3) << 5)); + data &= ~(0xffff << shift); + data |= (val & 0xffff) << shift; + break; + default: + data = val; + break; + } + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("rc write addr:%px bus %d, devfn %#x, where %#x *val %#x size %d\n", + cfg_iobase + ((where & ~3) << 5), bus->number, devfn, where, val, size); + + writel(data, cfg_iobase + ((where & ~3) << 5)); + + return PCIBIOS_SUCCESSFUL; +} + +/** + * sw64_pcie_valid_device - check if a valid device is present on bus + * @bus : PCI bus structure + * @devfn: device/function + * + * @return: 'true' on success and 'false' if invalid device is found + */ +static bool sw64_pcie_valid_device(struct pci_bus *bus, unsigned int devfn) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_in_host()) { + /* Only one device down on each root complex */ + if (bus->number == hose->self_busno && devfn > 0) + return false; + } + + return true; +} + +/** + * sw64_pcie_config_read - read val from config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val[out]: the value read from PCI host controller or device + * + * @return: whether the read operation succeeded + */ +static int sw64_pcie_config_read(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + int ret =
PCIBIOS_DEVICE_NOT_FOUND; + + if (is_guest_or_emul()) + return pci_generic_config_read(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) { + ret = sw64_pcie_read_rc_cfg(bus, devfn, where, size, val); + } else { + if (pci_get_rc_linkup(hose->node, hose->index)) + ret = pci_generic_config_read(bus, devfn, where, size, val); + else + return ret; + } + return ret; +} + +/** + * sw64_pcie_config_write - write val to config space of PCI host controller or device + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * @size : size of val + * @val : the value written to the PCI host controller or device + * + * @return: whether the write operation succeeded + */ +static int sw64_pcie_config_write(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + + if (is_guest_or_emul()) + return pci_generic_config_write(bus, devfn, where, size, val); + + hose->self_busno = hose->busn_space->start; + + if (unlikely(bus->number == hose->self_busno)) + return sw64_pcie_write_rc_cfg(bus, devfn, where, size, val); + else + return pci_generic_config_write(bus, devfn, where, size, val); +} + +/** + * sw64_pcie_map_bus - get configuration base address + * @bus : PCI bus structure + * @devfn: device/function + * @where: offset from base + * + * @return: base address of the configuration space needed to be + * accessed. + */ +static void __iomem *sw64_pcie_map_bus(struct pci_bus *bus, + unsigned int devfn, int where) +{ + struct pci_controller *hose = pci_bus_to_pci_controller(bus); + void __iomem *cfg_iobase; + unsigned long relbus; + + if (!sw64_pcie_valid_device(bus, devfn)) + return NULL; + + /** + * ECAM of Sunway PCI host controller is slightly + * different from the standard: + * [31:24]: bus number + * [23:19]: device number + * [18:16]: function number + * [15:12]: reserved + * [11:8] : extended config space registers + * [7:2] : legacy config space registers + */ + relbus = (bus->number << 24) | (devfn << 16) | where; + + cfg_iobase = hose->ep_config_space_base + relbus; + + if (IS_ENABLED(CONFIG_PCI_DEBUG)) + pr_debug("addr:%px bus %d, devfn %d, where %d\n", + cfg_iobase, bus->number, devfn, where); + return cfg_iobase; +} + +#ifdef CONFIG_ACPI +int sw64_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + return map_irq(dev, slot, pin); +} + +static void setup_intx_irqs(struct pci_controller *hose) +{ + unsigned long int_conf, node, val_node; + unsigned long index; + int irq, rcid; + + node = hose->node; + index = hose->index; + + if (!node_online(node)) + val_node = next_node_in(node, node_online_map); + else + val_node = node; + irq = irq_alloc_descs_from(NR_IRQS_LEGACY, 2, val_node); + WARN_ON(irq < 0); + irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq); + irq_set_status_flags(irq, IRQ_LEVEL); + hose->int_irq = irq; + irq_set_chip_and_handler(irq + 1, &dummy_irq_chip, handle_level_irq); + hose->service_irq = irq + 1; + rcid = cpu_to_rcid(0); + + pr_info_once("INTx are directed to node %d core %d.\n", + ((rcid >> 6) & 0x3), (rcid & 0x1f)); + int_conf = 1UL << 62 | rcid; /* rebase all intx on the first logical cpu */ + + set_intx(node, index, int_conf); + + set_pcieport_service_irq(node, index); +} + +static int sw64_pci_prepare_controller(struct pci_controller *hose, + struct acpi_device *adev) +{ + unsigned long long index, node; + unsigned long long rc_config_base_addr; + unsigned long long
pci_io_base_addr; + unsigned long long ep_io_base_addr; + acpi_status rc; + + /* Get node from ACPI namespace */ + node = acpi_get_node(adev->handle); + if (node == NUMA_NO_NODE) { + dev_err(&adev->dev, "unable to get node ID\n"); + return -EEXIST; + } + + /* Get index from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "INDX", NULL, &index); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve INDX\n"); + return -EEXIST; + } + + /** + * Get Root Complex config space base address. + * + * For sw64, Root Complex config space base addr is different + * from Endpoint config space base address. Use MCFG table to + * pass Endpoint config space base address, and define Root Complex + * config space base address("RCCB") separately in the ACPI namespace. + */ + rc = acpi_evaluate_integer(adev->handle, "RCCB", NULL, &rc_config_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCCB\n"); + return -EEXIST; + } + + /* Get Root Complex I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "RCIO", NULL, &pci_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve RCIO\n"); + return -EEXIST; + } + + /* Get Endpoint I/O space base addr from ACPI namespace */ + rc = acpi_evaluate_integer(adev->handle, "EPIO", NULL, &ep_io_base_addr); + if (rc != AE_OK) { + dev_err(&adev->dev, "unable to retrieve EPIO\n"); + return -EEXIST; + } + + hose->iommu_enable = false; + hose->index = index; + hose->node = node; + + hose->sparse_mem_base = 0; + hose->sparse_io_base = 0; + hose->dense_mem_base = pci_io_base_addr; + hose->dense_io_base = ep_io_base_addr; + + hose->rc_config_space_base = __va(rc_config_base_addr); + + hose->first_busno = 0xff; + hose->last_busno = 0xff; + hose->self_busno = 0xff; + + hose->need_domain_info = 0; + +#if IS_ENABLED(CONFIG_PCI_MSI) + if (is_in_host()) + memset(hose->piu_msiconfig, 0, 256 / 8); /* 256 bits bitmap */ +#endif + + /** + * There are two prerequisites for Root Complex + * of Sunway to work: + * 1. Root Complex enable + * 2. Root Complex link up + */ + set_rc_piu(hose->node, hose->index); + if (check_pci_linkup(hose->node, hose->index)) { + /** + * Root Complex link up failed. + * This usually means that there is no device in the slot. + */ + dev_info(&adev->dev, "Node%ld RC%ld: failed to link up\n", + hose->node, hose->index); + } else { + pci_mark_rc_linkup(hose->node, hose->index); + dev_info(&adev->dev, "Node%ld RC%ld: successfully linked up\n", + hose->node, hose->index); + } + + setup_intx_irqs(hose); + + pci_link_controller(hose); + + return 0; +} + +/** + * Use the info from ACPI to init pci_controller + */ +static int sw64_pci_ecam_init(struct pci_config_window *cfg) +{ + struct pci_controller *hose = NULL; + struct device *dev = cfg->parent; + struct acpi_device *adev = to_acpi_device(dev); + phys_addr_t mcfg_addr; + int ret; + + /** + * First, check whether Root Complex is enabled. + * If the Root Complex is disabled, there's no need to continue. + * + * In ACPI namespace, we use _STA method to indicate + * whether Root Complex is enabled. + * + * The _STA has been checked when creating acpi_device. + * Double check here to get the latest hardware status.
+ */ + ret = acpi_bus_get_status(adev); + if (ret) { + dev_err(dev, "unable to retrieve _STA\n"); + return ret; + } + + if (!adev->status.present) { + dev_err(dev, "RC is not enabled\n"); + return -ENODEV; + } + + hose = kzalloc(sizeof(*hose), GFP_KERNEL); + if (!hose) { + dev_err(dev, "failed to allocate pci_controller\n"); + return -ENOMEM; + } + + /* Get Endpoint config space base address from MCFG table */ + mcfg_addr = cfg->res.start - (cfg->busr.start << cfg->ops->bus_shift); + + /* + * "__va(mcfg_addr)" is equal to "cfg->win", so we can also use + * "hose->ep_config_space_base = cfg->win" here. + */ + hose->ep_config_space_base = __va(mcfg_addr); + + /* Init pci_controller */ + ret = sw64_pci_prepare_controller(hose, adev); + if (ret) { + kfree(hose); + dev_err(&adev->dev, "failed to init pci controller\n"); + return ret; + } + + cfg->priv = (void *)hose; + + return 0; +} + +const struct pci_ecam_ops sw64_pci_ecam_ops = { + .bus_shift = 24, + .init = sw64_pci_ecam_init, + .pci_ops = { + .map_bus = sw64_pcie_map_bus, + .read = sw64_pcie_config_read, + .write = sw64_pcie_config_write, + } +}; +#endif diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c index ef1d8857a51ba691701e049efb9c05d668e9dc27..8298d02a667bdc7dde01a367b20a80df4a6599cb 100644 --- a/drivers/pci/msi/msi.c +++ b/drivers/pci/msi/msi.c @@ -402,12 +402,32 @@ static int msi_capability_init(struct pci_dev *dev, int nvec, return ret; } +#ifdef CONFIG_LOONGARCH +static unsigned int pci_irq_numbers = 32; + +static int __init pci_irq_limit(char *str) +{ + get_option(&str, &pci_irq_numbers); + + if (pci_irq_numbers == 0) + pci_irq_numbers = 32; + return 0; +} + +early_param("pci_irq_limit", pci_irq_limit); +#endif + int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd) { int nvec; int rc; +#ifdef CONFIG_LOONGARCH + if (maxvec > 32) + maxvec = pci_irq_numbers; +#endif + if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0) return -EINVAL; @@ -778,6 +798,11 @@ int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int { int hwsize, rc, nvec = maxvec; +#ifdef CONFIG_LOONGARCH + if (maxvec > 32) + nvec = pci_irq_numbers; +#endif + if (maxvec < minvec) return -ERANGE; diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index a607f277ccf108d2c42045fb21c6f29bbb9cd1bd..e43adac054c4cf85f846f814644c67879eaf074d 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -32,6 +32,9 @@ #include #include #include +#ifdef CONFIG_MACH_LOONGSON64 +#include +#endif #include "pci.h" DEFINE_MUTEX(pci_slot_mutex); @@ -172,6 +175,15 @@ static bool pci_bridge_d3_disable; /* Force bridge_d3 for all PCIe ports */ static bool pci_bridge_d3_force; +#ifdef CONFIG_MACH_LOONGSON64 + +#ifndef CONFIG_PM_SLEEP +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + +#endif + static int __init pcie_port_pm_setup(char *str) { if (!strcmp(str, "off")) @@ -4778,7 +4790,11 @@ int pcie_flr(struct pci_dev *dev) * 100ms, but may silently discard requests while the FLR is in * progress. Wait 100ms before trying to access the device.
*/ +#ifdef CONFIG_SW64 + msleep(1000); +#else msleep(100); +#endif return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS); } @@ -5165,6 +5181,69 @@ int pci_bridge_wait_for_secondary_bus(struct pci_dev *dev, char *reset_type) PCIE_RESET_READY_POLL_MS - delay); } +static void pci_save_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + int i; + + /* if not a yitian710 device, return early */ + if (!dev->broken_bus_reset) + return; + + /* save the PCIe type 1 config space header */ + for (i = 0; i < 16; i++) + pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]); + + pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &saved->dev_ctrl); + pcie_capability_read_word(dev, PCI_EXP_RTCTL, &saved->root_ctrl); + pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &saved->dev_ctrl2); + + if (dev->acs_cap) + pci_read_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + &saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER + if (dev->aer_cap) + pci_read_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + &saved->root_err_cmd); +#endif + + pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &saved->slot_ctrl); +} + +static void pci_restore_yitian710_regs(struct pci_dev *dev, + struct pci_saved_regs *saved) +{ + if (!dev->broken_bus_reset) + return; + + /* restore the PCIe type 1 config space header */ + pci_restore_config_space_range(dev, 0, 15, 0, false); + + /* + * restore Device Control, Root Control and Device Control 2 + * in the PCI Express Capability + */ + pcie_capability_write_word(dev, PCI_EXP_DEVCTL, saved->dev_ctrl); + pcie_capability_write_word(dev, PCI_EXP_RTCTL, saved->root_ctrl); + pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, saved->dev_ctrl2); + + /* restore ACS Capability Register */ + if (dev->acs_cap) + pci_write_config_dword(dev, dev->acs_cap + PCI_ACS_CAP, + saved->acs_cap_ctrl); + +#ifdef CONFIG_PCIEAER + /* restore AER Root Error Command Register */ + if (dev->aer_cap) + pci_write_config_dword(dev, dev->aer_cap + PCI_ERR_ROOT_COMMAND, + saved->root_err_cmd); +#endif + + /* restore Slot Control Register */ + pcie_capability_write_word(dev, PCI_EXP_SLTCTL, saved->slot_ctrl); +} + void pci_reset_secondary_bus(struct pci_dev *dev) { u16 ctrl; @@ -5197,9 +5276,18 @@ void __weak pcibios_reset_secondary_bus(struct pci_dev *dev) */ int pci_bridge_secondary_bus_reset(struct pci_dev *dev) { + int rc; + struct pci_saved_regs saved = { }; + + /* save key regs for yitian710 during bus reset */ + pci_save_yitian710_regs(dev, &saved); + pcibios_reset_secondary_bus(dev); + rc = pci_bridge_wait_for_secondary_bus(dev, "bus reset"); - return pci_bridge_wait_for_secondary_bus(dev, "bus reset"); + /* restore regs for yitian710 */ + pci_restore_yitian710_regs(dev, &saved); + return rc; } EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset); @@ -6147,8 +6235,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) { u16 v; int ret; +#ifdef CONFIG_MACH_LOONGSON64 struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus); - +#endif if (rq < 128 || rq > 4096 || !is_power_of_2(rq)) return -EINVAL; @@ -6166,7 +6255,9 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) v = (ffs(rq) - 8) << 12; - if (bridge->no_inc_mrrs) { +#ifdef CONFIG_MACH_LOONGSON64 + if (pm_suspend_target_state == PM_SUSPEND_ON && + bridge->no_inc_mrrs) { int max_mrrs = pcie_get_readrq(dev); if (rq > max_mrrs) { @@ -6174,6 +6265,7 @@ int pcie_set_readrq(struct pci_dev *dev, int rq) return -EINVAL; } } +#endif ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_READRQ, v); diff --git
a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index fc18e42f0a6edff2ded292aeccfbff7be661908a..5875f4f2f11075569099ac33f13b46268f0a1fb6 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -423,17 +423,6 @@ static void pcie_aspm_check_latency(struct pci_dev *endpoint) } } -static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos, - u32 clear, u32 set) -{ - u32 val; - - pci_read_config_dword(pdev, pos, &val); - val &= ~clear; - val |= set; - pci_write_config_dword(pdev, pos, val); -} - /* Calculate L1.2 PM substate timing parameters */ static void aspm_calc_l12_info(struct pcie_link_state *link, u32 parent_l1ss_cap, u32 child_l1ss_cap) @@ -494,10 +483,12 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK; if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1_2_MASK, 0); } /* Program T_POWER_ON times in both ports */ @@ -505,22 +496,26 @@ static void aspm_calc_l12_info(struct pcie_link_state *link, pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2); /* Program Common_Mode_Restore_Time in upstream device */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1); /* Program LTR_L1.2_THRESHOLD time in both ports */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_LTR_L12_TH_VALUE | - PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_LTR_L12_TH_VALUE | + PCI_L1SS_CTL1_LTR_L12_TH_SCALE, + ctl1); if (pl1_2_enables || cl1_2_enables) { - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0, - pl1_2_enables); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0, - cl1_2_enables); + pci_clear_and_set_config_dword(parent, + parent->l1ss + PCI_L1SS_CTL1, 0, + pl1_2_enables); + pci_clear_and_set_config_dword(child, + child->l1ss + PCI_L1SS_CTL1, 0, + cl1_2_enables); } } @@ -680,10 +675,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) */ /* Disable all L1 substates */ - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, 0); /* * If needed, disable L1, and it gets enabled later * in pcie_config_aspm_link(). 
@@ -706,10 +701,10 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) val |= PCI_L1SS_CTL1_PCIPM_L1_2; /* Enable what we need to enable */ - pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); - pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, - PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); + pci_clear_and_set_config_dword(child, child->l1ss + PCI_L1SS_CTL1, + PCI_L1SS_CTL1_L1SS_MASK, val); } static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index 43159965e09e932db82759ba5bfa64380357ba8f..622c53bf6abe43182745e6e5c35669fb27838367 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c @@ -142,6 +142,7 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK; flags |= IORESOURCE_MEM; + if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH) flags |= IORESOURCE_PREFETCH; diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index ae95d09507722e0950fc71bde0a9ee9793e909eb..5290455742917c06006690a10aa044424ca206ca 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -4521,6 +4521,7 @@ DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a01, PCI_CLASS_NOT_DEFINED, DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_AMD, 0x1a02, PCI_CLASS_NOT_DEFINED, 8, quirk_relaxedordering_disable); +#ifndef CONFIG_SW64 /* * Per PCIe r3.0, sec 2.2.9, "Completion headers must supply the same * values for the Attribute as were supplied in the header of the @@ -4577,6 +4578,7 @@ static void quirk_chelsio_T5_disable_root_port_attributes(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CHELSIO, PCI_ANY_ID, quirk_chelsio_T5_disable_root_port_attributes); +#endif /* * pci_acs_ctrl_enabled - compare desired ACS controls with those provided @@ -5067,6 +5069,13 @@ static const struct pci_dev_acs_enabled { { PCI_VENDOR_ID_AMPERE, 0xE00A, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00B, pci_quirk_xgene_acs }, { PCI_VENDOR_ID_AMPERE, 0xE00C, pci_quirk_xgene_acs }, +#ifdef CONFIG_ARCH_PHYTIUM + /* the PLX switch vendor ID is 0x10b5 on Phytium CPUs */ + { 0x10b5, PCI_ANY_ID, pci_quirk_xgene_acs }, + /* the Root Complex vendor ID is 0x17cd on Phytium CPUs */ + { 0x17cd, PCI_ANY_ID, pci_quirk_xgene_acs }, +#endif + /* Broadcom multi-function device */ { PCI_VENDOR_ID_BROADCOM, 0x16D7, pci_quirk_mf_endpoint_acs }, { PCI_VENDOR_ID_BROADCOM, 0x1750, pci_quirk_mf_endpoint_acs }, @@ -6223,3 +6232,18 @@ static void pci_fixup_d3cold_delay_1sec(struct pci_dev *pdev) pdev->d3cold_delay = 1000; } DECLARE_PCI_FIXUP_FINAL(0x5555, 0x0004, pci_fixup_d3cold_delay_1sec); +/* + * On the Alibaba yitian710 SoC, the hardware always clears the PCIe config + * space and some key registers when resetting the secondary bus. As a + * result, the OS cannot recover from a fatal PCIe error, which finally + * causes an unexpected system error. + * + * Luckily, a simple save/restore of these regs around the bus reset + * fixes the issue.
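+ * The state saved and restored around the reset is the Type 1 config + * header plus Device Control, Device Control 2, Root Control, Slot + * Control, the ACS controls and, with CONFIG_PCIEAER, the AER Root + * Error Command register; see pci_save_yitian710_regs() in pci.c.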
+ */ +static void quirk_save_yitian710_regs(struct pci_dev *dev) +{ + dev->broken_bus_reset = 1; +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_VENDOR_ID_ALIBABA, 0x8000, + PCI_CLASS_BRIDGE_PCI, 8, quirk_save_yitian710_regs); diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 273d67ecf6d2530f0f31c8dd393b31b1fde560ee..ec6e0d9194a1c577c1378444470a23ff614fc101 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -217,6 +217,13 @@ config MARVELL_CN10K_DDR_PMU Enable perf support for Marvell DDR Performance monitoring event on CN10K platform. +config DWC_PCIE_PMU + tristate "Synopsys DesignWare PCIe PMU" + depends on PCI + help + Enable perf support for the Synopsys DesignWare PCIe PMU performance + monitoring events on platforms including the Alibaba Yitian 710. + source "drivers/perf/arm_cspmu/Kconfig" source "drivers/perf/amlogic/Kconfig" diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index 16b3ec4db916d0bcb5f3895a82c7d9b1a77e18a2..a06338e3401c9f6ebbe9593a5176e220f760a357 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_MARVELL_CN10K_TAD_PMU) += marvell_cn10k_tad_pmu.o obj-$(CONFIG_MARVELL_CN10K_DDR_PMU) += marvell_cn10k_ddr_pmu.o obj-$(CONFIG_APPLE_M1_CPU_PMU) += apple_m1_cpu_pmu.o obj-$(CONFIG_ALIBABA_UNCORE_DRW_PMU) += alibaba_uncore_drw_pmu.o +obj-$(CONFIG_DWC_PCIE_PMU) += dwc_pcie_pmu.o obj-$(CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU) += arm_cspmu/ obj-$(CONFIG_MESON_DDR_PMU) += amlogic/ obj-$(CONFIG_CXL_PMU) += cxl_pmu.o diff --git a/drivers/perf/arm-cmn.c b/drivers/perf/arm-cmn.c index caae2d3e9d3ead3b32efd4c215a3a1ef132d3b8d..e6ff6b00527448d027b7dfbdd4623a38187e1dee 100644 --- a/drivers/perf/arm-cmn.c +++ b/drivers/perf/arm-cmn.c @@ -2428,6 +2428,7 @@ static int arm_cmn_probe(struct platform_device *pdev) struct arm_cmn *cmn; const char *name; static atomic_t id; + struct resource *cfg; int err, rootnode, this_id; cmn = devm_kzalloc(&pdev->dev, sizeof(*cmn), GFP_KERNEL); @@ -2442,7 +2443,16 @@ static int arm_cmn_probe(struct platform_device *pdev) rootnode = arm_cmn600_acpi_probe(pdev, cmn); } else { rootnode = 0; - cmn->base = devm_platform_ioremap_resource(pdev, 0); + + /* + * Avoid registering resources, as the PMU's registers are + * scattered throughout the CMN and may appear on either side + * of registers for other 'devices' (e.g. the MPAM MSC controls). + */ + cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!cfg) + return -EINVAL; + cmn->base = devm_ioremap(&pdev->dev, cfg->start, resource_size(cfg)); if (IS_ERR(cmn->base)) return PTR_ERR(cmn->base); if (cmn->part == PART_CMN600) diff --git a/drivers/perf/dwc_pcie_pmu.c b/drivers/perf/dwc_pcie_pmu.c new file mode 100644 index 0000000000000000000000000000000000000000..957058ad0099e2faa97c28d22153285983abf045 --- /dev/null +++ b/drivers/perf/dwc_pcie_pmu.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare PCIe PMU driver + * + * Copyright (C) 2021-2023 Alibaba Inc. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/cpuhotplug.h> +#include <linux/cpumask.h> +#include <linux/device.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/perf_event.h> +#include <linux/pci.h> +#include <linux/platform_device.h> +#include <linux/smp.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define DWC_PCIE_VSEC_RAS_DES_ID 0x02 +#define DWC_PCIE_EVENT_CNT_CTL 0x8 + +/* + * Event Counter Data Select includes two parts: + * - 27-24: Group number(4-bit: 0..0x7) + * - 23-16: Event number(8-bit: 0..0x13) within the Group + * + * Put them together as in TRM.
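+ * For example, the lane event "tx_ack_dllp" has eventid 0x600: written + * via FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, 0x600), it lands in bits + * [27:16] as Group #6, event 0x00.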
+ */ +#define DWC_PCIE_CNT_EVENT_SEL GENMASK(27, 16) +#define DWC_PCIE_CNT_LANE_SEL GENMASK(11, 8) +#define DWC_PCIE_CNT_STATUS BIT(7) +#define DWC_PCIE_CNT_ENABLE GENMASK(4, 2) +#define DWC_PCIE_PER_EVENT_OFF 0x1 +#define DWC_PCIE_PER_EVENT_ON 0x3 +#define DWC_PCIE_EVENT_CLEAR GENMASK(1, 0) +#define DWC_PCIE_EVENT_PER_CLEAR 0x1 + +#define DWC_PCIE_EVENT_CNT_DATA 0xC + +#define DWC_PCIE_TIME_BASED_ANAL_CTL 0x10 +#define DWC_PCIE_TIME_BASED_REPORT_SEL GENMASK(31, 24) +#define DWC_PCIE_TIME_BASED_DURATION_SEL GENMASK(15, 8) +#define DWC_PCIE_DURATION_MANUAL_CTL 0x0 +#define DWC_PCIE_DURATION_1MS 0x1 +#define DWC_PCIE_DURATION_10MS 0x2 +#define DWC_PCIE_DURATION_100MS 0x3 +#define DWC_PCIE_DURATION_1S 0x4 +#define DWC_PCIE_DURATION_2S 0x5 +#define DWC_PCIE_DURATION_4S 0x6 +#define DWC_PCIE_DURATION_4US 0xFF +#define DWC_PCIE_TIME_BASED_TIMER_START BIT(0) +#define DWC_PCIE_TIME_BASED_CNT_ENABLE 0x1 + +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW 0x14 +#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH 0x18 + +/* Event attributes */ +#define DWC_PCIE_CONFIG_EVENTID GENMASK(15, 0) +#define DWC_PCIE_CONFIG_TYPE GENMASK(19, 16) +#define DWC_PCIE_CONFIG_LANE GENMASK(27, 20) + +#define DWC_PCIE_EVENT_ID(event) FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config) +#define DWC_PCIE_EVENT_TYPE(event) FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config) +#define DWC_PCIE_EVENT_LANE(event) FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config) + +enum dwc_pcie_event_type { + DWC_PCIE_TIME_BASE_EVENT, + DWC_PCIE_LANE_EVENT, + DWC_PCIE_EVENT_TYPE_MAX, +}; + +#define DWC_PCIE_LANE_EVENT_MAX_PERIOD GENMASK_ULL(31, 0) +#define DWC_PCIE_MAX_PERIOD GENMASK_ULL(63, 0) + +struct dwc_pcie_pmu { + struct pmu pmu; + struct pci_dev *pdev; /* Root Port device */ + u16 ras_des_offset; + u32 nr_lanes; + + struct list_head pmu_node; + struct hlist_node cpuhp_node; + struct perf_event *event[DWC_PCIE_EVENT_TYPE_MAX]; + int on_cpu; +}; + +#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu)) + +static int dwc_pcie_pmu_hp_state; +static struct list_head dwc_pcie_dev_info_head = + LIST_HEAD_INIT(dwc_pcie_dev_info_head); +static bool notify; + +struct dwc_pcie_dev_info { + struct platform_device *plat_dev; + struct pci_dev *pdev; + struct list_head dev_node; +}; + +struct dwc_pcie_vendor_id { + int vendor_id; +}; + +static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = { + {.vendor_id = PCI_VENDOR_ID_ALIBABA }, + {} /* terminator */ +}; + +static ssize_t cpumask_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu)); +} +static DEVICE_ATTR_RO(cpumask); + +static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL +}; + +static struct attribute_group dwc_pcie_cpumask_attr_group = { + .attrs = dwc_pcie_pmu_cpumask_attrs, +}; + +struct dwc_pcie_format_attr { + struct device_attribute attr; + u64 field; + int config; +}; + +PMU_FORMAT_ATTR(eventid, "config:0-15"); +PMU_FORMAT_ATTR(type, "config:16-19"); +PMU_FORMAT_ATTR(lane, "config:20-27"); + +static struct attribute *dwc_pcie_format_attrs[] = { + &format_attr_type.attr, + &format_attr_eventid.attr, + &format_attr_lane.attr, + NULL, +}; + +static struct attribute_group dwc_pcie_format_attrs_group = { + .name = "format", + .attrs = dwc_pcie_format_attrs, +}; + +struct dwc_pcie_event_attr { + struct device_attribute attr; + enum dwc_pcie_event_type type; + 
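+ /* raw event select value, i.e. the low 16 bits of the perf config */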
u16 eventid; + u8 lane; +}; + +static ssize_t dwc_pcie_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dwc_pcie_event_attr *eattr; + + eattr = container_of(attr, typeof(*eattr), attr); + + if (eattr->type == DWC_PCIE_LANE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n", + eattr->eventid, eattr->type); + else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT) + return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n", + eattr->eventid, eattr->type); + + return 0; +} + +#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane) \ + (&((struct dwc_pcie_event_attr[]) {{ \ + .attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL), \ + .type = _type, \ + .eventid = _eventid, \ + .lane = _lane, \ + }})[0].attr.attr) + +#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0) +#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid) \ + DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0) + +static struct attribute *dwc_pcie_pmu_time_event_attrs[] = { + /* Group #0 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09), + + /* Group #1 */ + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_PCIe_TLP_Data_Payload, 0x20), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_PCIe_TLP_Data_Payload, 0x21), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Tx_CCIX_TLP_Data_Payload, 0x22), + DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(Rx_CCIX_TLP_Data_Payload, 0x23), + + /* + * Leave it to the user to specify the lane ID to avoid generating + * a list of hundreds of events. 
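+ * + * An illustrative invocation (the PMU instance name encodes the Root + * Port's BDF; bus 0x30, devfn 0x18 here is only an example): + * + * perf stat -e dwc_rootport_3018/rx_memory_read,lane=4/ -- sleep 1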
+ */ + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nulified_tlp, 0x604), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nulified_tlp, 0x605), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tl, 0x606), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715), + DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716), + DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717), + NULL +}; + +static const struct attribute_group dwc_pcie_event_attrs_group = { + .name = "events", + .attrs = dwc_pcie_pmu_time_event_attrs, +}; + +static const struct attribute_group *dwc_pcie_attr_groups[] = { + &dwc_pcie_event_attrs_group, + &dwc_pcie_format_attrs_group, + &dwc_pcie_cpumask_attr_group, + NULL +}; + +static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + if (enable) + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON); + else + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF); +} + +static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu, + bool enable) +{ + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + + pci_clear_and_set_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, + DWC_PCIE_TIME_BASED_TIMER_START, enable); +} + +static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 val; + + pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val); + + return val; +} + +static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + int event_id = DWC_PCIE_EVENT_ID(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 lo, hi, ss; + u64 val; + + /* + * The 64-bit value of the data counter is spread across two + * registers that are not synchronized. 
In order to read them + * atomically, ensure that the high 32 bits match before and after + * reading the low 32 bits. + */ + pci_read_config_dword(pdev, + ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi); + do { + /* snapshot the high 32 bits */ + ss = hi; + + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW, + &lo); + pci_read_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, + &hi); + } while (hi != ss); + + val = ((u64)hi << 32) | lo; + /* + * The Group#1 event measures the amount of data processed in 16-byte + * units. Simplify the end-user interface by multiplying the counter + * at the point of read. + */ + if (event_id >= 0x20 && event_id <= 0x23) + val *= 16; + + return val; +} + +static void dwc_pcie_pmu_event_update(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + u64 delta, prev, now = 0; + + do { + prev = local64_read(&hwc->prev_count); + + if (type == DWC_PCIE_LANE_EVENT) + now = dwc_pcie_pmu_read_lane_event_counter(event); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + now = dwc_pcie_pmu_read_time_based_counter(event); + + } while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev); + + delta = (now - prev) & DWC_PCIE_MAX_PERIOD; + /* 32-bit counter for Lane Event Counting */ + if (type == DWC_PCIE_LANE_EVENT) + delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD; + + local64_add(delta, &event->count); +} + +static int dwc_pcie_pmu_event_init(struct perf_event *event) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct perf_event *sibling; + u32 lane; + + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* We don't support sampling */ + if (is_sampling_event(event)) + return -EINVAL; + + /* We cannot support task bound events */ + if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + if (event->group_leader != event && + !is_software_event(event->group_leader)) + return -EINVAL; + + for_each_sibling_event(sibling, event->group_leader) { + if (sibling->pmu != event->pmu && !is_software_event(sibling)) + return -EINVAL; + } + + if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX) + return -EINVAL; + + if (type == DWC_PCIE_LANE_EVENT) { + lane = DWC_PCIE_EVENT_LANE(event); + if (lane < 0 || lane >= pcie_pmu->nr_lanes) + return -EINVAL; + } + + event->cpu = pcie_pmu->on_cpu; + + return 0; +} + +static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + hwc->state = 0; + local64_set(&hwc->prev_count, 0); + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, true); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true); +} + +static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + struct hw_perf_event *hwc = &event->hw; + + if (event->hw.state & PERF_HES_STOPPED) + return; + + if (type == DWC_PCIE_LANE_EVENT) + dwc_pcie_pmu_lane_event_enable(pcie_pmu, false); + else if (type == DWC_PCIE_TIME_BASE_EVENT) + dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false); + + dwc_pcie_pmu_event_update(event); + 
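+ /* the final count was folded in above; mark the event stopped and up to date */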
hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; +} + +static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + struct pci_dev *pdev = pcie_pmu->pdev; + struct hw_perf_event *hwc = &event->hw; + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + int event_id = DWC_PCIE_EVENT_ID(event); + int lane = DWC_PCIE_EVENT_LANE(event); + u16 ras_des_offset = pcie_pmu->ras_des_offset; + u32 ctrl; + + /* one counter for each type and it is in use */ + if (pcie_pmu->event[type]) + return -ENOSPC; + + pcie_pmu->event[type] = event; + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + + if (type == DWC_PCIE_LANE_EVENT) { + /* EVENT_COUNTER_DATA_REG needs clear manually */ + ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) | + FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) | + FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR); + pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL, + ctrl); + } else if (type == DWC_PCIE_TIME_BASE_EVENT) { + /* + * TIME_BASED_ANAL_DATA_REG is a 64 bit register, we can safely + * use it with any manually controlled duration. And it is + * cleared when next measurement starts. + */ + ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) | + FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL, + DWC_PCIE_DURATION_MANUAL_CTL) | + DWC_PCIE_TIME_BASED_CNT_ENABLE; + pci_write_config_dword( + pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl); + } + + if (flags & PERF_EF_START) + dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD); + + perf_event_update_userpage(event); + + return 0; +} + +static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags) +{ + struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu); + enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event); + + dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE); + perf_event_update_userpage(event); + pcie_pmu->event[type] = NULL; +} + +static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node) +{ + cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node); +} + +/* + * Find the binded DES capability device info of a PCI device. + * @pdev: The PCI device. 
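+ * + * Returns the cached dwc_pcie_dev_info, or NULL if @pdev has not been + * registered.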
+ */ +static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev) +{ + struct dwc_pcie_dev_info *dev_info; + + list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node) + if (dev_info->pdev == pdev) + return dev_info; + + return NULL; +} + +static void dwc_pcie_unregister_pmu(void *data) +{ + struct dwc_pcie_pmu *pcie_pmu = data; + + perf_pmu_unregister(&pcie_pmu->pmu); +} + +static bool dwc_pcie_match_des_cap(struct pci_dev *pdev) +{ + const struct dwc_pcie_vendor_id *vid; + u16 vsec = 0; + u32 val; + + if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT)) + return false; + + for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) { + vsec = pci_find_vsec_capability(pdev, vid->vendor_id, + DWC_PCIE_VSEC_RAS_DES_ID); + if (vsec) + break; + } + if (!vsec) + return false; + + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + if (PCI_VNDR_HEADER_REV(val) != 0x04) + return false; + + pci_dbg(pdev, + "Detected PCIe Vendor-Specific Extended Capability RAS DES\n"); + return true; +} + +static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info) +{ + platform_device_unregister(dev_info->plat_dev); + list_del(&dev_info->dev_node); + kfree(dev_info); +} + +static int dwc_pcie_register_dev(struct pci_dev *pdev) +{ + struct platform_device *plat_dev; + struct dwc_pcie_dev_info *dev_info; + u32 bdf; + + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", bdf, + pdev, sizeof(*pdev)); + + if (IS_ERR(plat_dev)) + return PTR_ERR(plat_dev); + + dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); + if (!dev_info) + return -ENOMEM; + + /* Cache platform device to handle pci device hotplug */ + dev_info->plat_dev = plat_dev; + dev_info->pdev = pdev; + list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head); + + return 0; +} + +static int dwc_pcie_pmu_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + struct pci_dev *pdev = to_pci_dev(dev); + struct dwc_pcie_dev_info *dev_info; + + switch (action) { + case BUS_NOTIFY_ADD_DEVICE: + if (!dwc_pcie_match_des_cap(pdev)) + return NOTIFY_DONE; + if (dwc_pcie_register_dev(pdev)) + return NOTIFY_BAD; + break; + case BUS_NOTIFY_DEL_DEVICE: + dev_info = dwc_pcie_find_dev_info(pdev); + if (!dev_info) + return NOTIFY_DONE; + dwc_pcie_unregister_dev(dev_info); + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block dwc_pcie_pmu_nb = { + .notifier_call = dwc_pcie_pmu_notifier, +}; + +static int dwc_pcie_pmu_probe(struct platform_device *plat_dev) +{ + struct pci_dev *pdev = plat_dev->dev.platform_data; + struct dwc_pcie_pmu *pcie_pmu; + char *name; + u32 bdf, val; + u16 vsec; + int ret; + + vsec = pci_find_vsec_capability(pdev, pdev->vendor, + DWC_PCIE_VSEC_RAS_DES_ID); + pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val); + bdf = PCI_DEVID(pdev->bus->number, pdev->devfn); + name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", bdf); + if (!name) + return -ENOMEM; + + pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL); + if (!pcie_pmu) + return -ENOMEM; + + pcie_pmu->pdev = pdev; + pcie_pmu->ras_des_offset = vsec; + pcie_pmu->nr_lanes = pcie_get_width_cap(pdev); + pcie_pmu->on_cpu = -1; + pcie_pmu->pmu = (struct pmu){ + .name = name, + .parent = &pdev->dev, + .module = THIS_MODULE, + .attr_groups = dwc_pcie_attr_groups, + .capabilities = PERF_PMU_CAP_NO_EXCLUDE, + .task_ctx_nr = perf_invalid_context, + .event_init = dwc_pcie_pmu_event_init, 
+ .add = dwc_pcie_pmu_event_add, + .del = dwc_pcie_pmu_event_del, + .start = dwc_pcie_pmu_event_start, + .stop = dwc_pcie_pmu_event_stop, + .read = dwc_pcie_pmu_event_update, + }; + + /* Add this instance to the list used by the offline callback */ + ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state, + &pcie_pmu->cpuhp_node); + if (ret) { + pci_err(pdev, "Error %d registering hotplug @%x\n", ret, bdf); + return ret; + } + + /* Unwind when platform driver removes */ + ret = devm_add_action_or_reset(&plat_dev->dev, + dwc_pcie_pmu_remove_cpuhp_instance, + &pcie_pmu->cpuhp_node); + if (ret) + return ret; + + ret = perf_pmu_register(&pcie_pmu->pmu, name, -1); + if (ret) { + pci_err(pdev, "Error %d registering PMU @%x\n", ret, bdf); + return ret; + } + ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu, + pcie_pmu); + if (ret) + return ret; + + return 0; +} + +static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + if (pcie_pmu->on_cpu == -1) + pcie_pmu->on_cpu = cpumask_local_spread( + 0, dev_to_node(&pcie_pmu->pdev->dev)); + + return 0; +} + +static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node) +{ + struct dwc_pcie_pmu *pcie_pmu; + struct pci_dev *pdev; + int node; + cpumask_t mask; + unsigned int target; + + pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node); + /* Nothing to do if this CPU doesn't own the PMU */ + if (cpu != pcie_pmu->on_cpu) + return 0; + + pcie_pmu->on_cpu = -1; + pdev = pcie_pmu->pdev; + node = dev_to_node(&pdev->dev); + if (cpumask_and(&mask, cpumask_of_node(node), cpu_online_mask) && + cpumask_andnot(&mask, &mask, cpumask_of(cpu))) + target = cpumask_any(&mask); + else + target = cpumask_any_but(cpu_online_mask, cpu); + + if (target >= nr_cpu_ids) { + pci_err(pdev, "There is no CPU to set\n"); + return 0; + } + + /* This PMU does NOT support interrupt, just migrate context. 
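+ * The free-running hardware counters are unaffected; perf simply + * resumes reading them from the new owning CPU.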
*/ + perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target); + pcie_pmu->on_cpu = target; + + return 0; +} + +static struct platform_driver dwc_pcie_pmu_driver = { + .probe = dwc_pcie_pmu_probe, + .driver = {.name = "dwc_pcie_pmu",}, +}; + +static int __init dwc_pcie_pmu_init(void) +{ + struct pci_dev *pdev = NULL; + bool found = false; + int ret; + + for_each_pci_dev(pdev) { + if (!dwc_pcie_match_des_cap(pdev)) + continue; + + ret = dwc_pcie_register_dev(pdev); + if (ret) { + pci_dev_put(pdev); + return ret; + } + + found = true; + } + if (!found) + return -ENODEV; + + ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "perf/dwc_pcie_pmu:online", + dwc_pcie_pmu_online_cpu, + dwc_pcie_pmu_offline_cpu); + if (ret < 0) + return ret; + + dwc_pcie_pmu_hp_state = ret; + + ret = platform_driver_register(&dwc_pcie_pmu_driver); + if (ret) + goto platform_driver_register_err; + + ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + if (ret) + goto platform_driver_register_err; + notify = true; + + return 0; + +platform_driver_register_err: + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); + + return ret; +} + +static void __exit dwc_pcie_pmu_exit(void) +{ + struct dwc_pcie_dev_info *dev_info, *tmp; + + if (notify) + bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb); + list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node) + dwc_pcie_unregister_dev(dev_info); + platform_driver_unregister(&dwc_pcie_pmu_driver); + cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state); +} + +module_init(dwc_pcie_pmu_init); +module_exit(dwc_pcie_pmu_exit); + +MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller"); +MODULE_AUTHOR("Shuai Xue "); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 7dfb7190580efaa47bcc8a68f73a642e23270f6f..79753411b778cb3766f100f05bb5605d239bea45 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -512,6 +512,7 @@ source "drivers/pinctrl/berlin/Kconfig" source "drivers/pinctrl/cirrus/Kconfig" source "drivers/pinctrl/freescale/Kconfig" source "drivers/pinctrl/intel/Kconfig" +source "drivers/pinctrl/zhaoxin/Kconfig" source "drivers/pinctrl/mediatek/Kconfig" source "drivers/pinctrl/meson/Kconfig" source "drivers/pinctrl/mvebu/Kconfig" diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index dd6cda27029492812122d2712336237cebc234f3..4275eca92488ef21dbfbfd0e02b5a9e2f7416ed8 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -58,6 +58,7 @@ obj-$(CONFIG_PINCTRL_BERLIN) += berlin/ obj-y += cirrus/ obj-y += freescale/ obj-$(CONFIG_X86) += intel/ +obj-$(CONFIG_X86) += zhaoxin/ obj-y += mediatek/ obj-$(CONFIG_PINCTRL_MESON) += meson/ obj-y += mvebu/ diff --git a/drivers/pinctrl/zhaoxin/Kconfig b/drivers/pinctrl/zhaoxin/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..65f95ca80d5c719bea361b24cbc9fcf2722efbac --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Kconfig @@ -0,0 +1,28 @@ +# SPDX-License-Identifier: GPL-2.0 +# Zhaoxin pin control drivers + +if (X86 || COMPILE_TEST) + +config PINCTRL_ZHAOXIN + tristate + select PINMUX + select PINCONF + select GENERIC_PINCONF + select GPIOLIB + select GPIOLIB_IRQCHIP + +config PINCTRL_KX7000 + tristate "Zhaoxin KX7000 pinctrl and GPIO driver" + depends on ACPI && X86 + default m + select PINCTRL_ZHAOXIN + help + This pinctrl driver provides an interface that allows configuring + Zhaoxin KX7000 chipset pins and using them as GPIOs.
+ + To compile this driver as a module, choose M here: the + module will be called pinctrl-kx7000. + + If unsure, say Y. + +endif diff --git a/drivers/pinctrl/zhaoxin/Makefile b/drivers/pinctrl/zhaoxin/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a3acfa66f196b489e0e7f398bd50cd8a0de6e0d9 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/Makefile @@ -0,0 +1,4 @@ +# zhaoxin pin control drivers + +obj-$(CONFIG_PINCTRL_ZHAOXIN) += pinctrl-zhaoxin.o +obj-$(CONFIG_PINCTRL_KX7000) += pinctrl-kx7000.o diff --git a/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c new file mode 100644 index 0000000000000000000000000000000000000000..f249dd369e7c8694efd04324dc4cb5a83f2d0491 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-kx7000.c @@ -0,0 +1,354 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * zhaoxin KX7000 pinctrl/GPIO driver + * + * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved. + * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include + +#include + +#include "pinctrl-zhaoxin.h" + +#define ZX_CAL_ARRAY(a, b) \ +{ \ + .pmio_offset = (a), \ + .size = (b), \ +} + +#define PMIO_RX90 100 +#define PMIO_RX8C 200 + +#define ZX_CAL_INDEX_ARRAY(a, b, c) \ +{ \ + .reg_port_base = (PMIO_RX90), \ + .reg_data_base = (PMIO_RX8C), \ + .index = (a), \ + .cal_array = (b), \ + .size = (c), \ +} + +/* kx7000 pin define */ +static const struct pinctrl_pin_desc kx7000_pins[] = { + + PINCTRL_PIN(0, "IOD_CPUTCK"), + PINCTRL_PIN(1, "IOD_CPUTMS"), + PINCTRL_PIN(2, "IOD_CPUTRST"), + PINCTRL_PIN(3, "IOD_CPUTDO"), + PINCTRL_PIN(4, "IOD_CPUTDI"), + PINCTRL_PIN(5, "IOD_ZLSCLK0"), + PINCTRL_PIN(6, "IOD_ZLDATA0"), + PINCTRL_PIN(7, "IOD_ZLSCLK1"), + PINCTRL_PIN(8, "IOD_ZLDATA1"), + PINCTRL_PIN(9, "IOD_CLK27M"), + PINCTRL_PIN(10, "IOD_CPURST"), + PINCTRL_PIN(11, "IOD_PWORK"), + PINCTRL_PIN(12, "IOD_RSMRST"), + PINCTRL_PIN(13, "IOD_THRMTRIP"), + //GPIO range 0 + PINCTRL_PIN(14, "USBHOC0"), + PINCTRL_PIN(15, "USBHOC1"), + PINCTRL_PIN(16, "USBHOC2"), + PINCTRL_PIN(17, "USBHOC3"), + PINCTRL_PIN(18, "USBHOC4"), + PINCTRL_PIN(19, "USBHOC5"), + PINCTRL_PIN(20, "USBHOC6"), + PINCTRL_PIN(21, "USBHOC7"), + //gpio range 1 + PINCTRL_PIN(22, "USB4SBTX0"), + PINCTRL_PIN(23, "USB4SBRX0"), + PINCTRL_PIN(24, "USB4SBTX1"), + PINCTRL_PIN(25, "USB4SBRX1"), + //gpio range 2 + PINCTRL_PIN(26, "I2C1DT"), + PINCTRL_PIN(27, "I2C1CK"), + PINCTRL_PIN(28, "I2C1INT"), + //gpio range 3 + PINCTRL_PIN(29, "I2C2DT"), + PINCTRL_PIN(30, "I2C2CK"), + //gpio range 4 + PINCTRL_PIN(31, "I2C2INT"), + //gpio range 5 + PINCTRL_PIN(32, "SMBDT1"), + PINCTRL_PIN(33, "SMBCK1"), + PINCTRL_PIN(34, "SMBDT2"), + PINCTRL_PIN(35, "SMBCK2"), + PINCTRL_PIN(36, "SMBALRT"), + //gpio range 6 + PINCTRL_PIN(37, "SME_I2CDT"), + PINCTRL_PIN(38, "SME_I2CCK"), + //gpio range 7 + PINCTRL_PIN(39, "PWM"), + PINCTRL_PIN(40, "TACH"), + //gpio range 8 + PINCTRL_PIN(41, "GPIO0"), + PINCTRL_PIN(42, "GPIO1"), + PINCTRL_PIN(43, "GPIO2"), + PINCTRL_PIN(44, "GPIO3"), + PINCTRL_PIN(45, "GPIO4"), + PINCTRL_PIN(46, "GPIO5"), + PINCTRL_PIN(47, "GPIO6"), + PINCTRL_PIN(48, "GPIO7"), + PINCTRL_PIN(49, "GPIO8"), + PINCTRL_PIN(50, "GPIO9"), + PINCTRL_PIN(51, "LPCCLK"), + PINCTRL_PIN(52, "LPCDRQ1"), + //gpio range 9 + PINCTRL_PIN(53, "LPCDRQ0"), + PINCTRL_PIN(54, "LPCFRAME"), + PINCTRL_PIN(55, "LPCAD3"), + PINCTRL_PIN(56, "LPCAD2"), + PINCTRL_PIN(57, "LPCAD1"), + PINCTRL_PIN(58, "LPCAD0"), + //gpio range 10 + PINCTRL_PIN(59, "SERIRQ"), + PINCTRL_PIN(60, "AZRST"), + PINCTRL_PIN(61, "AZBITCLK"), + PINCTRL_PIN(62, 
"AZSDIN0"), + PINCTRL_PIN(63, "AZSDIN1"), + PINCTRL_PIN(64, "AZSDOUT"), + PINCTRL_PIN(65, "AZSYNC"), + //gpio range 11 + PINCTRL_PIN(66, "I2S1_SCLK"), + PINCTRL_PIN(67, "I2S1_TXD"), + PINCTRL_PIN(68, "I2S1_WS"), + PINCTRL_PIN(69, "I2S1_MCLK"), + //gpio range 12 + PINCTRL_PIN(70, "I2S1_RXD"), + //gpio range 13 + PINCTRL_PIN(71, "I2S1_INT"), + PINCTRL_PIN(72, "MSPIDI"), + PINCTRL_PIN(73, "MSPIDO"), + PINCTRL_PIN(74, "MSPIIO2"), + PINCTRL_PIN(75, "MSPIIO3"), + PINCTRL_PIN(76, "MSPICLK"), + PINCTRL_PIN(77, "MSPISS0"), + //gpio range 14 + PINCTRL_PIN(78, "MSPISS1"), + PINCTRL_PIN(79, "MSPISS2"), + //gpio range 15 + PINCTRL_PIN(80, "SPIDEVINT"), + PINCTRL_PIN(81, "BIOSSEL"), + //gpio range 16 + PINCTRL_PIN(82, "THRM"), + PINCTRL_PIN(83, "PEXWAKE"), + PINCTRL_PIN(84, "PWRBTN"), + //gpio range 17 + PINCTRL_PIN(85, "SPKR"), + PINCTRL_PIN(86, "PME"), + //gpio range 18 + PINCTRL_PIN(87, "BATLOW"), + PINCTRL_PIN(88, "EXTSMI"), + PINCTRL_PIN(89, "SUSA"), + PINCTRL_PIN(90, "SUSB"), + PINCTRL_PIN(91, "SUSC"), + PINCTRL_PIN(92, "GPWAKE"), + PINCTRL_PIN(93, "RING"), + PINCTRL_PIN(94, "LID"), + PINCTRL_PIN(95, "SLPS0"), + PINCTRL_PIN(96, "PCIRST"), + PINCTRL_PIN(97, "SVID_VREN"), + //gpio range 19 + PINCTRL_PIN(98, "INTRUDER"), + //gpio range 20 + PINCTRL_PIN(99, "GFX_I2CCLK0"), + PINCTRL_PIN(100, "GFX_I2CDAT0"), + PINCTRL_PIN(101, "GFX_I2CCLK1"), + PINCTRL_PIN(102, "GFX_I2CDAT1"), + PINCTRL_PIN(103, "GFX_I2CCLK2"), + PINCTRL_PIN(104, "GFX_I2CDAT2"), + PINCTRL_PIN(105, "GFX_I2CCLK3"), + PINCTRL_PIN(106, "GFX_I2CDAT3"), + PINCTRL_PIN(107, "GFX_GPIO0"), + PINCTRL_PIN(108, "GFX_GPIO1"), + PINCTRL_PIN(109, "GFX_GPIO2"), + PINCTRL_PIN(110, "GFX_GPIO3"), + PINCTRL_PIN(111, "CRTHSYNC"), + PINCTRL_PIN(112, "CRTVSYNC"), +}; + +#define NOT_DEFINE -30000 + +static int calibrate_int[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static int calibrate_sattus[] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, + 63, 64, 65, 66, 67, 68, + 69, 70, + 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62 +}; + +static const struct reg_cal_array kx7000_int_cal[] = { + ZX_CAL_ARRAY(0x58, 16), + ZX_CAL_ARRAY(0x5A, 2), + ZX_CAL_ARRAY(0xDA, 16), + ZX_CAL_ARRAY(0xDE, 16), +}; + +static const struct reg_calibrate int_cal[] = { + { + .reg = kx7000_int_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_int_cal), + .cal_array = calibrate_int, + .size = ARRAY_SIZE(calibrate_int), + } +}; + +static const struct reg_cal_array kx7000_status_cal[] = { + ZX_CAL_ARRAY((0x8), 16), + ZX_CAL_ARRAY((0xE), 2), + ZX_CAL_ARRAY((0xA), 16), + ZX_CAL_ARRAY((0xC), 16), +}; + +static const struct reg_calibrate status_cal[] = { + { + .reg = kx7000_status_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_status_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct reg_cal_array kx7000_mod_sel_cal[] = { + ZX_CAL_ARRAY((0x0), 16), + ZX_CAL_ARRAY((0x6), 2), + ZX_CAL_ARRAY((0x2), 16), + ZX_CAL_ARRAY((0x4), 16), +}; + +static const struct reg_calibrate mod_sel_cal[] = { + { + .reg = kx7000_mod_sel_cal, + .reg_cal_size = ARRAY_SIZE(kx7000_mod_sel_cal), + .cal_array = calibrate_sattus, + .size = ARRAY_SIZE(calibrate_sattus), + } +}; + +static const struct index_cal_array kx7000_gpio_in_cal[] = { + ZX_CAL_INDEX_ARRAY(0x98, NULL, 71), +}; + +static const struct index_cal_array kx7000_gpio_out_cal[] = { + 
ZX_CAL_INDEX_ARRAY(0x90, NULL, 71), +}; + +static int calibrate_trigger[] = { + 0, 1, 2, 3, 4, 5, 6, 7, + 8, 9, 18, 19, + 20, 21, 22, 23, + 24, 25, 26, 27, + 28, 29, 30, 31, + 32, 33, 34, 35, + 36, 50, 51, 52, + 53, 54, 55, 56, + 57, 58, 59, 60, + 61, 62, 63, 64, + 65, 66, 67, 68, + 69, 70 +}; + +static const struct index_cal_array kx7000_trigger_cal[] = { + ZX_CAL_INDEX_ARRAY(0xA0, calibrate_trigger, 50), +}; + +static const struct zhaoxin_pin_topology kx7000_pin_topologys[] = { + { + .int_cal = int_cal, + .status_cal = status_cal, + .mod_sel_cal = mod_sel_cal, + .gpio_in_cal = kx7000_gpio_in_cal, + .gpio_out_cal = kx7000_gpio_out_cal, + .trigger_cal = kx7000_trigger_cal, + } +}; + +#define KX7000_GPP(s, e, g) \ +{ \ + .zhaoxin_range_pin_base = (s), \ + .zhaoxin_range_pin_size = ((e) - (s) + 1), \ + .zhaoxin_range_gpio_base = (g), \ +} + +static const struct zhaoxin_pin_map2_gpio kx7000_pinmap_gpps[] = { + KX7000_GPP(0, 13, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(14, 19, 10), + KX7000_GPP(20, 21, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(22, 25, 65), + KX7000_GPP(26, 28, 43), + KX7000_GPP(29, 30, 41), + KX7000_GPP(31, 31, 49), + KX7000_GPP(32, 36, 16), + KX7000_GPP(37, 38, 69), + KX7000_GPP(39, 40, 67), + KX7000_GPP(41, 50, 0), + KX7000_GPP(51, 52, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(53, 53, 39), + KX7000_GPP(54, 58, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(59, 59, 40), + KX7000_GPP(60, 65, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(66, 69, 35), + KX7000_GPP(70, 70, 46), + KX7000_GPP(71, 71, 64), + KX7000_GPP(72, 77, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(78, 78, 50), + KX7000_GPP(79, 79, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(80, 80, 51), + KX7000_GPP(81, 81, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(82, 82, 52), + KX7000_GPP(83, 84, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(85, 85, 53), + KX7000_GPP(86, 86, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(87, 95, 54), + KX7000_GPP(96, 97, ZHAOXIN_GPIO_BASE_NOMAP), + KX7000_GPP(98, 98, 63), + KX7000_GPP(99, 112, 21), +}; + +static const struct zhaoxin_pinctrl_soc_data kx7000_soc_data = { + .pins = kx7000_pins, + .npins = ARRAY_SIZE(kx7000_pins), + .pin_topologys = kx7000_pin_topologys, + .zhaoxin_pin_maps = kx7000_pinmap_gpps, + .pin_map_size = ARRAY_SIZE(kx7000_pinmap_gpps), +}; + +static const struct acpi_device_id kx7000_pinctrl_acpi_match[] = { + { "KX8344B", (kernel_ulong_t)&kx7000_soc_data }, + { } +}; +MODULE_DEVICE_TABLE(acpi, kx7000_pinctrl_acpi_match); + +static const struct dev_pm_ops kx7000_pinctrl_pm_ops = { + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(zhaoxin_pinctrl_suspend_noirq, zhaoxin_pinctrl_resume_noirq) +}; + +static struct platform_driver kx7000_pinctrl_driver = { + .probe = zhaoxin_pinctrl_probe_by_hid, + .driver = { + .name = "kx7000-pinctrl", + .acpi_match_table = kx7000_pinctrl_acpi_match, + .pm = &kx7000_pinctrl_pm_ops, + }, +}; + +module_platform_driver(kx7000_pinctrl_driver); + +MODULE_AUTHOR("www.zhaoxin.com"); +MODULE_DESCRIPTION("Shanghai Zhaoxin pinctrl driver"); +MODULE_VERSION(DRIVER_VERSION); +MODULE_LICENSE("GPL"); diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c new file mode 100644 index 0000000000000000000000000000000000000000..1e434869d3dd8e24e33207562b95663acd49a6a1 --- /dev/null +++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.c @@ -0,0 +1,758 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * zhaoxin pinctrl common code + * + * Copyright(c) 2021 Shanghai Zhaoxin Corporation. All rights reserved. 
+ * + */ + +#define DRIVER_VERSION "1.0.0" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "../core.h" +#include "pinctrl-zhaoxin.h" + +static int pin_to_hwgpio(struct pinctrl_gpio_range *range, unsigned int pin) +{ + int offset = 0; + + if (range->pins) { + for (offset = 0; offset < range->npins; offset++) + if (pin == range->pins[offset]) + break; + return range->base+offset-range->gc->base; + } else + return pin-range->pin_base+range->base-range->gc->base; +} + +static u16 zx_pad_read16(struct zhaoxin_pinctrl *pctrl, u8 index) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + return inw(pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static void zx_pad_write16(struct zhaoxin_pinctrl *pctrl, u8 index, u16 value) +{ + outb(index, pctrl->pmio_rx90+pctrl->pmio_base); + outw(value, pctrl->pmio_rx8c+pctrl->pmio_base); +} + +static int zhaoxin_get_groups_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->ngroups; +} + +static const char *zhaoxin_get_group_name(struct pinctrl_dev *pctldev, unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->groups[group].name; +} + +static int zhaoxin_get_group_pins(struct pinctrl_dev *pctldev, unsigned int group, + const unsigned int **pins, unsigned int *npins) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *pins = pctrl->soc->groups[group].pins; + *npins = pctrl->soc->groups[group].npins; + + return 0; +} + +static void zhaoxin_pin_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned int pin) +{ + +} + +static const struct pinctrl_ops zhaoxin_pinctrl_ops = { + .get_groups_count = zhaoxin_get_groups_count, + .get_group_name = zhaoxin_get_group_name, + .get_group_pins = zhaoxin_get_group_pins, + .pin_dbg_show = zhaoxin_pin_dbg_show, +}; + +static int zhaoxin_get_functions_count(struct pinctrl_dev *pctldev) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->nfunctions; +} + +static const char *zhaoxin_get_function_name(struct pinctrl_dev *pctldev, unsigned int function) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + return pctrl->soc->functions[function].name; +} + +static int zhaoxin_get_function_groups(struct pinctrl_dev *pctldev, unsigned int function, + const char * const **groups, unsigned int *const ngroups) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + *groups = pctrl->soc->functions[function].groups; + *ngroups = pctrl->soc->functions[function].ngroups; + + return 0; +} + +static int zhaoxin_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned int function, + unsigned int group) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + + dev_dbg(pctrl->dev, "%s,group=%d,func=%d\n", __func__, group, function); + return 0; +} + +#define ZHAOXIN_PULL_UP_20K 0x80 +#define ZHAOXIN_PULL_UP_10K 0x40 +#define ZHAOXIN_PULL_UP_47K 0x20 +#define ZHAOXIN_PULL_DOWN 0x10 + +#define ZHAOXIN_PULL_UP 0xe0 + +static void zhaoxin_gpio_set_gpio_mode_and_pull(struct zhaoxin_pinctrl *pctrl, unsigned int pin, + bool isup) +{ + u16 tmp = 0; + u16 value; + u16 value_back = 0; + + if (isup) + tmp = ZHAOXIN_PULL_UP_10K|1; + else + tmp = ZHAOXIN_PULL_DOWN|1; + value = zx_pad_read16(pctrl, pin); + + /* for gpio */ + if (pin <= 0x32 && pin >= 0x29) { + if (isup) { + value &= 
(~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value &= ~(0x1); + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + } else {/* for pgpio */ + if (isup) { + value &= (~(ZHAOXIN_PULL_DOWN)); + value |= tmp; + } else { + value &= (~(ZHAOXIN_PULL_UP)); + value |= tmp; + } + value |= 0x1; + zx_pad_write16(pctrl, pin, value); + value_back = zx_pad_read16(pctrl, pin); + } +} + +static int zhaoxin_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, unsigned int pin) +{ + struct zhaoxin_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + int hwgpio = pin_to_hwgpio(range, pin); + + dev_dbg(pctrl->dev, "%s, hwgpio=%d, pin=%d\n", __func__, hwgpio, pin); + zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, true); + return 0; +} + +static const struct pinmux_ops zhaoxin_pinmux_ops = { + .get_functions_count = zhaoxin_get_functions_count, + .get_function_name = zhaoxin_get_function_name, + .get_function_groups = zhaoxin_get_function_groups, + .set_mux = zhaoxin_pinmux_set_mux, + .gpio_request_enable = zhaoxin_gpio_request_enable, +}; + +static int zhaoxin_config_get(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *config) +{ + return 0; +} + +static int zhaoxin_config_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, + unsigned int nconfigs) +{ + return 0; +} + +static const struct pinconf_ops zhaoxin_pinconf_ops = { + .is_generic = true, + .pin_config_get = zhaoxin_config_get, + .pin_config_set = zhaoxin_config_set, +}; + +static const struct pinctrl_desc zhaoxin_pinctrl_desc = { + .pctlops = &zhaoxin_pinctrl_ops, + .pmxops = &zhaoxin_pinmux_ops, + .confops = &zhaoxin_pinconf_ops, + .owner = THIS_MODULE, +}; + +static int zhaoxin_gpio_to_pin(struct zhaoxin_pinctrl *pctrl, unsigned int offset, + const struct zhaoxin_pin_topology **community, + const struct zhaoxin_pin_map2_gpio **padgrp) +{ + int i; + + for (i = 0; i < pctrl->pin_map_size; i++) { + const struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i]; + + if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP) + continue; + if (offset >= map->zhaoxin_range_gpio_base && + offset < map->zhaoxin_range_gpio_base + map->zhaoxin_range_pin_size) { + int pin; + + pin = map->zhaoxin_range_pin_base + offset - map->zhaoxin_range_gpio_base; + if (padgrp) + *padgrp = map; + return pin; + } + } + return -EINVAL; +} + +static __maybe_unused int zhaoxin_pin_to_gpio(struct zhaoxin_pinctrl *pctrl, int pin) +{ + const struct zhaoxin_pin_map2_gpio *pin_maps; + + pin_maps = pctrl->pin_maps; + if (!pin_maps) + return -EINVAL; + + return pin - pin_maps->zhaoxin_range_pin_base + pin_maps->zhaoxin_range_gpio_base; +} + +static int zhaoxin_gpio_get(struct gpio_chip *chip, unsigned int offset) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); + const struct index_cal_array *gpio_in_cal; + int gap = offset/16; + int bit = offset%16; + int pin; + int value; + + gpio_in_cal = pctrl->pin_topologys->gpio_in_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + value = zx_pad_read16(pctrl, gpio_in_cal->index+gap); + value &= (1<<bit); + + return !!value; +} + +static void zhaoxin_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) +{ + struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(chip); + const struct index_cal_array *gpio_out_cal; + int gap = offset/16; + int bit = offset%16; + unsigned long flags; + int pin; + u16 org; + + gpio_out_cal = pctrl->pin_topologys->gpio_out_cal; + pin = zhaoxin_gpio_to_pin(pctrl, offset, NULL, NULL); + + raw_spin_lock_irqsave(&pctrl->lock, flags); + + org = zx_pad_read16(pctrl, gpio_out_cal->index+gap); + if (value) + org |= (1<<bit); + else + org &= ~(1<<bit); + zx_pad_write16(pctrl, gpio_out_cal->index+gap, org); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); +} + +static int zhaoxin_gpio_direction_input(struct gpio_chip *chip, unsigned int offset) +{
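+ /* let the pinctrl core mux the pin as a GPIO input */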
+	return pinctrl_gpio_direction_input(chip->base + offset);
+}
+
+static int zhaoxin_gpio_direction_output(struct gpio_chip *chip, unsigned int offset, int value)
+{
+	return pinctrl_gpio_direction_output(chip->base + offset);
+}
+
+static int zhaoxin_gpio_request(struct gpio_chip *gc, unsigned int offset)
+{
+	return gpiochip_generic_request(gc, offset);
+}
+
+static void zhaoxin_gpio_free(struct gpio_chip *gc, unsigned int offset)
+{
+	gpiochip_generic_free(gc, offset);
+}
+
+static int zhaoxin_gpio_config(struct gpio_chip *gc, unsigned int offset, unsigned long config)
+{
+	return gpiochip_generic_config(gc, offset, config);
+}
+
+static const struct gpio_chip zhaoxin_gpio_chip = {
+	.owner = THIS_MODULE,
+	.request = zhaoxin_gpio_request,
+	.free = zhaoxin_gpio_free,
+	.direction_input = zhaoxin_gpio_direction_input,
+	.direction_output = zhaoxin_gpio_direction_output,
+	.get = zhaoxin_gpio_get,
+	.set = zhaoxin_gpio_set,
+	.set_config = zhaoxin_gpio_config,
+};
+
+static void zhaoxin_gpio_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct reg_calibrate *status_cal;
+	const struct reg_cal_array *reg_off;
+	int gpio = irqd_to_hwirq(d);
+	int i, j;
+	int offset = 0;
+	int base_offset = 0;
+	int bit_off = 0;
+	u16 value;
+	u16 value_read;
+
+	status_cal = pctrl->pin_topologys->status_cal;
+	if (gpio >= 0) {
+		for (i = 0; i < status_cal->size; i++)
+			if (gpio == status_cal->cal_array[i])
+				break;
+		for (j = 0; j < status_cal->reg_cal_size; j++) {
+			if (offset > i)
+				break;
+			offset += status_cal->reg[j].size;
+		}
+		reg_off = &status_cal->reg[j-1];
+		bit_off = i-(offset-reg_off->size);
+		base_offset = reg_off->pmio_offset;
+		value = readw(pctrl->pm_pmio_base+reg_off->pmio_offset);
+		value_read = value;
+		value |= (1<<bit_off);
+		writew(value, pctrl->pm_pmio_base+reg_off->pmio_offset);
+	}
+}
+
+static void zhaoxin_gpio_irq_mask_unmask(struct irq_data *d, bool mask)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	const struct reg_calibrate *int_cal;
+	const struct reg_calibrate *mod_sel_cal;
+	int gpio = irqd_to_hwirq(d);
+	int i, j;
+	int offset = 0;
+	int base_offset = 0;
+	const struct reg_cal_array *reg_off, *mod;
+	int bit_off = 0;
+	u16 value;
+	u16 value1;
+
+	int_cal = pctrl->pin_topologys->int_cal;
+	mod_sel_cal = pctrl->pin_topologys->mod_sel_cal;
+
+	if (gpio >= 0) {
+		for (i = 0; i < int_cal->size; i++)
+			if (gpio == int_cal->cal_array[i])
+				break;
+		for (j = 0; j < int_cal->reg_cal_size; j++) {
+			if (offset > i)
+				break;
+			offset += int_cal->reg[j].size;
+		}
+		reg_off = &(int_cal->reg[j-1]);
+		mod = &(mod_sel_cal->reg[j-1]);
+		bit_off = i-(offset-reg_off->size);
+		base_offset = reg_off->pmio_offset;
+		value = inw(pctrl->pmio_base+reg_off->pmio_offset);
+		if (mask)
+			value &= (~(1<<bit_off));
+		else
+			value |= (1<<bit_off);
+		outw(value, pctrl->pmio_base+reg_off->pmio_offset);
+		if (mask) {
+			value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+			value1 |= (1<<bit_off);
+			writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+		} else {
+			value1 = readw(pctrl->pm_pmio_base+mod->pmio_offset);
+			value1 |= (1<<bit_off);
+			writew(value1, pctrl->pm_pmio_base+mod->pmio_offset);
+		}
+	}
+}
+
+static void zhaoxin_gpio_irq_mask(struct irq_data *d)
+{
+	zhaoxin_gpio_irq_mask_unmask(d, true);
+}
+
+static void zhaoxin_gpio_irq_unmask(struct irq_data *d)
+{
+	zhaoxin_gpio_irq_mask_unmask(d, false);
+}
+
+/*
+ * parent-domain interrupt handler
+ */
+static irqreturn_t zhaoxin_gpio_irq(int irq, void *data)
+{
+	struct zhaoxin_pinctrl *pctrl = data;
+	struct gpio_chip *gc = &pctrl->chip;
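+	/*
+	 * Walk every interrupt status word: each bit that is both pending and
+	 * enabled is translated to a hwirq via cal_array and dispatched to the
+	 * per-pin handler mapped in the gpiochip irqdomain.
+	 */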
+	const struct reg_calibrate *init;
+	const struct reg_calibrate *stat_cal;
+	unsigned int i, bit_offset;
+	u16 status, enable;
+	unsigned long pending;
+	int index = 0;
+	int ret = 0;
+	int subirq;
+	unsigned int hwirq;
+
+	init = pctrl->pin_topologys->int_cal;
+	stat_cal = pctrl->pin_topologys->status_cal;
+	for (i = 0; i < init->reg_cal_size; i++) {
+		pending = 0;
+		status = readw(pctrl->pm_pmio_base + stat_cal->reg[i].pmio_offset);
+		enable = inw(pctrl->pmio_base + init->reg[i].pmio_offset);
+		enable &= status;
+		pending = enable;
+		for_each_set_bit(bit_offset, &pending, init->reg[i].size) {
+			hwirq = init->cal_array[index + bit_offset];
+			subirq = irq_find_mapping(gc->irq.domain, hwirq);
+			generic_handle_irq(subirq);
+		}
+
+		ret += pending ? 1 : 0;
+		index += init->reg[i].size;
+	}
+
+	return IRQ_RETVAL(ret);
+}
+
+static int zhaoxin_gpio_irq_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	unsigned int gpio = irqd_to_hwirq(d);
+	const struct index_cal_array *trigger_cal;
+	unsigned int pin;
+	unsigned long flags;
+	u8 index;
+	int position, point;
+	u16 value;
+	bool isup = true;
+
+	trigger_cal = pctrl->pin_topologys->trigger_cal;
+	pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+	if (type & IRQ_TYPE_EDGE_FALLING)
+		isup = true;
+	else if (type & IRQ_TYPE_EDGE_RISING)
+		isup = true;
+	else if (type & IRQ_TYPE_LEVEL_LOW)
+		isup = true;
+	else if (type & IRQ_TYPE_LEVEL_HIGH)
+		isup = false;
+
+	zhaoxin_gpio_set_gpio_mode_and_pull(pctrl, pin, isup);
+
+	for (position = 0; position < trigger_cal->size; position++)
+		if (trigger_cal->cal_array[position] == gpio)
+			break;
+
+	index = trigger_cal->index + ALIGN(position+1, 4)/4-1;
+	point = position % 4;
+
+	raw_spin_lock_irqsave(&pctrl->lock, flags);
+
+	value = zx_pad_read16(pctrl, index);
+
+	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
+		value |= TRIGGER_BOTH_EDGE << (point*4);
+	else if (type & IRQ_TYPE_EDGE_FALLING)
+		value |= TRIGGER_FALL_EDGE << (point*4);
+	else if (type & IRQ_TYPE_EDGE_RISING)
+		value |= TRIGGER_RISE_EDGE << (point*4);
+	else if (type & IRQ_TYPE_LEVEL_LOW)
+		value |= TRIGGER_LOW_LEVEL << (point*4);
+	else if (type & IRQ_TYPE_LEVEL_HIGH)
+		value |= TRIGGER_HIGH_LEVEL << (point*4);
+	else
+		dev_dbg(pctrl->dev, "%s wrong type\n", __func__);
+
+	zx_pad_write16(pctrl, index, value);
+
+	if (type & IRQ_TYPE_EDGE_BOTH)
+		irq_set_handler_locked(d, handle_edge_irq);
+	else if (type & IRQ_TYPE_LEVEL_MASK)
+		irq_set_handler_locked(d, handle_level_irq);
+	raw_spin_unlock_irqrestore(&pctrl->lock, flags);
+
+	return 0;
+}
+
+static int zhaoxin_gpio_irq_wake(struct irq_data *d, unsigned int on)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	int pin;
+
+	pin = zhaoxin_gpio_to_pin(pctrl, irqd_to_hwirq(d), NULL, NULL);
+	if (pin >= 0) {
+		if (on)
+			enable_irq_wake(pctrl->irq);
+		else
+			disable_irq_wake(pctrl->irq);
+	}
+
+	return 0;
+}
+
+static int zhaoxin_gpio_add_pin_ranges(struct gpio_chip *gc)
+{
+	struct zhaoxin_pinctrl *pctrl = gpiochip_get_data(gc);
+	int ret, i;
+
+	for (i = 0; i < pctrl->pin_map_size; i++) {
+		struct zhaoxin_pin_map2_gpio *map = &pctrl->pin_maps[i];
+
+		if (map->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP)
+			continue;
+		ret = gpiochip_add_pin_range(&pctrl->chip, dev_name(pctrl->dev),
+				map->zhaoxin_range_gpio_base, map->zhaoxin_range_pin_base,
+				map->zhaoxin_range_pin_size);
+		if (ret) {
+			dev_err(pctrl->dev, "failed to add GPIO pin range\n");
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static unsigned int zhaoxin_gpio_ngpio(const struct zhaoxin_pinctrl *pctrl)
+{
+	const struct zhaoxin_pin_map2_gpio *pin_maps;
+	unsigned int ngpio = 0;
+	int i;
+
+	for (i = 0; i < pctrl->pin_map_size; i++) {
+		pin_maps = &pctrl->pin_maps[i];
+		if (pin_maps->zhaoxin_range_gpio_base == ZHAOXIN_GPIO_BASE_NOMAP)
+			continue;
+		if (pin_maps->zhaoxin_range_gpio_base + pin_maps->zhaoxin_range_pin_size > ngpio)
+			ngpio = pin_maps->zhaoxin_range_gpio_base +
+				pin_maps->zhaoxin_range_pin_size;
+	}
+
+	return ngpio;
+}
+
+static int zhaoxin_gpio_probe(struct zhaoxin_pinctrl *pctrl, int irq)
+{
+	int ret;
+	struct gpio_irq_chip *girq;
+
+	pctrl->chip = zhaoxin_gpio_chip;
+
+	pctrl->chip.ngpio = zhaoxin_gpio_ngpio(pctrl);
+	pctrl->chip.label = dev_name(pctrl->dev);
+	pctrl->chip.parent = pctrl->dev;
+	pctrl->chip.base = -1;
+	pctrl->chip.add_pin_ranges = zhaoxin_gpio_add_pin_ranges;
+
+	pctrl->irq = irq;
+
+	pctrl->irqchip.name = dev_name(pctrl->dev);
+	pctrl->irqchip.irq_ack = zhaoxin_gpio_irq_ack;
+	pctrl->irqchip.irq_mask = zhaoxin_gpio_irq_mask;
+	pctrl->irqchip.irq_unmask = zhaoxin_gpio_irq_unmask;
+	pctrl->irqchip.irq_set_type = zhaoxin_gpio_irq_type;
+	pctrl->irqchip.irq_set_wake = zhaoxin_gpio_irq_wake;
+	pctrl->irqchip.flags = IRQCHIP_MASK_ON_SUSPEND;
+
+	ret = devm_request_irq(pctrl->dev, irq, zhaoxin_gpio_irq, IRQF_SHARED | IRQF_NO_THREAD,
+			dev_name(pctrl->dev), pctrl);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to request interrupt\n");
+		return ret;
+	}
+	girq = &pctrl->chip.irq;
+	girq->chip = &pctrl->irqchip;
+	/* This will let us handle the IRQ in the driver */
+	girq->parent_handler = NULL;
+	girq->num_parents = 0;
+	girq->default_type = IRQ_TYPE_NONE;
+	girq->handler = handle_bad_irq;
+	ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl);
+	if (ret) {
+		dev_err(pctrl->dev, "failed to register gpiochip\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int zhaoxin_pinctrl_pm_init(struct zhaoxin_pinctrl *pctrl)
+{
+	return 0;
+}
+
+static int zhaoxin_pinctrl_probe(struct platform_device *pdev,
+		const struct zhaoxin_pinctrl_soc_data *soc_data)
+{
+	struct zhaoxin_pinctrl *pctrl;
+	int ret, i, irq;
+	struct resource *res;
+	void __iomem *regs;
+
+	pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+	if (!pctrl)
+		return -ENOMEM;
+	pctrl->dev = &pdev->dev;
+	pctrl->soc = soc_data;
+	raw_spin_lock_init(&pctrl->lock);
+	pctrl->pin_topologys = pctrl->soc->pin_topologys;
+	pctrl->pin_map_size = pctrl->soc->pin_map_size;
+	pctrl->pin_maps = devm_kcalloc(&pdev->dev, pctrl->pin_map_size,
+			sizeof(*pctrl->pin_maps), GFP_KERNEL);
+	if (!pctrl->pin_maps)
+		return -ENOMEM;
+	for (i = 0; i < pctrl->pin_map_size; i++) {
+		struct zhaoxin_pin_map2_gpio *community = &pctrl->pin_maps[i];
+
+		*community = pctrl->soc->zhaoxin_pin_maps[i];
+	}
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	pctrl->pm_pmio_base = regs;
+	pctrl->pmio_base = 0x800;
+	pctrl->pmio_rx90 = 0x90;
+	pctrl->pmio_rx8c = 0x8c;
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return irq;
+
+	ret = zhaoxin_pinctrl_pm_init(pctrl);
+	if (ret)
+		return ret;
+	pctrl->pctldesc = zhaoxin_pinctrl_desc;
+	pctrl->pctldesc.name = dev_name(&pdev->dev);
+	pctrl->pctldesc.pins = pctrl->soc->pins;
+	pctrl->pctldesc.npins = pctrl->soc->npins;
+	pctrl->pctldev = devm_pinctrl_register(&pdev->dev, &pctrl->pctldesc, pctrl);
+	if (IS_ERR(pctrl->pctldev)) {
+		dev_err(&pdev->dev, "failed to register pinctrl driver\n");
+		return PTR_ERR(pctrl->pctldev);
+	}
+	ret = zhaoxin_gpio_probe(pctrl, irq);
+	if (ret)
+		return ret;
+	platform_set_drvdata(pdev, pctrl);
+	return 0;
+}
+
+int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev)
+{
+	const struct zhaoxin_pinctrl_soc_data *data;
+
+	data = device_get_match_data(&pdev->dev);
+	if (!data)
+		return -ENODATA;
+
+	return zhaoxin_pinctrl_probe(pdev, data);
+}
+EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_hid);
+
+int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev)
+{
+	const struct zhaoxin_pinctrl_soc_data *data;
+
+	data = zhaoxin_pinctrl_get_soc_data(pdev);
+	if (IS_ERR(data))
+		return PTR_ERR(data);
+
+	return zhaoxin_pinctrl_probe(pdev, data);
+}
+EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_probe_by_uid);
+
+const struct zhaoxin_pinctrl_soc_data *zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev)
+{
+	const struct zhaoxin_pinctrl_soc_data *data = NULL;
+	const struct zhaoxin_pinctrl_soc_data **table;
+	struct acpi_device *adev;
+	unsigned int i;
+
+	adev = ACPI_COMPANION(&pdev->dev);
+	if (adev) {
+		const void *match = device_get_match_data(&pdev->dev);
+
+		table = (const struct zhaoxin_pinctrl_soc_data **)match;
+		for (i = 0; table[i]; i++) {
+			if (!strcmp(adev->pnp.unique_id, table[i]->uid)) {
+				data = table[i];
+				break;
+			}
+		}
+	} else {
+		const struct platform_device_id *id;
+
+		id = platform_get_device_id(pdev);
+		if (!id)
+			return ERR_PTR(-ENODEV);
+
+		table = (const struct zhaoxin_pinctrl_soc_data **)id->driver_data;
+		data = table[pdev->id];
+	}
+
+	return data ?: ERR_PTR(-ENODATA);
+}
+EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_get_soc_data);
+
+#ifdef CONFIG_PM_SLEEP
+
+int zhaoxin_pinctrl_suspend_noirq(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_suspend_noirq);
+
+int zhaoxin_pinctrl_resume_noirq(struct device *dev)
+{
+	return 0;
+}
+EXPORT_SYMBOL_GPL(zhaoxin_pinctrl_resume_noirq);
+#endif
+
+MODULE_AUTHOR("www.zhaoxin.com");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Zhaoxin pinctrl/GPIO core driver");
diff --git a/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h
new file mode 100644
index 0000000000000000000000000000000000000000..cebea382dbe996cb1637a4328517b5407ef877df
--- /dev/null
+++ b/drivers/pinctrl/zhaoxin/pinctrl-zhaoxin.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * zhaoxin pinctrl common code
+ * Copyright(c) 2023 Shanghai Zhaoxin Corporation. All rights reserved.
+ */
+
+#ifndef PINCTRL_zhaoxin_H
+#define PINCTRL_zhaoxin_H
+
+#include <linux/gpio/driver.h>
+#include <linux/irq.h>
+#include <linux/pm.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+
+struct platform_device;
+struct device;
+
+/**
+ * struct zhaoxin_pingroup - description of a pin group
+ * @name: group name
+ * @pins: pins in this group
+ * @npins: number of pins in this group
+ * @mode: native mode in which the group is muxed out, if uniform
+ * @modes: per-pin native modes, used when @mode is not enough
+ */
+struct zhaoxin_pingroup {
+	const char *name;
+	const unsigned int *pins;
+	size_t npins;
+	unsigned short mode;
+	const unsigned int *modes;
+};
+
+/**
+ * struct zhaoxin_function - description of a function usable on groups
+ * @name: function name
+ * @groups: groups that can mux this function
+ * @ngroups: number of entries in @groups
+ */
+struct zhaoxin_function {
+	const char *name;
+	const char * const *groups;
+	size_t ngroups;
+};
+
+/**
+ * struct zhaoxin_pin_map2_gpio - mapping between pin numbers and GPIOs
+ * @zhaoxin_range_pin_base: first pin number of this range
+ * @zhaoxin_range_pin_size: number of pins in this range
+ * @zhaoxin_range_gpio_base: first GPIO number, or ZHAOXIN_GPIO_BASE_NOMAP
+ */
+struct zhaoxin_pin_map2_gpio {
+	unsigned int zhaoxin_range_pin_base;
+	unsigned int zhaoxin_range_pin_size;
+	int zhaoxin_range_gpio_base;
+};
+
+#define MAX_GPIO 256
+
+struct reg_cal_array {
+	int pmio_offset;
+	int size;
+};
+
+struct reg_calibrate {
+	const struct reg_cal_array *reg;
+	const int reg_cal_size;
+	const int *cal_array;
+	const int size;
+};
+
+struct index_cal_array {
+	int reg_port_base;
+	int reg_data_base;
+	int index;
+	int *cal_array;
+	int size;
+};
+
+struct zhaoxin_pin_topology {
+	const struct reg_calibrate *int_cal;
+	const struct reg_calibrate *mod_sel_cal;
+	const struct reg_calibrate *status_cal;
+	const struct index_cal_array *gpio_in_cal;
+	const struct index_cal_array *gpio_out_cal;
+	const struct index_cal_array *gpio_dir_cal;
+	const struct index_cal_array *trigger_cal;
+};
+
+#define TRIGGER_FALL_EDGE	0
+#define TRIGGER_RISE_EDGE	1
+#define TRIGGER_BOTH_EDGE	2
+#define TRIGGER_LOW_LEVEL	3
+#define TRIGGER_HIGH_LEVEL	4
+
+#define ZHAOXIN_GPIO_BASE_NOMAP -1
+
+struct zhaoxin_pinctrl_soc_data {
+	const char *uid;
+	const struct pinctrl_pin_desc *pins;
+	size_t npins;
+	const struct zhaoxin_pingroup *groups;
+	size_t ngroups;
+	const struct zhaoxin_function *functions;
+	size_t nfunctions;
+	const struct zhaoxin_pin_topology *pin_topologys;
+	const struct zhaoxin_pin_map2_gpio *zhaoxin_pin_maps;
+	size_t pin_map_size;
+};
+
+const struct zhaoxin_pinctrl_soc_data *
+	zhaoxin_pinctrl_get_soc_data(struct platform_device *pdev);
+
+struct zhaoxin_pinctrl {
+	struct device *dev;
+	raw_spinlock_t lock;
+	struct pinctrl_desc pctldesc;
+	struct pinctrl_dev *pctldev;
+	struct gpio_chip chip;
+	struct irq_chip irqchip;
+	const struct zhaoxin_pinctrl_soc_data *soc;
+	const struct zhaoxin_pin_topology *pin_topologys;
+	struct zhaoxin_pin_map2_gpio *pin_maps;
+	size_t pin_map_size;
+	int irq;
+	int pmio_base;
+	void __iomem *pm_pmio_base;
+	int pmio_rx90;
+	int pmio_rx8c;
+};
+
+int zhaoxin_pinctrl_probe_by_hid(struct platform_device *pdev);
+int zhaoxin_pinctrl_probe_by_uid(struct platform_device *pdev);
+
+#ifdef CONFIG_PM_SLEEP
+int zhaoxin_pinctrl_suspend_noirq(struct device *dev);
+int zhaoxin_pinctrl_resume_noirq(struct device *dev);
+#endif
+
+#endif /* PINCTRL_zhaoxin_H */
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 868b20361769c37048ef58392d9aae43abcaf021..f26534a4a83b5aedb85c616f90db739a0ceaba4a 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -9,6 +9,8 @@ source "drivers/platform/chrome/Kconfig"
 
 source "drivers/platform/mellanox/Kconfig"
 
+source "drivers/platform/mpam/Kconfig"
+
 source "drivers/platform/olpc/Kconfig"
 
 source "drivers/platform/surface/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index 41640172975a795df917a824d0608e356588c925..54ee16e4e4d8a96e7795dfe152827693b862ad56 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -5,9 +5,11 @@
 obj-$(CONFIG_X86)		+= x86/
 obj-$(CONFIG_LOONGARCH)		+= loongarch/
+obj-$(CONFIG_SW64)		+= sw64/
 obj-$(CONFIG_MELLANOX_PLATFORM)	+= mellanox/
 obj-$(CONFIG_MIPS)		+= mips/
 obj-$(CONFIG_OLPC_EC)		+= olpc/
 obj-$(CONFIG_GOLDFISH)		+= goldfish/
 obj-$(CONFIG_CHROME_PLATFORMS)	+= chrome/
 obj-$(CONFIG_SURFACE_PLATFORMS)	+= surface/
+obj-$(CONFIG_ARM_CPU_RESCTRL)	+= mpam/
diff --git a/drivers/platform/mpam/Kconfig b/drivers/platform/mpam/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..75f5b2454fbe45b9531e6a680ecdc55df166200b
--- /dev/null
+++ b/drivers/platform/mpam/Kconfig
@@ -0,0 +1,8 @@
+# Confusingly, this is everything but the CPU bits of MPAM. CPU here means
+# CPU resources, not containers or cgroups etc.
+config ARM_CPU_RESCTRL
+	bool
+	default y
+	depends on ARM64 && ARCH_HAS_CPU_RESCTRL
+	depends on MISC_FILESYSTEMS
+	select RESCTRL_RMID_DEPENDS_ON_CLOSID
diff --git a/drivers/platform/mpam/Makefile b/drivers/platform/mpam/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..37693be531c34151078e6e6055d0bb4eea9099bf
--- /dev/null
+++ b/drivers/platform/mpam/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_ARM_CPU_RESCTRL) += mpam_devices.o mpam_resctrl.o
diff --git a/drivers/platform/mpam/mpam_devices.c b/drivers/platform/mpam/mpam_devices.c
new file mode 100644
index 0000000000000000000000000000000000000000..906f8a6b694080a989904efd3963d84dcd1ed01c
--- /dev/null
+++ b/drivers/platform/mpam/mpam_devices.c
@@ -0,0 +1,2516 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2022 Arm Ltd.
+
+#define pr_fmt(fmt) "mpam: " fmt
+
+#include <linux/acpi.h>
+#include <linux/arm_mpam.h>
+#include <linux/atomic.h>
+#include <linux/bitfield.h>
+#include <linux/bitmap.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/interrupt.h>
+#include <linux/irqdesc.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/percpu.h>
+#include <linux/platform_device.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/srcu.h>
+#include <linux/workqueue.h>
+
+#include <acpi/pcc.h>
+
+#include <asm/mpam.h>
+
+#include "mpam_internal.h"
+
+extern int ddrc_freq;
+
+/*
+ * mpam_list_lock protects the SRCU lists when writing. Once the
+ * mpam_enabled key is enabled these lists are read-only,
+ * unless the error interrupt disables the driver.
+ */
+static DEFINE_MUTEX(mpam_list_lock);
+static LIST_HEAD(mpam_all_msc);
+
+struct srcu_struct mpam_srcu;
+
+enum mpam_machine_type mpam_current_machine;
+
+/* MPAM isn't available until all the MSC have been probed. */
+static u32 mpam_num_msc;
+
+static int mpam_cpuhp_state;
+static DEFINE_MUTEX(mpam_cpuhp_state_lock);
+
+/*
+ * The smallest common values for any CPU or MSC in the system.
+ * Generating traffic outside this range will result in screaming interrupts.
+ */
+u16 mpam_partid_max;
+u8 mpam_pmg_max;
+static bool partid_max_init, partid_max_published;
+static DEFINE_SPINLOCK(partid_max_lock);
+
+/*
+ * mpam is enabled once all devices have been probed from CPU online callbacks,
+ * scheduled via this work_struct. If access to an MSC depends on a CPU that
+ * was not brought online at boot, this can happen surprisingly late.
+ */
+static DECLARE_WORK(mpam_enable_work, &mpam_enable);
+
+/*
+ * All mpam error interrupts indicate a software bug. On receipt, disable the
+ * driver.
+ */
+static DECLARE_WORK(mpam_broken_work, &mpam_disable);
+
+/*
+ * An MSC is a container for resources, each identified by their RIS index.
+ * Components are a group of RIS that control the same thing.
+ * Classes are the set of components of the same type.
+ *
+ * e.g. The set of RIS that make up the L2 are a component. These are sometimes
+ * termed slices. They should be configured as if they were one MSC.
+ *
+ * e.g. The SoC probably has more than one L2, each attached to a distinct set
+ * of CPUs. All the L2 components are grouped as a class.
+ *
+ * When creating an MSC, struct mpam_msc is added to the mpam_all_msc list,
+ * then linked via struct mpam_ris to a component and a class.
+ * The same MSC may exist under different class->component paths, but the RIS
+ * index will be unique.
+ */
+LIST_HEAD(mpam_classes);
+
+static u32 __mpam_read_reg(struct mpam_msc *msc, u16 reg)
+{
+	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
+
+	return readl_relaxed(msc->mapped_hwpage + reg);
+}
+
+static void __mpam_write_reg(struct mpam_msc *msc, u16 reg, u32 val)
+{
+	WARN_ON_ONCE(reg + sizeof(u32) > msc->mapped_hwpage_sz);
+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
+
+	writel_relaxed(val, msc->mapped_hwpage + reg);
+}
+
+#define mpam_read_partsel_reg(msc, reg)			\
+({							\
+	u32 ____ret;					\
+							\
+	lockdep_assert_held_once(&msc->part_sel_lock);	\
+	____ret = __mpam_read_reg(msc, MPAMF_##reg);	\
+							\
+	____ret;					\
+})
+
+#define mpam_write_partsel_reg(msc, reg, val)		\
+({							\
+	lockdep_assert_held_once(&msc->part_sel_lock);	\
+	__mpam_write_reg(msc, MPAMCFG_##reg, val);	\
+})
+
+#define mpam_read_monsel_reg(msc, reg)			\
+({							\
+	u32 ____ret;					\
+							\
+	lockdep_assert_held_once(&msc->mon_sel_lock);	\
+	____ret = __mpam_read_reg(msc, MSMON_##reg);	\
+							\
+	____ret;					\
+})
+
+#define mpam_write_monsel_reg(msc, reg, val)		\
+({							\
+	lockdep_assert_held_once(&msc->mon_sel_lock);	\
+	__mpam_write_reg(msc, MSMON_##reg, val);	\
+})
+
+static u64 mpam_msc_read_idr(struct mpam_msc *msc)
+{
+	u64 idr_high = 0, idr_low;
+
+	lockdep_assert_held(&msc->part_sel_lock);
+
+	idr_low = mpam_read_partsel_reg(msc, IDR);
+	if (FIELD_GET(MPAMF_IDR_HAS_EXT, idr_low))
+		idr_high = mpam_read_partsel_reg(msc, IDR + 4);
+
+	return (idr_high << 32) | idr_low;
+}
+
+static void mpam_msc_zero_esr(struct mpam_msc *msc)
+{
+	writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR);
+	if (msc->has_extd_esr)
+		writel_relaxed(0, msc->mapped_hwpage + MPAMF_ESR + 4);
+}
+
+static u64 mpam_msc_read_esr(struct mpam_msc *msc)
+{
+	u64 esr_high = 0, esr_low;
+
+	esr_low = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR);
+	if (msc->has_extd_esr)
+		esr_high = readl_relaxed(msc->mapped_hwpage + MPAMF_ESR + 4);
+
+	return (esr_high << 32) | esr_low;
+}
+
+static void __mpam_part_sel(u8 ris_idx, u16 partid, struct mpam_msc *msc)
+{
+	u32 partsel;
+
+	lockdep_assert_held(&msc->part_sel_lock);
+
+	partsel = FIELD_PREP(MPAMCFG_PART_SEL_RIS, ris_idx) |
+		  FIELD_PREP(MPAMCFG_PART_SEL_PARTID_SEL, partid);
+	mpam_write_partsel_reg(msc, PART_SEL, partsel);
+}
+
+int mpam_register_requestor(u16 partid_max, u8 pmg_max)
+{
+	int err = 0;
+
+	spin_lock(&partid_max_lock);
+	if (!partid_max_init) {
+		mpam_partid_max = partid_max;
+		mpam_pmg_max = pmg_max;
+		partid_max_init = true;
+	} else if (!partid_max_published) {
+		mpam_partid_max = min(mpam_partid_max, partid_max);
+		mpam_pmg_max = min(mpam_pmg_max, pmg_max);
+	} else {
+		/* New requestors can't lower the values */
+		if ((partid_max < mpam_partid_max) || (pmg_max < mpam_pmg_max))
+			err = -EBUSY;
+	}
+	spin_unlock(&partid_max_lock);
+
+	return err;
+}
+EXPORT_SYMBOL(mpam_register_requestor);
+
+static struct mpam_component *
+mpam_component_alloc(struct mpam_class *class, int id, gfp_t gfp)
+{
+	struct mpam_component *comp;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	comp = kzalloc(sizeof(*comp), gfp);
+	if (!comp)
+		return ERR_PTR(-ENOMEM);
+
+	comp->comp_id = id;
+	INIT_LIST_HEAD_RCU(&comp->ris);
+	/* affinity is updated when ris are added */
+	INIT_LIST_HEAD_RCU(&comp->class_list);
+	comp->class = class;
+
+	list_add_rcu(&comp->class_list, &class->components);
+
+	return comp;
+}
+
+static struct mpam_component *
+mpam_component_get(struct mpam_class *class, int id, bool alloc, gfp_t gfp)
+{
+	struct mpam_component *comp;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_for_each_entry(comp, &class->components, class_list) {
+		if (comp->comp_id == id)
+			return comp;
+	}
+
+	if (!alloc)
+		return ERR_PTR(-ENOENT);
+
+	return mpam_component_alloc(class, id, gfp);
+}
+
+static struct mpam_class *
+mpam_class_alloc(u8 level_idx, enum mpam_class_types type, gfp_t gfp)
+{
+	struct mpam_class *class;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	class = kzalloc(sizeof(*class), gfp);
+	if (!class)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD_RCU(&class->components);
+	/* affinity is updated when ris are added */
+	class->level = level_idx;
+	class->type = type;
+	INIT_LIST_HEAD_RCU(&class->classes_list);
+	ida_init(&class->ida_csu_mon);
+	ida_init(&class->ida_mbwu_mon);
+
+	list_add_rcu(&class->classes_list, &mpam_classes);
+
+	return class;
+}
+
+static struct mpam_class *
+mpam_class_get(u8 level_idx, enum mpam_class_types type, bool alloc, gfp_t gfp)
+{
+	bool found = false;
+	struct mpam_class *class;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_for_each_entry(class, &mpam_classes, classes_list) {
+		if (class->type == type && class->level == level_idx) {
+			found = true;
+			break;
+		}
+	}
+
+	if (found)
+		return class;
+
+	if (!alloc)
+		return ERR_PTR(-ENOENT);
+
+	return mpam_class_alloc(level_idx, type, gfp);
+}
+
+static void mpam_class_destroy(struct mpam_class *class)
+{
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_del_rcu(&class->classes_list);
+	synchronize_srcu(&mpam_srcu);
+	kfree(class);
+}
+
+static void mpam_comp_destroy(struct mpam_component *comp)
+{
+	struct mpam_class *class = comp->class;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	list_del_rcu(&comp->class_list);
+	synchronize_srcu(&mpam_srcu);
+	kfree(comp);
+
+	if (list_empty(&class->components))
+		mpam_class_destroy(class);
+}
+
+/* Call synchronize_srcu() before freeing the ris */
+static void mpam_ris_destroy(struct mpam_msc_ris *ris)
+{
+	struct mpam_component *comp = ris->comp;
+	struct mpam_class *class = comp->class;
+	struct mpam_msc *msc = ris->msc;
+
+	lockdep_assert_held(&mpam_list_lock);
+	lockdep_assert_preemption_enabled();
+
+	clear_bit(ris->ris_idx, msc->ris_idxs);
+	list_del_rcu(&ris->comp_list);
+	list_del_rcu(&ris->msc_list);
+
+	cpumask_andnot(&comp->affinity, &comp->affinity, &ris->affinity);
+	cpumask_andnot(&class->affinity, &class->affinity, &ris->affinity);
+
+	if (list_empty(&comp->ris))
+		mpam_comp_destroy(comp);
+}
+
+/*
+ * There are two ways of reaching a struct mpam_msc_ris. Via the
+ * class->component->ris, or via the msc.
+ * When destroying the msc, the other side needs unlinking and cleaning up too.
+ * Call synchronize_srcu() before freeing the msc.
+ */
+static void mpam_msc_destroy(struct mpam_msc *msc)
+{
+	struct mpam_msc_ris *ris, *tmp;
+
+	lockdep_assert_held(&mpam_list_lock);
+	lockdep_assert_preemption_enabled();
+
+	list_for_each_entry_safe(ris, tmp, &msc->ris, msc_list)
+		mpam_ris_destroy(ris);
+}
+
+/*
+ * The cacheinfo structures are only populated when CPUs are online.
+ * This helper walks the device tree to include offline CPUs too.
+ */
+static int get_cpumask_from_cache_id(u32 cache_id, u32 cache_level,
+				     cpumask_t *affinity)
+{
+	int cpu, err;
+	u32 iter_level;
+	int iter_cache_id;
+	struct device_node *iter;
+
+	if (!acpi_disabled) {
+		if (mpam_current_machine == MPAM_YITIAN710)
+			return acpi_pptt_get_cpumask_from_cache_id_and_level(
+					cache_id, cache_level, affinity);
+		return acpi_pptt_get_cpumask_from_cache_id(cache_id, affinity);
+	}
+
+	for_each_possible_cpu(cpu) {
+		iter = of_get_cpu_node(cpu, NULL);
+		if (!iter) {
+			pr_err("Failed to find cpu%d device node\n", cpu);
+			return -ENOENT;
+		}
+
+		while ((iter = of_find_next_cache_node(iter))) {
+			err = of_property_read_u32(iter, "cache-level",
+						   &iter_level);
+			if (err || (iter_level != cache_level)) {
+				of_node_put(iter);
+				continue;
+			}
+
+			/*
+			 * get_cpu_cacheinfo_id() isn't ready until sometime
+			 * during device_initcall(). Use cache_of_get_id().
+			 */
+			iter_cache_id = cache_of_get_id(iter);
+			if (iter_cache_id == ~0UL) {
+				of_node_put(iter);
+				continue;
+			}
+
+			if (iter_cache_id == cache_id)
+				cpumask_set_cpu(cpu, affinity);
+
+			of_node_put(iter);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * cpumask_of_node() only knows about online CPUs. This can't tell us whether
+ * a class is represented on all possible CPUs.
+ */
+static void get_cpumask_from_node_id(u32 node_id, cpumask_t *affinity)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (node_id == cpu_to_node(cpu))
+			cpumask_set_cpu(cpu, affinity);
+	}
+}
+
+static int get_cpumask_from_cache(struct device_node *cache,
+				  cpumask_t *affinity)
+{
+	int err;
+	u32 cache_level;
+	int cache_id;
+
+	err = of_property_read_u32(cache, "cache-level", &cache_level);
+	if (err) {
+		pr_err("Failed to read cache-level from cache node\n");
+		return -ENOENT;
+	}
+
+	cache_id = cache_of_get_id(cache);
+	if (cache_id == ~0UL) {
+		pr_err("Failed to calculate cache-id from cache node\n");
+		return -ENOENT;
+	}
+
+	return get_cpumask_from_cache_id(cache_id, cache_level, affinity);
+}
+
+static int mpam_ris_get_affinity(struct mpam_msc *msc, cpumask_t *affinity,
+				 enum mpam_class_types type,
+				 struct mpam_class *class,
+				 struct mpam_component *comp)
+{
+	int err;
+
+	switch (type) {
+	case MPAM_CLASS_CACHE:
+		err = get_cpumask_from_cache_id(comp->comp_id, class->level,
+						affinity);
+		if (err)
+			return err;
+
+		if (cpumask_empty(affinity))
+			pr_warn_once("%s no CPUs associated with cache node\n",
+				     dev_name(&msc->pdev->dev));
+
+		break;
+	case MPAM_CLASS_MEMORY:
+		get_cpumask_from_node_id(comp->comp_id, affinity);
+		if (cpumask_empty(affinity))
+			pr_warn_once("%s no CPUs associated with memory node\n",
+				     dev_name(&msc->pdev->dev));
+		break;
+	case MPAM_CLASS_UNKNOWN:
+		return 0;
+	}
+
+	cpumask_and(affinity, affinity, &msc->accessibility);
+
+	return 0;
+}
+
+static int mpam_ris_create_locked(struct mpam_msc *msc, u8 ris_idx,
+				  enum mpam_class_types type, u8 class_id,
+				  int component_id, gfp_t gfp)
+{
+	int err;
+	struct mpam_msc_ris *ris;
+	struct mpam_class *class;
+	struct mpam_component *comp;
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	if (test_and_set_bit(ris_idx, msc->ris_idxs))
+		return -EBUSY;
+
+	ris = devm_kzalloc(&msc->pdev->dev, sizeof(*ris), gfp);
+	if (!ris)
+		return -ENOMEM;
+
+	class = mpam_class_get(class_id, type, true, gfp);
+	if (IS_ERR(class))
+		return PTR_ERR(class);
+
+	comp = mpam_component_get(class, component_id, true, gfp);
+	if (IS_ERR(comp)) {
+		if (list_empty(&class->components))
+			mpam_class_destroy(class);
+		return PTR_ERR(comp);
+	}
+
+	err = mpam_ris_get_affinity(msc, &ris->affinity, type, class, comp);
+	if (err) {
+		if (list_empty(&class->components))
+			mpam_class_destroy(class);
+		return err;
+	}
+
+	ris->ris_idx = ris_idx;
+	INIT_LIST_HEAD_RCU(&ris->comp_list);
+	INIT_LIST_HEAD_RCU(&ris->msc_list);
+	ris->msc = msc;
+	ris->comp = comp;
+
+	cpumask_or(&comp->affinity, &comp->affinity, &ris->affinity);
+	cpumask_or(&class->affinity, &class->affinity, &ris->affinity);
+	list_add_rcu(&ris->comp_list, &comp->ris);
+	list_add_rcu(&ris->msc_list, &msc->ris);
+
+	return 0;
+}
+
+int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx,
+		    enum mpam_class_types type, u8 class_id, int component_id)
+{
+	int err;
+
+	mutex_lock(&mpam_list_lock);
+	err = mpam_ris_create_locked(msc, ris_idx, type, class_id,
+				     component_id, GFP_KERNEL);
+	mutex_unlock(&mpam_list_lock);
+
+	return err;
+}
+
+static struct mpam_msc_ris *mpam_get_or_create_ris(struct mpam_msc *msc,
+						   u8 ris_idx)
+{
+	int err;
+	struct mpam_msc_ris *ris, *found = ERR_PTR(-ENOENT);
+
+	lockdep_assert_held(&mpam_list_lock);
+
+	if (!test_bit(ris_idx, msc->ris_idxs)) {
+		err = mpam_ris_create_locked(msc, ris_idx, MPAM_CLASS_UNKNOWN,
+					     0, 0, GFP_ATOMIC);
+		if (err)
+			return ERR_PTR(err);
+	}
+
+	list_for_each_entry(ris, &msc->ris, msc_list) {
+		if (ris->ris_idx == ris_idx) {
+			found = ris;
+			break;
+		}
+	}
+
+	return found;
+}
+
+static void mpam_ris_hw_probe(struct mpam_msc_ris *ris)
+{
+	int err;
+	struct mpam_msc *msc = ris->msc;
+	struct mpam_props *props = &ris->props;
+	struct mpam_class *class = ris->comp->class;
+
+	lockdep_assert_held(&msc->lock);
+	lockdep_assert_held(&msc->part_sel_lock);
+
+	/* Cache Capacity Partitioning */
+	if (FIELD_GET(MPAMF_IDR_HAS_CCAP_PART, ris->idr)) {
+		u32 ccap_features = mpam_read_partsel_reg(msc, CCAP_IDR);
+
+		props->cmax_wd = FIELD_GET(MPAMF_CCAP_IDR_CMAX_WD, ccap_features);
+		if (props->cmax_wd)
+			mpam_set_feature(mpam_feat_ccap_part, props);
+	}
+
+	/* Cache Portion Partitioning */
+	if (FIELD_GET(MPAMF_IDR_HAS_CPOR_PART, ris->idr)) {
+		u32 cpor_features = mpam_read_partsel_reg(msc, CPOR_IDR);
+
+		props->cpbm_wd = FIELD_GET(MPAMF_CPOR_IDR_CPBM_WD, cpor_features);
+		if (props->cpbm_wd)
+			mpam_set_feature(mpam_feat_cpor_part, props);
+	}
+
+	/* Memory bandwidth partitioning */
+	if (FIELD_GET(MPAMF_IDR_HAS_MBW_PART, ris->idr)) {
+		u32 mbw_features = mpam_read_partsel_reg(msc, MBW_IDR);
+
+		/* portion bitmap resolution */
+		props->mbw_pbm_bits = FIELD_GET(MPAMF_MBW_IDR_BWPBM_WD, mbw_features);
+		if (props->mbw_pbm_bits &&
+		    FIELD_GET(MPAMF_MBW_IDR_HAS_PBM, mbw_features))
+			mpam_set_feature(mpam_feat_mbw_part, props);
+
+		props->bwa_wd = FIELD_GET(MPAMF_MBW_IDR_BWA_WD, mbw_features);
+		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MAX, mbw_features))
+			mpam_set_feature(mpam_feat_mbw_max, props);
+
+		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_MIN, mbw_features))
+			mpam_set_feature(mpam_feat_mbw_min, props);
+
+		if (props->bwa_wd && FIELD_GET(MPAMF_MBW_IDR_HAS_PROP, mbw_features))
+			mpam_set_feature(mpam_feat_mbw_prop, props);
+	}
+
+	/* Priority partitioning */
+	if (FIELD_GET(MPAMF_IDR_HAS_PRI_PART, ris->idr)) {
+		u32 pri_features = mpam_read_partsel_reg(msc, PRI_IDR);
+
+		props->intpri_wd = FIELD_GET(MPAMF_PRI_IDR_INTPRI_WD, pri_features);
+		if (props->intpri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_INTPRI, pri_features)) {
+			mpam_set_feature(mpam_feat_intpri_part, props);
+			if (FIELD_GET(MPAMF_PRI_IDR_INTPRI_0_IS_LOW, pri_features))
+				mpam_set_feature(mpam_feat_intpri_part_0_low, props);
+		}
+
+		props->dspri_wd = FIELD_GET(MPAMF_PRI_IDR_DSPRI_WD, pri_features);
+		if (props->dspri_wd && FIELD_GET(MPAMF_PRI_IDR_HAS_DSPRI, pri_features)) {
+			mpam_set_feature(mpam_feat_dspri_part, props);
+			if (FIELD_GET(MPAMF_PRI_IDR_DSPRI_0_IS_LOW, pri_features))
+				mpam_set_feature(mpam_feat_dspri_part_0_low, props);
+		}
+	}
+
+	/* Performance Monitoring */
+	if (FIELD_GET(MPAMF_IDR_HAS_MSMON, ris->idr)) {
+		u32 msmon_features = mpam_read_partsel_reg(msc, MSMON_IDR);
+
+		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_CSU, msmon_features)) {
+			u32 csumonidr, discard;
+
+			/*
+			 * If the firmware "arm,not-ready-us" property is
+			 * missing, the CSU counters can't be used. Should we
+			 * wait forever?
+			 */
+			err = device_property_read_u32(&msc->pdev->dev,
+						       "arm,not-ready-us",
+						       &discard);
+
+			csumonidr = mpam_read_partsel_reg(msc, CSUMON_IDR);
+			props->num_csu_mon = FIELD_GET(MPAMF_CSUMON_IDR_NUM_MON, csumonidr);
+			if (props->num_csu_mon && !err)
+				mpam_set_feature(mpam_feat_msmon_csu, props);
+			else if (props->num_csu_mon)
+				pr_err_once("Counters are not usable because not-ready timeout was not provided by firmware.\n");
+		}
+		if (FIELD_GET(MPAMF_MSMON_IDR_MSMON_MBWU, msmon_features)) {
+			bool has_long;
+			u32 mbwumonidr = mpam_read_partsel_reg(msc, MBWUMON_IDR);
+
+			props->num_mbwu_mon = FIELD_GET(MPAMF_MBWUMON_IDR_NUM_MON, mbwumonidr);
+			if (props->num_mbwu_mon)
+				mpam_set_feature(mpam_feat_msmon_mbwu, props);
+
+			if (FIELD_GET(MPAMF_MBWUMON_IDR_HAS_RWBW, mbwumonidr))
+				mpam_set_feature(mpam_feat_msmon_mbwu_rwbw, props);
+
+			/*
+			 * Treat long counter and its extension, lwd as mutually
+			 * exclusive feature bits. Though these are dependent
+			 * fields at the implementation level, there would never
+			 * be a need for mpam_feat_msmon_mbwu_44counter (long
+			 * counter) and mpam_feat_msmon_mbwu_63counter (lwd)
+			 * bits to be set together.
+			 *
+			 * mpam_feat_msmon_mbwu isn't treated as an exclusive
+			 * bit as this feature bit would be used as the "front
+			 * facing feature bit" for any checks related to mbwu
+			 * monitors.
+			 */
+			has_long = FIELD_GET(MPAMF_MBWUMON_IDR_HAS_LONG, mbwumonidr);
+			if (props->num_mbwu_mon && has_long) {
+				if (FIELD_GET(MPAMF_MBWUMON_IDR_LWD, mbwumonidr))
+					mpam_set_feature(mpam_feat_msmon_mbwu_63counter, props);
+				else
+					mpam_set_feature(mpam_feat_msmon_mbwu_44counter, props);
+			}
+		}
+	}
+
+	if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr))
+		if (mpam_current_machine == MPAM_YITIAN710 && class->type == MPAM_CLASS_MEMORY)
+			mpam_set_feature(mpam_feat_impl_msmon_mbwu, props);
+
+	/*
+	 * RIS with PARTID narrowing don't have enough storage for one
+	 * configuration per PARTID. If these are in a class we could use,
+	 * reduce the supported partid_max to match the number of intpartid.
+	 * If the class is unknown, just ignore it.
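+	 * e.g. an MSC with sixteen intPARTIDs reports an INTPARTID_MAX of 15,
+	 * so msc->partid_max is clamped down to 15.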
+	 */
+	if (FIELD_GET(MPAMF_IDR_HAS_PARTID_NRW, ris->idr) &&
+	    class->type != MPAM_CLASS_UNKNOWN) {
+		u32 nrwidr = mpam_read_partsel_reg(msc, PARTID_NRW_IDR);
+		u16 partid_max = FIELD_GET(MPAMF_PARTID_NRW_IDR_INTPARTID_MAX, nrwidr);
+
+		mpam_set_feature(mpam_feat_partid_nrw, props);
+		msc->partid_max = min(msc->partid_max, partid_max);
+	}
+}
+
+static int mpam_msc_hw_probe(struct mpam_msc *msc)
+{
+	u64 idr;
+	u16 partid_max;
+	u8 ris_idx, pmg_max;
+	struct mpam_msc_ris *ris;
+
+	lockdep_assert_held(&msc->lock);
+
+	spin_lock(&msc->part_sel_lock);
+	idr = mpam_read_partsel_reg(msc, AIDR);
+	if ((idr & MPAMF_AIDR_ARCH_MAJOR_REV) != MPAM_ARCHITECTURE_V1) {
+		pr_err_once("%s does not match MPAM architecture v1.0\n",
+			    dev_name(&msc->pdev->dev));
+		spin_unlock(&msc->part_sel_lock);
+		return -EIO;
+	}
+
+	idr = mpam_msc_read_idr(msc);
+	spin_unlock(&msc->part_sel_lock);
+
+	msc->ris_max = FIELD_GET(MPAMF_IDR_RIS_MAX, idr);
+
+	/* Use these values so partid/pmg always starts with a valid value */
+	msc->partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
+	msc->pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
+
+	for (ris_idx = 0; ris_idx <= msc->ris_max; ris_idx++) {
+		spin_lock(&msc->part_sel_lock);
+		__mpam_part_sel(ris_idx, 0, msc);
+		idr = mpam_msc_read_idr(msc);
+		spin_unlock(&msc->part_sel_lock);
+
+		partid_max = FIELD_GET(MPAMF_IDR_PARTID_MAX, idr);
+		pmg_max = FIELD_GET(MPAMF_IDR_PMG_MAX, idr);
+		msc->partid_max = min(msc->partid_max, partid_max);
+		msc->pmg_max = min(msc->pmg_max, pmg_max);
+		msc->has_extd_esr = FIELD_GET(MPAMF_IDR_HAS_EXT_ESR, idr);
+
+		ris = mpam_get_or_create_ris(msc, ris_idx);
+		if (IS_ERR(ris))
+			return PTR_ERR(ris);
+		ris->idr = idr;
+
+		spin_lock(&msc->part_sel_lock);
+		__mpam_part_sel(ris_idx, 0, msc);
+		mpam_ris_hw_probe(ris);
+		spin_unlock(&msc->part_sel_lock);
+	}
+
+	spin_lock(&partid_max_lock);
+	mpam_partid_max = min(mpam_partid_max, msc->partid_max);
+	mpam_pmg_max = min(mpam_pmg_max, msc->pmg_max);
+	spin_unlock(&partid_max_lock);
+
+	msc->probed = true;
+
+	return 0;
+}
+
+struct mon_read {
+	struct mpam_msc_ris *ris;
+	struct mon_cfg *ctx;
+	enum mpam_device_features type;
+	u64 *val;
+	int err;
+};
+
+static bool mpam_ris_has_mbwu_long_counter(struct mpam_msc_ris *ris)
+{
+	return (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props) ||
+		mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props));
+}
+
+static u64 mpam_msc_read_mbwu_l(struct mpam_msc *msc)
+{
+	int retry = 3;
+	u32 mbwu_l_low;
+	u64 mbwu_l_high1, mbwu_l_high2;
+
+	lockdep_assert_held_once(&msc->mon_sel_lock);
+
+	WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz);
+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
+
+	mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4);
+	do {
+		mbwu_l_high1 = mbwu_l_high2;
+		mbwu_l_low = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L);
+		mbwu_l_high2 = readl_relaxed(msc->mapped_hwpage + MSMON_MBWU_L + 4);
+
+		retry--;
+	} while (mbwu_l_high1 != mbwu_l_high2 && retry > 0);
+
+	if (mbwu_l_high2 == mbwu_l_high1)
+		return (mbwu_l_high1 << 32) | mbwu_l_low;
+	return MSMON___NRDY_L;
+}
+
+static void mpam_msc_zero_mbwu_l(struct mpam_msc *msc)
+{
+	lockdep_assert_held_once(&msc->mon_sel_lock);
+
+	WARN_ON_ONCE((MSMON_MBWU_L + sizeof(u64)) > msc->mapped_hwpage_sz);
+	WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), &msc->accessibility));
+
+	writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L);
+	writel_relaxed(0, msc->mapped_hwpage + MSMON_MBWU_L + 4);
+}
+
+static void gen_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
+				   u32 *flt_val)
+{
+	struct mon_cfg *ctx = m->ctx;
+
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		*ctl_val = MSMON_CFG_MBWU_CTL_TYPE_CSU;
+		break;
+	case mpam_feat_msmon_mbwu:
+		*ctl_val = MSMON_CFG_MBWU_CTL_TYPE_MBWU;
+		break;
+	default:
+		return;
+	}
+
+	/*
+	 * For CSU counters it's implementation-defined what happens when not
+	 * filtering by partid.
+	 */
+	*ctl_val |= MSMON_CFG_x_CTL_MATCH_PARTID;
+
+	*flt_val = FIELD_PREP(MSMON_CFG_MBWU_FLT_PARTID, ctx->partid);
+	if (m->ctx->match_pmg) {
+		*ctl_val |= MSMON_CFG_x_CTL_MATCH_PMG;
+		*flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_PMG, ctx->pmg);
+	}
+
+	if (mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, &m->ris->props))
+		*flt_val |= FIELD_PREP(MSMON_CFG_MBWU_FLT_RWBW, ctx->opts);
+}
+
+static void read_msmon_ctl_flt_vals(struct mon_read *m, u32 *ctl_val,
+				    u32 *flt_val)
+{
+	struct mpam_msc *msc = m->ris->msc;
+
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		*ctl_val = mpam_read_monsel_reg(msc, CFG_CSU_CTL);
+		*flt_val = mpam_read_monsel_reg(msc, CFG_CSU_FLT);
+		break;
+	case mpam_feat_msmon_mbwu:
+		*ctl_val = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
+		*flt_val = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
+		break;
+	default:
+		return;
+	}
+}
+
+static void write_msmon_ctl_flt_vals(struct mon_read *m, u32 ctl_val,
+				     u32 flt_val)
+{
+	struct mpam_msc *msc = m->ris->msc;
+	struct msmon_mbwu_state *mbwu_state;
+
+	/*
+	 * Write the ctl_val with the enable bit cleared, reset the counter,
+	 * then enable the counter.
+	 */
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		mpam_write_monsel_reg(msc, CFG_CSU_FLT, flt_val);
+		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val);
+		mpam_write_monsel_reg(msc, CSU, 0);
+		mpam_write_monsel_reg(msc, CFG_CSU_CTL, ctl_val|MSMON_CFG_x_CTL_EN);
+		break;
+	case mpam_feat_msmon_mbwu:
+		mpam_write_monsel_reg(msc, CFG_MBWU_FLT, flt_val);
+		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val);
+
+		if (mpam_ris_has_mbwu_long_counter(m->ris))
+			mpam_msc_zero_mbwu_l(m->ris->msc);
+		else
+			mpam_write_monsel_reg(msc, MBWU, 0);
+
+		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, ctl_val|MSMON_CFG_x_CTL_EN);
+
+		mbwu_state = &m->ris->mbwu_state[m->ctx->mon];
+		if (mbwu_state)
+			mbwu_state->prev_val = 0;
+
+		break;
+	default:
+		return;
+	}
+}
+
+static u64 mpam_msmon_overflow_val(struct mpam_msc_ris *ris)
+{
+	/* TODO: implement scaling counters */
+	if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props))
+		return GENMASK_ULL(62, 0);
+	else if (mpam_has_feature(mpam_feat_msmon_mbwu_44counter, &ris->props))
+		return GENMASK_ULL(43, 0);
+	else
+		return GENMASK_ULL(30, 0);
+}
+
+static void __ris_msmon_read(void *arg)
+{
+	bool nrdy = false;
+	unsigned long flags;
+	bool config_mismatch;
+	struct mon_read *m = arg;
+	u64 now, overflow_val = 0;
+	struct mon_cfg *ctx = m->ctx;
+	bool reset_on_next_read = false;
+	struct mpam_msc_ris *ris = m->ris;
+	struct mpam_msc *msc = m->ris->msc;
+	struct msmon_mbwu_state *mbwu_state;
+	u32 mon_sel, ctl_val, flt_val, cur_ctl, cur_flt;
+
+	lockdep_assert_held(&msc->lock);
+
+	spin_lock_irqsave(&msc->mon_sel_lock, flags);
+	mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, ctx->mon) |
+		  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
+	mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);
+
+	if (m->type == mpam_feat_msmon_mbwu) {
+		mbwu_state = &ris->mbwu_state[ctx->mon];
+		if (mbwu_state) {
+			reset_on_next_read = mbwu_state->reset_on_next_read;
+			mbwu_state->reset_on_next_read = false;
+		}
+	}
+
+	/*
+	 * Read the existing configuration to avoid re-writing the same values.
+	 * This saves waiting for 'nrdy' on subsequent reads.
+	 */
+	read_msmon_ctl_flt_vals(m, &cur_ctl, &cur_flt);
+	gen_msmon_ctl_flt_vals(m, &ctl_val, &flt_val);
+	config_mismatch = cur_flt != flt_val ||
+			  cur_ctl != (ctl_val | MSMON_CFG_x_CTL_EN);
+
+	if (config_mismatch || reset_on_next_read)
+		write_msmon_ctl_flt_vals(m, ctl_val, flt_val);
+
+	switch (m->type) {
+	case mpam_feat_msmon_csu:
+		now = mpam_read_monsel_reg(msc, CSU);
+		nrdy = now & MSMON___NRDY;
+		now = FIELD_GET(MSMON___VALUE, now);
+		break;
+	case mpam_feat_msmon_mbwu:
+		/*
+		 * If long or lwd counters are supported, use them, else revert
+		 * to the 32 bit counter.
+		 */
+		if (mpam_ris_has_mbwu_long_counter(ris)) {
+			now = mpam_msc_read_mbwu_l(msc);
+			nrdy = now & MSMON___NRDY_L;
+			if (mpam_has_feature(mpam_feat_msmon_mbwu_63counter, &ris->props))
+				now = FIELD_GET(MSMON___LWD_VALUE, now);
+			else
+				now = FIELD_GET(MSMON___L_VALUE, now);
+		} else {
+			now = mpam_read_monsel_reg(msc, MBWU);
+			nrdy = now & MSMON___NRDY;
+			now = FIELD_GET(MSMON___VALUE, now);
+		}
+
+		if (nrdy)
+			break;
+
+		if (!mbwu_state)
+			break;
+
+		/* Add any pre-overflow value to mbwu_state->correction */
+		if (mbwu_state->prev_val > now)
+			overflow_val = mpam_msmon_overflow_val(ris) - mbwu_state->prev_val;
+
+		mbwu_state->prev_val = now;
+		mbwu_state->correction += overflow_val;
+
+		/* Include bandwidth consumed before the last hardware reset */
+		now += mbwu_state->correction;
+		break;
+	default:
+		spin_unlock_irqrestore(&msc->mon_sel_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&msc->mon_sel_lock, flags);
+
+	if (nrdy) {
+		m->err = -EBUSY;
+		return;
+	}
+
+	*(m->val) += now;
+}
+
+static void __ris_impl_msmon_read(void *arg)
+{
+	unsigned long flags;
+	struct mon_read *m = arg;
+	u64 mb_val = 0;
+	struct mon_cfg *ctx = m->ctx;
+	struct mpam_msc *msc = m->ris->msc;
+	u32 custom_reg_base_addr, cycle, val;
+
+	lockdep_assert_held(&msc->lock);
+	if (m->type != mpam_feat_impl_msmon_mbwu)
+		return;
+
+	/* Other machines can extend this function */
+	if (mpam_current_machine != MPAM_YITIAN710)
+		return;
+
+	spin_lock_irqsave(&msc->part_sel_lock, flags);
+
+	__mpam_write_reg(msc, MPAMCFG_PART_SEL, ctx->mon);
+
+	custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR);
+
+	cycle = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_WINDW_OFFSET);
+	val = __mpam_read_reg(msc, custom_reg_base_addr + MPAMF_CUST_MBWC_OFFSET);
+
+	spin_unlock_irqrestore(&msc->part_sel_lock, flags);
+
+	if (val & MSMON___NRDY) {
+		m->err = -EBUSY;
+		return;
+	}
+
+	mb_val = MBWU_GET(val);
+
+	mb_val = mb_val * 32 * ddrc_freq * 1000000 / cycle; /* B/s */
+	*(m->val) += mb_val;
+}
+
+static int _msmon_read(struct mpam_component *comp, struct mon_read *arg)
+{
+	int err = 0, idx;
+	struct mpam_msc *msc;
+	struct mpam_msc_ris *ris;
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(ris, &comp->ris, comp_list) {
+		arg->ris = ris;
+
+		msc = ris->msc;
+		mutex_lock(&msc->lock);
+		if (arg->type == mpam_feat_msmon_csu ||
+		    arg->type == mpam_feat_msmon_mbwu)
+			err = smp_call_function_any(&msc->accessibility,
+						    __ris_msmon_read, arg, true);
+		else if (arg->type == mpam_feat_impl_msmon_mbwu)
+			err = smp_call_function_any(&msc->accessibility,
+						    __ris_impl_msmon_read, arg, true);
+		else
+			err = -EOPNOTSUPP;
+		mutex_unlock(&msc->lock);
+		if (!err && arg->err)
+			err = arg->err;
+		if (err)
+			break;
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+
+	return err;
+}
+
+int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx,
+		    enum mpam_device_features type, u64 *val)
+{
+	int err;
+	struct mon_read arg;
+	u64 wait_jiffies = 0;
+	struct mpam_props *cprops = &comp->class->props;
+
+	might_sleep();
+
+	if (!mpam_is_enabled())
+		return -EIO;
+
+	if (!mpam_has_feature(type, cprops))
+		return -EOPNOTSUPP;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx = ctx;
+	arg.type = type;
+	arg.val = val;
+	*val = 0;
+
+	err = _msmon_read(comp, &arg);
+	if (err == -EBUSY)
+		wait_jiffies = usecs_to_jiffies(comp->class->nrdy_usec);
+
+	while (wait_jiffies)
+		wait_jiffies = schedule_timeout_uninterruptible(wait_jiffies);
+
+	if (err == -EBUSY) {
+		memset(&arg, 0, sizeof(arg));
+		arg.ctx = ctx;
+		arg.type = type;
+		arg.val = val;
+		*val = 0;
+
+		err = _msmon_read(comp, &arg);
+	}
+
+	return err;
+}
+
+void mpam_msmon_reset_all_mbwu(struct mpam_component *comp)
+{
+	int idx, i;
+	unsigned long flags;
+	struct mpam_msc *msc;
+	struct mpam_msc_ris *ris;
+
+	if (!mpam_is_enabled())
+		return;
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(ris, &comp->ris, comp_list) {
+		if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props))
+			continue;
+
+		msc = ris->msc;
+		spin_lock_irqsave(&msc->mon_sel_lock, flags);
+		for (i = 0; i < ris->props.num_mbwu_mon; i++) {
+			ris->mbwu_state[i].correction = 0;
+			ris->mbwu_state[i].reset_on_next_read = true;
+		}
+		spin_unlock_irqrestore(&msc->mon_sel_lock, flags);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+}
+
+void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx)
+{
+	int idx;
+	unsigned long flags;
+	struct mpam_msc *msc;
+	struct mpam_msc_ris *ris;
+
+	if (!mpam_is_enabled())
+		return;
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(ris, &comp->ris, comp_list) {
+		if (!mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props))
+			continue;
+
+		msc = ris->msc;
+		spin_lock_irqsave(&msc->mon_sel_lock, flags);
+		ris->mbwu_state[ctx->mon].correction = 0;
+		ris->mbwu_state[ctx->mon].reset_on_next_read = true;
+		spin_unlock_irqrestore(&msc->mon_sel_lock, flags);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+}
+
+static void mpam_reset_msc_bitmap(struct mpam_msc *msc, u16 reg, u16 wd)
+{
+	u32 num_words, msb;
+	u32 bm = ~0;
+	int i;
+
+	lockdep_assert_held(&msc->part_sel_lock);
+
+	/*
+	 * Write all ~0 to all but the last 32bit-word, which may
+	 * have fewer bits...
+	 */
+	num_words = DIV_ROUND_UP(wd, 32);
+	for (i = 0; i < num_words - 1; i++, reg += sizeof(bm))
+		__mpam_write_reg(msc, reg, bm);
+
+	/*
+	 * ...and then the last (maybe) partial 32bit word. When wd is a
+	 * multiple of 32, msb should be 31 to write a full 32bit word.
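+	 * e.g. wd = 40 writes ~0 to the first word, then GENMASK(7, 0) to
+	 * the second.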
+	 */
+	msb = (wd - 1) % 32;
+	bm = GENMASK(msb, 0);
+	if (bm)
+		__mpam_write_reg(msc, reg, bm);
+}
+
+static void mpam_reprogram_ris_partid(struct mpam_msc_ris *ris, u16 partid,
+				      struct mpam_config *cfg)
+{
+	u32 pri_val = 0;
+	u16 cmax = MPAMCFG_CMAX_CMAX;
+	struct mpam_msc *msc = ris->msc;
+	u16 bwa_fract = MPAMCFG_MBW_MAX_MAX;
+	struct mpam_props *rprops = &ris->props;
+	u16 dspri = GENMASK(rprops->dspri_wd, 0);
+	u16 intpri = GENMASK(rprops->intpri_wd, 0);
+	u32 custom_reg_base_addr;
+
+	spin_lock(&msc->part_sel_lock);
+	__mpam_part_sel(ris->ris_idx, partid, msc);
+
+	if (mpam_has_feature(mpam_feat_partid_nrw, rprops))
+		mpam_write_partsel_reg(msc, INTPARTID,
+				       (MPAMCFG_PART_SEL_INTERNAL | partid));
+
+	if (mpam_has_feature(mpam_feat_cpor_part, rprops)) {
+		if (mpam_has_feature(mpam_feat_cpor_part, cfg))
+			mpam_write_partsel_reg(msc, CPBM, cfg->cpbm);
+		else
+			mpam_reset_msc_bitmap(msc, MPAMCFG_CPBM,
+					      rprops->cpbm_wd);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_part, rprops)) {
+		if (mpam_has_feature(mpam_feat_mbw_part, cfg))
+			mpam_write_partsel_reg(msc, MBW_PBM, cfg->mbw_pbm);
+		else
+			mpam_reset_msc_bitmap(msc, MPAMCFG_MBW_PBM,
+					      rprops->mbw_pbm_bits);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_min, rprops))
+		mpam_write_partsel_reg(msc, MBW_MIN, 0);
+
+	if (mpam_has_feature(mpam_feat_mbw_max, rprops)) {
+		if (mpam_has_feature(mpam_feat_mbw_max, cfg))
+			mpam_write_partsel_reg(msc, MBW_MAX, cfg->mbw_max);
+		else
+			mpam_write_partsel_reg(msc, MBW_MAX, bwa_fract);
+	}
+
+	if (mpam_has_feature(mpam_feat_mbw_prop, rprops))
+		mpam_write_partsel_reg(msc, MBW_PROP, bwa_fract);
+
+	if (mpam_has_feature(mpam_feat_ccap_part, rprops))
+		mpam_write_partsel_reg(msc, CMAX, cmax);
+
+	if (mpam_has_feature(mpam_feat_intpri_part, rprops) ||
+	    mpam_has_feature(mpam_feat_dspri_part, rprops)) {
+		/* aces high? */
+		if (!mpam_has_feature(mpam_feat_intpri_part_0_low, rprops))
+			intpri = 0;
+		if (!mpam_has_feature(mpam_feat_dspri_part_0_low, rprops))
+			dspri = 0;
+
+		if (mpam_has_feature(mpam_feat_intpri_part, rprops))
+			pri_val |= FIELD_PREP(MPAMCFG_PRI_INTPRI, intpri);
+		if (mpam_has_feature(mpam_feat_dspri_part, rprops))
+			pri_val |= FIELD_PREP(MPAMCFG_PRI_DSPRI, dspri);
+
+		mpam_write_partsel_reg(msc, PRI, pri_val);
+	}
+
+	if (FIELD_GET(MPAMF_IDR_HAS_IMPL_IDR, ris->idr)) {
+		if (mpam_current_machine == MPAM_YITIAN710) {
+			custom_reg_base_addr = __mpam_read_reg(msc, MPAMF_IMPL_IDR);
+			__mpam_write_reg(msc, custom_reg_base_addr +
+					 MPAMF_CUST_WINDW_OFFSET,
+					 MBWU_WINWD_MAX);
+		}
+	}
+
+	spin_unlock(&msc->part_sel_lock);
+}
+
+struct reprogram_ris {
+	struct mpam_msc_ris *ris;
+	struct mpam_config *cfg;
+};
+
+/* Call with MSC lock held */
+static int mpam_reprogram_ris(void *_arg)
+{
+	u16 partid, partid_max;
+	struct reprogram_ris *arg = _arg;
+	struct mpam_msc_ris *ris = arg->ris;
+	struct mpam_config *cfg = arg->cfg;
+
+	if (ris->in_reset_state)
+		return 0;
+
+	spin_lock(&partid_max_lock);
+	partid_max = mpam_partid_max;
+	spin_unlock(&partid_max_lock);
+	for (partid = 0; partid <= partid_max; partid++)
+		mpam_reprogram_ris_partid(ris, partid, cfg);
+
+	return 0;
+}
+
+static int mpam_restore_mbwu_state(void *_ris)
+{
+	int i;
+	struct mon_read mbwu_arg;
+	struct mpam_msc_ris *ris = _ris;
+
+	for (i = 0; i < ris->props.num_mbwu_mon; i++) {
+		if (ris->mbwu_state[i].enabled) {
+			mbwu_arg.ris = ris;
+			mbwu_arg.ctx = &ris->mbwu_state[i].cfg;
+			mbwu_arg.type = mpam_feat_msmon_mbwu;
+
+			__ris_msmon_read(&mbwu_arg);
+		}
+	}
+
+	return 0;
+}
+
+static int mpam_save_mbwu_state(void *arg)
+{
+	int i;
+	u64 val;
+	struct mon_cfg *cfg;
+	unsigned long flags;
+	u32 cur_flt, cur_ctl, mon_sel;
+	struct mpam_msc_ris *ris = arg;
+	struct mpam_msc *msc = ris->msc;
+	struct msmon_mbwu_state *mbwu_state;
+
+	for (i = 0; i < ris->props.num_mbwu_mon; i++) {
+		mbwu_state = &ris->mbwu_state[i];
+		cfg = &mbwu_state->cfg;
+
+		spin_lock_irqsave(&msc->mon_sel_lock, flags);
+		mon_sel = FIELD_PREP(MSMON_CFG_MON_SEL_MON_SEL, i) |
+			  FIELD_PREP(MSMON_CFG_MON_SEL_RIS, ris->ris_idx);
+		mpam_write_monsel_reg(msc, CFG_MON_SEL, mon_sel);
+
+		cur_flt = mpam_read_monsel_reg(msc, CFG_MBWU_FLT);
+		cur_ctl = mpam_read_monsel_reg(msc, CFG_MBWU_CTL);
+		mpam_write_monsel_reg(msc, CFG_MBWU_CTL, 0);
+
+		if (mpam_ris_has_mbwu_long_counter(ris)) {
+			val = mpam_msc_read_mbwu_l(msc);
+			mpam_msc_zero_mbwu_l(msc);
+		} else {
+			val = mpam_read_monsel_reg(msc, MBWU);
+			mpam_write_monsel_reg(msc, MBWU, 0);
+		}
+
+		cfg->mon = i;
+		cfg->pmg = FIELD_GET(MSMON_CFG_MBWU_FLT_PMG, cur_flt);
+		cfg->match_pmg = FIELD_GET(MSMON_CFG_x_CTL_MATCH_PMG, cur_ctl);
+		cfg->partid = FIELD_GET(MSMON_CFG_MBWU_FLT_PARTID, cur_flt);
+		mbwu_state->correction += val;
+		mbwu_state->enabled = FIELD_GET(MSMON_CFG_x_CTL_EN, cur_ctl);
+		spin_unlock_irqrestore(&msc->mon_sel_lock, flags);
+	}
+
+	return 0;
+}
+
+/*
+ * Called via smp_call_on_cpu() to prevent migration, while still being
+ * pre-emptible.
+ */
+static int mpam_reset_ris(void *arg)
+{
+	struct mpam_msc_ris *ris = arg;
+	struct reprogram_ris reprogram_arg;
+	struct mpam_config empty_cfg = { 0 };
+
+	if (ris->in_reset_state)
+		return 0;
+
+	reprogram_arg.ris = ris;
+	reprogram_arg.cfg = &empty_cfg;
+
+	mpam_reprogram_ris(&reprogram_arg);
+
+	return 0;
+}
+
+/*
+ * Get the preferred CPU for this MSC. If it is accessible from this CPU,
+ * this CPU is preferred. This can be preempted/migrated, it will only result
+ * in more work.
+ */
+static int mpam_get_msc_preferred_cpu(struct mpam_msc *msc)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (cpumask_test_cpu(cpu, &msc->accessibility))
+		return cpu;
+
+	return cpumask_first_and(&msc->accessibility, cpu_online_mask);
+}
+
+static int mpam_touch_msc(struct mpam_msc *msc, int (*fn)(void *a), void *arg)
+{
+	lockdep_assert_irqs_enabled();
+	lockdep_assert_cpus_held();
+	lockdep_assert_held(&msc->lock);
+
+	return smp_call_on_cpu(mpam_get_msc_preferred_cpu(msc), fn, arg, true);
+}
+
+static void mpam_reset_msc(struct mpam_msc *msc, bool online)
+{
+	int idx;
+	struct mpam_msc_ris *ris;
+
+	lockdep_assert_held(&msc->lock);
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(ris, &msc->ris, msc_list) {
+		mpam_touch_msc(msc, &mpam_reset_ris, ris);
+
+		/*
+		 * Set in_reset_state when coming online. The reset state
+		 * for non-zero partid may be lost while the CPUs are offline.
+		 */
+		ris->in_reset_state = online;
+
+		if (mpam_is_enabled() && !online)
+			mpam_touch_msc(msc, &mpam_save_mbwu_state, ris);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+}
+
+static void mpam_reprogram_msc(struct mpam_msc *msc)
+{
+	int idx;
+	u16 partid;
+	bool reset;
+	struct mpam_config *cfg;
+	struct mpam_msc_ris *ris;
+
+	lockdep_assert_held(&msc->lock);
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(ris, &msc->ris, msc_list) {
+		if (!mpam_is_enabled() && !ris->in_reset_state) {
+			mpam_touch_msc(msc, &mpam_reset_ris, ris);
+			ris->in_reset_state = true;
+			continue;
+		}
+
+		reset = true;
+		for (partid = 0; partid <= mpam_partid_max; partid++) {
+			cfg = &ris->comp->cfg[partid];
+			if (cfg->features)
+				reset = false;
+
+			mpam_reprogram_ris_partid(ris, partid, cfg);
+		}
+		ris->in_reset_state = reset;
+
+		if (mpam_has_feature(mpam_feat_msmon_mbwu, &ris->props))
+			mpam_touch_msc(msc, &mpam_restore_mbwu_state, ris);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+}
+
+static void _enable_percpu_irq(void *_irq)
+{
+	int *irq = _irq;
+
+	enable_percpu_irq(*irq, IRQ_TYPE_NONE);
+}
+
+static int mpam_cpu_online(unsigned int cpu)
+{
+	int idx;
+	struct mpam_msc *msc;
+
+	idx = srcu_read_lock(&mpam_srcu);
+	list_for_each_entry_rcu(msc, &mpam_all_msc, glbl_list) {
+		if (!cpumask_test_cpu(cpu, &msc->accessibility))
+			continue;
+
+		mutex_lock(&msc->lock);
+		if (msc->reenable_error_ppi)
+			_enable_percpu_irq(&msc->reenable_error_ppi);
+
+		if (atomic_fetch_inc(&msc->online_refs) == 0)
+			mpam_reprogram_msc(msc);
+		mutex_unlock(&msc->lock);
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+
+	if (mpam_is_enabled())
+		mpam_resctrl_online_cpu(cpu);
+
+	return 0;
+}
+
+/* Before mpam is enabled, try to probe new MSC */
+static int mpam_discovery_cpu_online(unsigned int cpu)
+{
+	int err = 0;
+	struct mpam_msc *msc;
+	bool new_device_probed = false;
+
+	if (mpam_is_enabled())
+		return 0;
+
+	mutex_lock(&mpam_list_lock);
+	list_for_each_entry(msc, &mpam_all_msc, glbl_list) {
+		if (!cpumask_test_cpu(cpu, &msc->accessibility))
+			continue;
+
+		mutex_lock(&msc->lock);
+		if (!msc->probed)
+			err = mpam_msc_hw_probe(msc);
+		mutex_unlock(&msc->lock);
+
+		if (!err)
+			new_device_probed = true;
+		else
+			break;	/* mpam_broken */
+	}
+	mutex_unlock(&mpam_list_lock);
+
+	if (new_device_probed && !err)
+		schedule_work(&mpam_enable_work);
+
+	if (err < 0)
+		return err;
+
+	return mpam_cpu_online(cpu);
+}
+
+static int mpam_cpu_offline(unsigned int cpu)
+{
+	int idx;
+	struct mpam_msc *msc;
+
+	idx = srcu_read_lock(&mpam_srcu);
&mpam_all_msc, glbl_list) { + if (!cpumask_test_cpu(cpu, &msc->accessibility)) + continue; + + mutex_lock(&msc->lock); + if (msc->reenable_error_ppi) + disable_percpu_irq(msc->reenable_error_ppi); + + if (atomic_dec_and_test(&msc->online_refs)) + mpam_reset_msc(msc, false); + mutex_unlock(&msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + if (mpam_is_enabled()) + mpam_resctrl_offline_cpu(cpu); + + return 0; +} + +static void mpam_register_cpuhp_callbacks(int (*online)(unsigned int online)) +{ + mutex_lock(&mpam_cpuhp_state_lock); + mpam_cpuhp_state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mpam:online", + online, mpam_cpu_offline); + if (mpam_cpuhp_state <= 0) { + pr_err("Failed to register cpuhp callbacks\n"); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); +} + +static int __setup_ppi(struct mpam_msc *msc) +{ + int cpu; + + msc->error_dev_id = alloc_percpu_gfp(struct mpam_msc *, GFP_KERNEL); + if (!msc->error_dev_id) + return -ENOMEM; + + for_each_cpu(cpu, &msc->accessibility) { + struct mpam_msc *empty = *per_cpu_ptr(msc->error_dev_id, cpu); + if (empty != NULL) { + pr_err_once("%s shares PPI with %s!\n", dev_name(&msc->pdev->dev), + dev_name(&empty->pdev->dev)); + return -EBUSY; + } + *per_cpu_ptr(msc->error_dev_id, cpu) = msc; + } + + return 0; +} + +static int mpam_msc_setup_error_irq(struct mpam_msc *msc) +{ + int irq; + + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + return 0; + + /* Allocate and initialise the percpu device pointer for PPI */ + if (irq_is_percpu(irq)) + return __setup_ppi(msc); + + /* Sanity check: a shared error interrupt must be routable to any CPU */ + if (!cpumask_equal(&msc->accessibility, cpu_possible_mask)) { + pr_err_once("msc:%u is a private resource with a shared error interrupt\n", + msc->id); + return -EINVAL; + } + + return 0; +} + +static enum mpam_machine_type mpam_dt_get_machine_type(void) +{ + /* FIXME: not supported yet */ + return MPAM_DEFAULT_MACHINE; +} + +static int mpam_dt_count_msc(void) +{ + int count = 0; + struct device_node *np; + + for_each_compatible_node(np, NULL, "arm,mpam-msc") + count++; + + return count; +} + +static int mpam_dt_parse_resource(struct mpam_msc *msc, struct device_node *np, + u32 ris_idx) +{ + int err = 0; + u32 level = 0; + unsigned long cache_id; + struct device_node *cache; + + do { + if (of_device_is_compatible(np, "arm,mpam-cache")) { + cache = of_parse_phandle(np, "arm,mpam-device", 0); + if (!cache) { + pr_err("Failed to read phandle\n"); + break; + } + } else if (of_device_is_compatible(np->parent, "cache")) { + cache = np->parent; + } else { + /* For now, only caches are supported */ + cache = NULL; + break; + } + + err = of_property_read_u32(cache, "cache-level", &level); + if (err) { + pr_err("Failed to read cache-level\n"); + break; + } + + cache_id = cache_of_get_id(cache); + if (cache_id == ~0UL) { + err = -ENOENT; + break; + } + + err = mpam_ris_create(msc, ris_idx, MPAM_CLASS_CACHE, level, + cache_id); + } while (0); + of_node_put(cache); + + return err; +} + +static int mpam_dt_parse_resources(struct mpam_msc *msc, void *ignored) +{ + int err = 0, num_ris = 0; + const u32 *ris_idx_p; + struct device_node *iter, *np; + + np = msc->pdev->dev.of_node; + for_each_child_of_node(np, iter) { + ris_idx_p = of_get_property(iter, "reg", NULL); + if (ris_idx_p) { + num_ris++; + err = mpam_dt_parse_resource(msc, iter, *ris_idx_p); + if (err) { + of_node_put(iter); + return err; + } + } + } + + if (!num_ris) + err = mpam_dt_parse_resource(msc, np, 0); + + return err; +} +
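+/*
+ * To illustrate the two shapes mpam_dt_parse_resource() accepts, a
+ * hypothetical sketch of the DT layout (names and values invented here,
+ * not a binding document). An MSC node that sits directly under a
+ * "cache" node and has no RIS children is treated as RIS 0 of that
+ * cache; otherwise each child with a "reg" property names a RIS and
+ * points at its cache through the "arm,mpam-device" phandle:
+ *
+ *	l3: l3-cache {
+ *		compatible = "cache";
+ *		cache-level = <3>;
+ *
+ *		msc0: mpam-msc {
+ *			compatible = "arm,mpam-msc";
+ *
+ *			ris@1 {
+ *				compatible = "arm,mpam-cache";
+ *				reg = <1>;
+ *				arm,mpam-device = <&l3>;
+ *			};
+ *		};
+ *	};
+ */
+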
+static int get_msc_affinity(struct mpam_msc *msc) +{ + struct device_node *parent; + u32 affinity_id; + int err; + + if (!acpi_disabled) { + err = device_property_read_u32(&msc->pdev->dev, "cpu_affinity", + &affinity_id); + if (err) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + err = acpi_pptt_get_cpus_from_container(affinity_id, + &msc->accessibility); + } + + return err; + } + + /* This depends on the path to of_node */ + parent = of_get_parent(msc->pdev->dev.of_node); + if (parent == of_root) { + cpumask_copy(&msc->accessibility, cpu_possible_mask); + err = 0; + } else { + if (of_device_is_compatible(parent, "cache")) { + err = get_cpumask_from_cache(parent, + &msc->accessibility); + } else { + err = -EINVAL; + pr_err("Cannot determine accessibility of MSC: %s\n", + dev_name(&msc->pdev->dev)); + } + } + of_node_put(parent); + + return err; +} + +static int fw_num_msc; + +static void mpam_pcc_rx_callback(struct mbox_client *cl, void *msg) +{ + /* TODO: wake up tasks blocked on this MSC's PCC channel */ +} + +static int mpam_msc_drv_remove(struct platform_device *pdev) +{ + struct mpam_msc *msc = platform_get_drvdata(pdev); + + if (!msc) + return 0; + + mutex_lock(&mpam_list_lock); + mpam_num_msc--; + platform_set_drvdata(pdev, NULL); + list_del_rcu(&msc->glbl_list); + mpam_msc_destroy(msc); + synchronize_srcu(&mpam_srcu); + mutex_unlock(&mpam_list_lock); + + return 0; +} + +static int mpam_msc_drv_probe(struct platform_device *pdev) +{ + int err; + pgprot_t prot; + void __iomem *io; + struct mpam_msc *msc; + struct resource *msc_res; + void *plat_data = pdev->dev.platform_data; + + mutex_lock(&mpam_list_lock); + do { + msc = devm_kzalloc(&pdev->dev, sizeof(*msc), GFP_KERNEL); + if (!msc) { + err = -ENOMEM; + break; + } + + INIT_LIST_HEAD_RCU(&msc->glbl_list); + msc->pdev = pdev; + + err = device_property_read_u32(&pdev->dev, "arm,not-ready-us", + &msc->nrdy_usec); + if (err) { + /* This will prevent CSU monitors from being usable */ + msc->nrdy_usec = 0; + } + + err = get_msc_affinity(msc); + if (err) + break; + if (cpumask_empty(&msc->accessibility)) { + pr_err_once("msc:%u is not accessible from any CPU!\n", + msc->id); + err = -EINVAL; + break; + } + + mutex_init(&msc->lock); + msc->id = mpam_num_msc++; + INIT_LIST_HEAD_RCU(&msc->ris); + spin_lock_init(&msc->part_sel_lock); + spin_lock_init(&msc->mon_sel_lock); + + err = mpam_msc_setup_error_irq(msc); + if (err) + break; + + if (device_property_read_u32(&pdev->dev, "pcc-channel", + &msc->pcc_subspace_id)) + msc->iface = MPAM_IFACE_MMIO; + else + msc->iface = MPAM_IFACE_PCC; + + if (msc->iface == MPAM_IFACE_MMIO) { + io = devm_platform_get_and_ioremap_resource(pdev, 0, + &msc_res); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + err = PTR_ERR(io); + break; + } + msc->mapped_hwpage_sz = resource_size(msc_res); + msc->mapped_hwpage = io; + if (msc->mapped_hwpage_sz < MPAM_MIN_MMIO_SIZE) { + pr_err("MSC MMIO space size is too small\n"); + err = -EINVAL; + break; + } + } else if (msc->iface == MPAM_IFACE_PCC) { + msc->pcc_cl.dev = &pdev->dev; + msc->pcc_cl.rx_callback = mpam_pcc_rx_callback; + msc->pcc_cl.tx_block = false; + msc->pcc_cl.tx_tout = 1000; /* 1s */ + msc->pcc_cl.knows_txdone = false; + + msc->pcc_chan = pcc_mbox_request_channel(&msc->pcc_cl, + msc->pcc_subspace_id); + if (IS_ERR(msc->pcc_chan)) { + pr_err("Failed to request MSC PCC channel\n"); + err = PTR_ERR(msc->pcc_chan); + break; + } + + prot =
__acpi_get_mem_attribute(msc->pcc_chan->shmem_base_addr); + io = ioremap_prot(msc->pcc_chan->shmem_base_addr, + msc->pcc_chan->shmem_size, pgprot_val(prot)); + if (IS_ERR(io)) { + pr_err("Failed to map MSC base address\n"); + pcc_mbox_free_channel(msc->pcc_chan); + err = PTR_ERR(io); + break; + } + + /* TODO: issue a read to update the registers */ + + msc->mapped_hwpage_sz = msc->pcc_chan->shmem_size; + msc->mapped_hwpage = io + sizeof(struct acpi_pcct_shared_memory); + } + + list_add_rcu(&msc->glbl_list, &mpam_all_msc); + platform_set_drvdata(pdev, msc); + } while (0); + mutex_unlock(&mpam_list_lock); + + if (!err) { + /* Create RIS entries described by firmware */ + if (!acpi_disabled) + err = acpi_mpam_parse_resources(msc, plat_data); + else + err = mpam_dt_parse_resources(msc, plat_data); + } + + if (err) + mpam_msc_drv_remove(pdev); + + if (!err && fw_num_msc == mpam_num_msc) + mpam_register_cpuhp_callbacks(&mpam_discovery_cpu_online); + + return err; +} + +/* + * If a resource doesn't match class feature/configuration, do the right thing. + * For 'num' properties we can just take the minimum. + * For properties where the mismatched unused bits would make a difference, we + * nobble the class feature, as we can't configure all the resources. + * e.g. The L3 cache is composed of two resources with 13 and 17 portion + * bitmaps respectively. + */ +static void +__resource_props_mismatch(struct mpam_msc_ris *ris, struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + struct mpam_props *rprops = &ris->props; + + lockdep_assert_held(&mpam_list_lock); /* we modify class */ + + /* Clear missing features */ + cprops->features &= rprops->features; + + /* Clear incompatible features */ + if (cprops->cpbm_wd != rprops->cpbm_wd) + mpam_clear_feature(mpam_feat_cpor_part, &cprops->features); + if (cprops->mbw_pbm_bits != rprops->mbw_pbm_bits) + mpam_clear_feature(mpam_feat_mbw_part, &cprops->features); + + /* bwa_wd is a count of bits, fewer bits means less precision */ + if (cprops->bwa_wd != rprops->bwa_wd) + cprops->bwa_wd = min(cprops->bwa_wd, rprops->bwa_wd); + + /* For num properties, take the minimum */ + if (cprops->num_csu_mon != rprops->num_csu_mon) + cprops->num_csu_mon = min(cprops->num_csu_mon, rprops->num_csu_mon); + if (cprops->num_mbwu_mon != rprops->num_mbwu_mon) + cprops->num_mbwu_mon = min(cprops->num_mbwu_mon, rprops->num_mbwu_mon); + + if (cprops->intpri_wd != rprops->intpri_wd) + cprops->intpri_wd = min(cprops->intpri_wd, rprops->intpri_wd); + if (cprops->dspri_wd != rprops->dspri_wd) + cprops->dspri_wd = min(cprops->dspri_wd, rprops->dspri_wd); + + /* {int,ds}pri may not have differing 0-low behaviour */ + if (mpam_has_feature(mpam_feat_intpri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_intpri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_intpri_part, &cprops->features); + if (mpam_has_feature(mpam_feat_dspri_part_0_low, cprops) != + mpam_has_feature(mpam_feat_dspri_part_0_low, rprops)) + mpam_clear_feature(mpam_feat_dspri_part, &cprops->features); +} + +/* + * Copy the first component's first resources's properties and features to the + * class. __resource_props_mismatch() will remove conflicts. + * It is not possible to have a class with no components, or a component with + * no resources. 
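+ *
+ * For example (hypothetical values): if one RIS reports intpri_wd = 4
+ * and another 6, the class keeps the less precise 4; if two RIS
+ * disagree on intpri_part_0_low, the priority feature is removed from
+ * the class entirely.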
+ */ +static void mpam_enable_init_class_features(struct mpam_class *class) +{ + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + comp = list_first_entry_or_null(&class->components, + struct mpam_component, class_list); + if (WARN_ON(!comp)) + return; + + ris = list_first_entry_or_null(&comp->ris, + struct mpam_msc_ris, comp_list); + if (WARN_ON(!ris)) + return; + + class->props = ris->props; +} + +/* Merge all the common resource features into class. */ +static void mpam_enable_merge_features(void) +{ + struct mpam_msc_ris *ris; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + mpam_enable_init_class_features(class); + + list_for_each_entry(comp, &class->components, class_list) { + list_for_each_entry(ris, &comp->ris, comp_list) { + __resource_props_mismatch(ris, class); + + class->nrdy_usec = max(class->nrdy_usec, + ris->msc->nrdy_usec); + } + } + } +} + +static const char *mpam_errcode_names[16] = { + [0] = "No error", + [1] = "PARTID_SEL_Range", + [2] = "Req_PARTID_Range", + [3] = "MSMONCFG_ID_RANGE", + [4] = "Req_PMG_Range", + [5] = "Monitor_Range", + [6] = "intPARTID_Range", + [7] = "Unexpected_INTERNAL", + [8] = "Undefined_RIS_PART_SEL", + [9] = "RIS_No_Control", + [10] = "Undefined_RIS_MON_SEL", + [11] = "RIS_No_Monitor", + [12 ... 15] = "Reserved" +}; + +static int mpam_enable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(1, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static int mpam_disable_msc_ecr(void *_msc) +{ + struct mpam_msc *msc = _msc; + + writel_relaxed(0, msc->mapped_hwpage + MPAMF_ECR); + + return 0; +} + +static irqreturn_t __mpam_irq_handler(int irq, struct mpam_msc *msc) +{ + u64 reg; + u16 partid; + u8 errcode, pmg, ris; + + if (WARN_ON_ONCE(!msc) || + WARN_ON_ONCE(!cpumask_test_cpu(smp_processor_id(), + &msc->accessibility))) + return IRQ_NONE; + + reg = mpam_msc_read_esr(msc); + + errcode = FIELD_GET(MPAMF_ESR_ERRCODE, reg); + if (!errcode) + return IRQ_NONE; + + /* Clear level triggered irq */ + mpam_msc_zero_esr(msc); + + partid = FIELD_GET(MPAMF_ESR_PARTID_OR_MON, reg); + pmg = FIELD_GET(MPAMF_ESR_PMG, reg); + ris = FIELD_GET(MPAMF_ESR_RIS, reg); + + pr_err("error irq from msc:%u '%s', partid:%u, pmg: %u, ris: %u\n", + msc->id, mpam_errcode_names[errcode], partid, pmg, ris); + + /* + * To prevent this interrupt from repeatedly cancelling the scheduled + * work to disable mpam, disable the error interrupt.
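+ *
+ * As a worked example of the decode above (hypothetical register
+ * value): an ESR of 0x01010042 is errcode 1 ("PARTID_SEL_Range"),
+ * pmg 1, partid/mon 0x42. Writing 0 to MPAMF_ECR clears
+ * MPAMF_ECR_INTEN, masking the interrupt at the MSC until
+ * mpam_enable_msc_ecr() runs again.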
+ */ + mpam_disable_msc_ecr(msc); + + schedule_work(&mpam_broken_work); + + return IRQ_HANDLED; +} + +static irqreturn_t mpam_ppi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = *(struct mpam_msc **)dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static irqreturn_t mpam_spi_handler(int irq, void *dev_id) +{ + struct mpam_msc *msc = dev_id; + + return __mpam_irq_handler(irq, msc); +} + +static int mpam_register_irqs(void) +{ + int err, irq; + struct mpam_msc *msc; + + lockdep_assert_cpus_held(); + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + /* The MPAM spec says the interrupt can be SPI, PPI or LPI */ + /* We anticipate sharing the interrupt with other MSCs */ + if (irq_is_percpu(irq)) { + err = request_percpu_irq(irq, &mpam_ppi_handler, + "mpam:msc:error", + msc->error_dev_id); + if (err) + return err; + + mutex_lock(&msc->lock); + msc->reenable_error_ppi = irq; + smp_call_function_many(&msc->accessibility, + &_enable_percpu_irq, &irq, + true); + mutex_unlock(&msc->lock); + } else { + err = devm_request_irq(&msc->pdev->dev, irq, + &mpam_spi_handler, IRQF_SHARED, + "mpam:msc:error", msc); + if (err) + return err; + } + + mutex_lock(&msc->lock); + msc->error_irq_requested = true; + mpam_touch_msc(msc, mpam_enable_msc_ecr, msc); + msc->error_irq_hw_enabled = true; + mutex_unlock(&msc->lock); + } + + return 0; +} + +static void mpam_unregister_irqs(void) +{ + int irq; + struct mpam_msc *msc; + + cpus_read_lock(); + /* take the lock as free_irq() can sleep */ + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + irq = platform_get_irq_byname_optional(msc->pdev, "error"); + if (irq <= 0) + continue; + + mutex_lock(&msc->lock); + if (msc->error_irq_hw_enabled) { + mpam_touch_msc(msc, mpam_disable_msc_ecr, msc); + msc->error_irq_hw_enabled = false; + } + + if (msc->error_irq_requested) { + if (irq_is_percpu(irq)) { + msc->reenable_error_ppi = 0; + free_percpu_irq(irq, msc->error_dev_id); + } else { + devm_free_irq(&msc->pdev->dev, irq, msc); + } + msc->error_irq_requested = false; + } + mutex_unlock(&msc->lock); + } + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); +} + +static void __destroy_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + kfree(comp->cfg); + list_for_each_entry(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + mbwu_state = ris->mbwu_state; + ris->mbwu_state = NULL; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + mutex_unlock(&ris->msc->lock); + + kfree(mbwu_state); + } +} + +static int __allocate_component_cfg(struct mpam_component *comp) +{ + unsigned long flags; + struct mpam_msc_ris *ris; + struct msmon_mbwu_state *mbwu_state; + + if (comp->cfg) + return 0; + + comp->cfg = kcalloc(mpam_partid_max + 1, sizeof(*comp->cfg), GFP_KERNEL); + if (!comp->cfg) + return -ENOMEM; + + list_for_each_entry(ris, &comp->ris, comp_list) { + if (!ris->props.num_mbwu_mon) + continue; + + mbwu_state = kcalloc(ris->props.num_mbwu_mon, + sizeof(*ris->mbwu_state), GFP_KERNEL); + if (!mbwu_state) { + __destroy_component_cfg(comp); + return -ENOMEM; + } + + mutex_lock(&ris->msc->lock); + spin_lock_irqsave(&ris->msc->mon_sel_lock, flags); + ris->mbwu_state = mbwu_state; + spin_unlock_irqrestore(&ris->msc->mon_sel_lock, flags); + 
mutex_unlock(&ris->msc->lock); + } + + return 0; +} + +static int mpam_allocate_config(void) +{ + int err = 0; + struct mpam_class *class; + struct mpam_component *comp; + + lockdep_assert_held(&mpam_list_lock); + + list_for_each_entry(class, &mpam_classes, classes_list) { + list_for_each_entry(comp, &class->components, class_list) { + err = __allocate_component_cfg(comp); + if (err) + return err; + } + } + + return 0; +} + +static void mpam_enable_once(void) +{ + int err; + + /* + * If all the MSC have been probed, enabling the IRQs happens next. + * That involves cross-calling to a CPU that can reach the MSC, and + * the locks must be taken in this order: + */ + cpus_read_lock(); + mutex_lock(&mpam_list_lock); + do { + mpam_enable_merge_features(); + + err = mpam_allocate_config(); + if (err) { + pr_err("Failed to allocate configuration arrays.\n"); + break; + } + + err = mpam_register_irqs(); + if (err) { + pr_warn("Failed to register irqs: %d\n", err); + break; + } + } while (0); + mutex_unlock(&mpam_list_lock); + cpus_read_unlock(); + + if (!err) { + err = mpam_resctrl_setup(); + if (err) + pr_err("Failed to initialise resctrl: %d\n", err); + } + + if (err) { + schedule_work(&mpam_broken_work); + return; + } + + mutex_lock(&mpam_cpuhp_state_lock); + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + mutex_unlock(&mpam_cpuhp_state_lock); + + /* + * Once the cpuhp callbacks have been changed, mpam_partid_max can no + * longer change. + */ + spin_lock(&partid_max_lock); + partid_max_published = true; + spin_unlock(&partid_max_lock); + + static_branch_enable(&mpam_enabled); + mpam_register_cpuhp_callbacks(mpam_cpu_online); + + pr_info("MPAM enabled with %u partid and %u pmg\n", + READ_ONCE(mpam_partid_max) + 1, READ_ONCE(mpam_pmg_max) + 1); +} + +void mpam_reset_class(struct mpam_class *class) +{ + int idx; + struct mpam_msc_ris *ris; + struct mpam_component *comp; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(comp, &class->components, class_list) { + memset(comp->cfg, 0, ((mpam_partid_max + 1) * sizeof(*comp->cfg))); + + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, mpam_reset_ris, ris); + mutex_unlock(&ris->msc->lock); + ris->in_reset_state = true; + } + } + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Called in response to an error IRQ. + * All of MPAMs errors indicate a software bug, restore any modified + * controls to their reset values. + */ +void mpam_disable(struct work_struct *ignored) +{ + int idx; + struct mpam_class *class; + + mutex_lock(&mpam_cpuhp_state_lock); + if (mpam_cpuhp_state) { + cpuhp_remove_state(mpam_cpuhp_state); + mpam_cpuhp_state = 0; + } + mutex_unlock(&mpam_cpuhp_state_lock); + + mpam_resctrl_exit(); + + static_branch_disable(&mpam_enabled); + + mpam_unregister_irqs(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); +} + +/* + * Enable mpam once all devices have been probed. + * Scheduled by mpam_discovery_cpu_online() once all devices have been created. + * Also scheduled when new devices are probed when new CPUs come online. 
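+ * The atomic 'once' counter below ensures mpam_enable_once() runs a
+ * single time, however many times this work is queued.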
+ */ +void mpam_enable(struct work_struct *work) +{ + static atomic_t once; + struct mpam_msc *msc; + bool all_devices_probed = true; + + mutex_lock(&mpam_list_lock); + list_for_each_entry(msc, &mpam_all_msc, glbl_list) { + mutex_lock(&msc->lock); + if (!msc->probed) + all_devices_probed = false; + mutex_unlock(&msc->lock); + + if (!all_devices_probed) + break; + } + mutex_unlock(&mpam_list_lock); + + if (all_devices_probed && !atomic_fetch_inc(&once)) + mpam_enable_once(); +} + +struct mpam_write_config_arg { + struct mpam_msc_ris *ris; + struct mpam_component *comp; + u16 partid; +}; + +static int __write_config(void *arg) +{ + struct mpam_write_config_arg *c = arg; + + mpam_reprogram_ris_partid(c->ris, c->partid, &c->comp->cfg[c->partid]); + + return 0; +} + +/* TODO: split into write_config/sync_config */ +/* TODO: add config_dirty bitmap to drive sync_config */ +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg) +{ + struct mpam_write_config_arg arg; + struct mpam_msc_ris *ris; + int idx; + + lockdep_assert_cpus_held(); + + if (!memcmp(&comp->cfg[partid], cfg, sizeof(*cfg))) + return 0; + + comp->cfg[partid] = *cfg; + arg.comp = comp; + arg.partid = partid; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(ris, &comp->ris, comp_list) { + arg.ris = ris; + mutex_lock(&ris->msc->lock); + mpam_touch_msc(ris->msc, __write_config, &arg); + mutex_unlock(&ris->msc->lock); + } + srcu_read_unlock(&mpam_srcu, idx); + + return 0; +} + +static const struct of_device_id mpam_of_match[] = { + { .compatible = "arm,mpam-msc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mpam_of_match); + +static struct platform_driver mpam_msc_driver = { + .driver = { + .name = "mpam_msc", + .of_match_table = of_match_ptr(mpam_of_match), + }, + .probe = mpam_msc_drv_probe, + .remove = mpam_msc_drv_remove, +}; + +/* + * MSC that are hidden under caches are not created as platform devices + * as there is no cache driver. Caches are also special-cased in + * get_msc_affinity(). + */ +static void mpam_dt_create_foundling_msc(void) +{ + int err; + struct device_node *cache; + + for_each_compatible_node(cache, NULL, "cache") { + err = of_platform_populate(cache, mpam_of_match, NULL, NULL); + if (err) { + pr_err("Failed to create MSC devices under caches\n"); + } + } +} + +static int __init arm64_mpam_register_cpus(void) +{ + u64 mpamidr = read_sysreg_s(SYS_MPAMIDR_EL1); + u16 partid_max = FIELD_GET(MPAMIDR_PARTID_MAX, mpamidr); + u8 pmg_max = FIELD_GET(MPAMIDR_PMG_MAX, mpamidr); + + return mpam_register_requestor(partid_max, pmg_max); +} + +static int __init mpam_msc_driver_init(void) +{ + bool mpam_not_available = false; + int err; + + if (!mpam_cpus_have_feature()) + return -EOPNOTSUPP; + + init_srcu_struct(&mpam_srcu); + + if (!acpi_disabled) + mpam_current_machine = acpi_mpam_get_machine_type(); + else + mpam_current_machine = mpam_dt_get_machine_type(); + + if (!acpi_disabled) + fw_num_msc = acpi_mpam_count_msc(); + else + fw_num_msc = mpam_dt_count_msc(); + + if (fw_num_msc <= 0) { + pr_err("No MSC devices found in firmware\n"); + return -EINVAL; + } + + /* + * Access MPAM system registers after MPAM ACPI table is parsed, since + * some BIOSs disable MPAM system registers accessing but export MPAM in + * ID_AA64PFR0_EL1. So we can only rely on the MPAM ACPI table to + * determine whether MPAM feature is enabled. 
+ */ + err = arm64_mpam_register_cpus(); + if (err) + return err; + + /* + * If the MPAM CPU interface is not implemented, or reserved by + * firmware, there is no point touching the rest of the hardware. + */ + spin_lock(&partid_max_lock); + if (!partid_max_init || (!mpam_partid_max && !mpam_pmg_max)) + mpam_not_available = true; + spin_unlock(&partid_max_lock); + + if (mpam_not_available) + return 0; + + if (acpi_disabled) + mpam_dt_create_foundling_msc(); + + return platform_driver_register(&mpam_msc_driver); +} +/* Must occur after arm64_mpam_register_cpus() from arch_initcall() */ +subsys_initcall(mpam_msc_driver_init); diff --git a/drivers/platform/mpam/mpam_internal.h b/drivers/platform/mpam/mpam_internal.h new file mode 100644 index 0000000000000000000000000000000000000000..d84413e5e03150901f223cf90e2df82580700a76 --- /dev/null +++ b/drivers/platform/mpam/mpam_internal.h @@ -0,0 +1,585 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef MPAM_INTERNAL_H +#define MPAM_INTERNAL_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +DECLARE_STATIC_KEY_FALSE(mpam_enabled); + +/* Value to indicate the allocated monitor is derived from the RMID index. */ +#define USE_RMID_IDX (U16_MAX + 1) + +/* + * Only these event configuration bits are supported. MPAM can't know if + * data is being written back, these will show up as a write. + */ +#define MPAM_RESTRL_EVT_CONFIG_VALID (READS_TO_LOCAL_MEM | NON_TEMP_WRITE_TO_LOCAL_MEM) + +static inline bool mpam_is_enabled(void) +{ + return static_branch_likely(&mpam_enabled); +} + +struct mpam_msc +{ + /* member of mpam_all_msc */ + struct list_head glbl_list; + + int id; + struct platform_device *pdev; + + /* Not modified after mpam_is_enabled() becomes true */ + enum mpam_msc_iface iface; + u32 pcc_subspace_id; + struct mbox_client pcc_cl; + struct pcc_mbox_chan *pcc_chan; + u32 nrdy_usec; + cpumask_t accessibility; + bool has_extd_esr; + + int reenable_error_ppi; + struct mpam_msc * __percpu *error_dev_id; + + atomic_t online_refs; + + struct mutex lock; + bool probed; + bool error_irq_requested; + bool error_irq_hw_enabled; + u16 partid_max; + u8 pmg_max; + unsigned long ris_idxs[128 / BITS_PER_LONG]; + u32 ris_max; + + /* mpam_msc_ris of this component */ + struct list_head ris; + + /* + * part_sel_lock protects access to the MSC hardware registers that are + * affected by MPAMCFG_PART_SEL. (including the ID registers) + * If needed, take msc->lock first. + */ + spinlock_t part_sel_lock; + spinlock_t mon_sel_lock; + void __iomem * mapped_hwpage; + size_t mapped_hwpage_sz; +}; + +/* + * When we compact the supported features, we don't care what they are. + * Storing them as a bitmap makes life easy. + */ +typedef u32 mpam_features_t; + +/* Bits for mpam_features_t */ +enum mpam_device_features { + mpam_feat_ccap_part = 0, + mpam_feat_cpor_part, + mpam_feat_mbw_part, + mpam_feat_mbw_min, + mpam_feat_mbw_max, + mpam_feat_mbw_prop, + mpam_feat_intpri_part, + mpam_feat_intpri_part_0_low, + mpam_feat_dspri_part, + mpam_feat_dspri_part_0_low, + mpam_feat_msmon, + mpam_feat_msmon_csu, + mpam_feat_msmon_csu_capture, + /* + * Having mpam_feat_msmon_mbwu set doesn't mean the regular 31 bit MBWU + * counter would be used. The exact counter used is decided based on the + * status of mpam_feat_msmon_mbwu_l/mpam_feat_msmon_mbwu_lwd as well. 
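+ * (In this list those correspond to mpam_feat_msmon_mbwu_44counter and
+ * mpam_feat_msmon_mbwu_63counter: the long counter holds 44 bit values,
+ * or 63 bits with LWD, against 31 bits for the regular MSMON_MBWU.)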
+ */ + mpam_feat_msmon_mbwu, + mpam_feat_msmon_mbwu_44counter, + mpam_feat_msmon_mbwu_63counter, + mpam_feat_msmon_mbwu_capture, + mpam_feat_msmon_mbwu_rwbw, + mpam_feat_msmon_capt, + mpam_feat_impl_msmon_mbwu, + mpam_feat_partid_nrw, + MPAM_FEATURE_LAST, +}; +#define MPAM_ALL_FEATURES ((1<<MPAM_FEATURE_LAST) - 1) + +#define mpam_has_feature(_feat, x) ((1<<_feat) & (x)->features) +#define mpam_set_feature(_feat, x) ((x)->features |= (1<<_feat)) + +static inline void mpam_clear_feature(enum mpam_device_features feat, + mpam_features_t *supported) +{ + *supported &= ~(1<lock. + * Changes to reset_on_next_read, prev_val and correction are protected by the + * msc's mon_sel_lock. + */ +struct msmon_mbwu_state { + bool enabled; + bool reset_on_next_read; + struct mon_cfg cfg; + + /* The value last read from the hardware. Used to detect overflow. */ + u64 prev_val; + + /* + * The value to add to the new reading to account for power management, + * and shifts to trigger the overflow interrupt. + */ + u64 correction; +}; + +struct mpam_msc_ris { + u8 ris_idx; + u64 idr; + struct mpam_props props; + bool in_reset_state; + + cpumask_t affinity; + + /* member of mpam_component:ris */ + struct list_head comp_list; + + /* member of mpam_msc:ris */ + struct list_head msc_list; + + /* parents: */ + struct mpam_msc *msc; + struct mpam_component *comp; + + /* msmon mbwu configuration is preserved over reset */ + struct msmon_mbwu_state *mbwu_state; +}; + +struct mpam_resctrl_dom { + struct mpam_component *comp; + struct rdt_domain resctrl_dom; + + u32 mbm_local_evt_cfg; +}; + +struct mpam_resctrl_res { + struct mpam_class *class; + struct rdt_resource resctrl_res; +}; + +static inline int mpam_alloc_csu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_csu_mon, 0, cprops->num_csu_mon - 1, + GFP_KERNEL); +} + +static inline void mpam_free_csu_mon(struct mpam_class *class, int csu_mon) +{ + ida_free(&class->ida_csu_mon, csu_mon); +} + +static inline int mpam_alloc_mbwu_mon(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return -EOPNOTSUPP; + + return ida_alloc_range(&class->ida_mbwu_mon, 0, + cprops->num_mbwu_mon - 1, GFP_KERNEL); +} + +static inline void mpam_free_mbwu_mon(struct mpam_class *class, int mbwu_mon) +{ + ida_free(&class->ida_mbwu_mon, mbwu_mon); +} + +/* List of all classes */ +extern struct list_head mpam_classes; +extern struct srcu_struct mpam_srcu; + +/* System wide partid/pmg values */ +extern u16 mpam_partid_max; +extern u8 mpam_pmg_max; + +/* Scheduled work callback to enable mpam once all MSC have been probed */ +void mpam_enable(struct work_struct *work); +void mpam_disable(struct work_struct *work); + +void mpam_reset_class(struct mpam_class *class); + +int mpam_apply_config(struct mpam_component *comp, u16 partid, + struct mpam_config *cfg); + +int mpam_msmon_read(struct mpam_component *comp, struct mon_cfg *ctx, + enum mpam_device_features, u64 *val); +void mpam_msmon_reset_mbwu(struct mpam_component *comp, struct mon_cfg *ctx); +void mpam_msmon_reset_all_mbwu(struct mpam_component *comp); + +int mpam_resctrl_online_cpu(unsigned int cpu); +int mpam_resctrl_offline_cpu(unsigned int cpu); + +int mpam_resctrl_setup(void); +void mpam_resctrl_exit(void); + +/* + * MPAM MSCs have the following register layout. See: + * Arm Architecture Reference Manual Supplement - Memory System Resource + * Partitioning and Monitoring (MPAM), for Armv8-A.
DDI 0598A.a + */ +#define MPAM_ARCHITECTURE_V1 0x10 +#define MPAM_MIN_MMIO_SIZE 0x3000 + +/* Memory mapped control pages: */ +/* ID Register offsets in the memory mapped page */ +#define MPAMF_IDR 0x0000 /* features id register */ +#define MPAMF_MSMON_IDR 0x0080 /* performance monitoring features */ +#define MPAMF_IMPL_IDR 0x0028 /* imp-def partitioning */ +#define MPAMF_CPOR_IDR 0x0030 /* cache-portion partitioning */ +#define MPAMF_CCAP_IDR 0x0038 /* cache-capacity partitioning */ +#define MPAMF_MBW_IDR 0x0040 /* mem-bw partitioning */ +#define MPAMF_PRI_IDR 0x0048 /* priority partitioning */ +#define MPAMF_CSUMON_IDR 0x0088 /* cache-usage monitor */ +#define MPAMF_MBWUMON_IDR 0x0090 /* mem-bw usage monitor */ +#define MPAMF_PARTID_NRW_IDR 0x0050 /* partid-narrowing */ +#define MPAMF_IIDR 0x0018 /* implementer id register */ +#define MPAMF_AIDR 0x0020 /* architectural id register */ + +/* Configuration and Status Register offsets in the memory mapped page */ +#define MPAMCFG_PART_SEL 0x0100 /* partid to configure: */ +#define MPAMCFG_CPBM 0x1000 /* cache-portion config */ +#define MPAMCFG_CMAX 0x0108 /* cache-capacity config */ +#define MPAMCFG_MBW_MIN 0x0200 /* min mem-bw config */ +#define MPAMCFG_MBW_MAX 0x0208 /* max mem-bw config */ +#define MPAMCFG_MBW_WINWD 0x0220 /* mem-bw accounting window config */ +#define MPAMCFG_MBW_PBM 0x2000 /* mem-bw portion bitmap config */ +#define MPAMCFG_PRI 0x0400 /* priority partitioning config */ +#define MPAMCFG_MBW_PROP 0x0500 /* mem-bw stride config */ +#define MPAMCFG_INTPARTID 0x0600 /* partid-narrowing config */ + +#define MSMON_CFG_MON_SEL 0x0800 /* monitor selector */ +#define MSMON_CFG_CSU_FLT 0x0810 /* cache-usage monitor filter */ +#define MSMON_CFG_CSU_CTL 0x0818 /* cache-usage monitor config */ +#define MSMON_CFG_MBWU_FLT 0x0820 /* mem-bw monitor filter */ +#define MSMON_CFG_MBWU_CTL 0x0828 /* mem-bw monitor config */ +#define MSMON_CSU 0x0840 /* current cache-usage */ +#define MSMON_CSU_CAPTURE 0x0848 /* last cache-usage value captured */ +#define MSMON_MBWU 0x0860 /* current mem-bw usage value */ +#define MSMON_MBWU_CAPTURE 0x0868 /* last mem-bw value captured */ +#define MSMON_MBWU_L 0x0880 /* current long mem-bw usage value */ +#define MSMON_MBWU_CAPTURE_L 0x0890 /* last long mem-bw value captured */ +#define MSMON_CAPT_EVNT 0x0808 /* signal a capture event */ +#define MPAMF_ESR 0x00F8 /* error status register */ +#define MPAMF_ECR 0x00F0 /* error control register */ + +/* MPAMF_IDR - MPAM features ID register */ +#define MPAMF_IDR_PARTID_MAX GENMASK(15, 0) +#define MPAMF_IDR_PMG_MAX GENMASK(23, 16) +#define MPAMF_IDR_HAS_CCAP_PART BIT(24) +#define MPAMF_IDR_HAS_CPOR_PART BIT(25) +#define MPAMF_IDR_HAS_MBW_PART BIT(26) +#define MPAMF_IDR_HAS_PRI_PART BIT(27) +#define MPAMF_IDR_HAS_EXT BIT(28) +#define MPAMF_IDR_HAS_IMPL_IDR BIT(29) +#define MPAMF_IDR_HAS_MSMON BIT(30) +#define MPAMF_IDR_HAS_PARTID_NRW BIT(31) +#define MPAMF_IDR_HAS_RIS BIT(32) +#define MPAMF_IDR_HAS_EXT_ESR BIT(38) +#define MPAMF_IDR_HAS_ESR BIT(39) +#define MPAMF_IDR_RIS_MAX GENMASK(59, 56) + + +/* MPAMF_MSMON_IDR - MPAM performance monitoring ID register */ +#define MPAMF_MSMON_IDR_MSMON_CSU BIT(16) +#define MPAMF_MSMON_IDR_MSMON_MBWU BIT(17) +#define MPAMF_MSMON_IDR_HAS_LOCAL_CAPT_EVNT BIT(31) + +/* MPAMF_CPOR_IDR - MPAM features cache portion partitioning ID register */ +#define MPAMF_CPOR_IDR_CPBM_WD GENMASK(15, 0) + +/* MPAMF_CCAP_IDR - MPAM features cache capacity partitioning ID register */ +#define MPAMF_CCAP_IDR_CMAX_WD GENMASK(5, 0) + +/* MPAMF_MBW_IDR - 
MPAM features memory bandwidth partitioning ID register */ +#define MPAMF_MBW_IDR_BWA_WD GENMASK(5, 0) +#define MPAMF_MBW_IDR_HAS_MIN BIT(10) +#define MPAMF_MBW_IDR_HAS_MAX BIT(11) +#define MPAMF_MBW_IDR_HAS_PBM BIT(12) +#define MPAMF_MBW_IDR_HAS_PROP BIT(13) +#define MPAMF_MBW_IDR_WINDWR BIT(14) +#define MPAMF_MBW_IDR_BWPBM_WD GENMASK(28, 16) + +/* MPAMF_PRI_IDR - MPAM features priority partitioning ID register */ +#define MPAMF_PRI_IDR_HAS_INTPRI BIT(0) +#define MPAMF_PRI_IDR_INTPRI_0_IS_LOW BIT(1) +#define MPAMF_PRI_IDR_INTPRI_WD GENMASK(9, 4) +#define MPAMF_PRI_IDR_HAS_DSPRI BIT(16) +#define MPAMF_PRI_IDR_DSPRI_0_IS_LOW BIT(17) +#define MPAMF_PRI_IDR_DSPRI_WD GENMASK(25, 20) + +/* MPAMF_CSUMON_IDR - MPAM cache storage usage monitor ID register */ +#define MPAMF_CSUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_CSUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_MBWUMON_IDR - MPAM memory bandwidth usage monitor ID register */ +#define MPAMF_MBWUMON_IDR_NUM_MON GENMASK(15, 0) +#define MPAMF_MBWUMON_IDR_HAS_RWBW BIT(28) +#define MPAMF_MBWUMON_IDR_LWD BIT(29) +#define MPAMF_MBWUMON_IDR_HAS_LONG BIT(30) +#define MPAMF_MBWUMON_IDR_HAS_CAPTURE BIT(31) + +/* MPAMF_PARTID_NRW_IDR - MPAM PARTID narrowing ID register */ +#define MPAMF_PARTID_NRW_IDR_INTPARTID_MAX GENMASK(15, 0) + +/* MPAMF_IIDR - MPAM implementation ID register */ +#define MPAMF_IIDR_PRODUCTID GENMASK(31, 20) +#define MPAMF_IIDR_PRODUCTID_SHIFT 20 +#define MPAMF_IIDR_VARIANT GENMASK(19, 16) +#define MPAMF_IIDR_VARIANT_SHIFT 16 +#define MPAMF_IIDR_REVISON GENMASK(15, 12) +#define MPAMF_IIDR_REVISON_SHIFT 12 +#define MPAMF_IIDR_IMPLEMENTER GENMASK(11, 0) +#define MPAMF_IIDR_IMPLEMENTER_SHIFT 0 + +/* MPAMF_AIDR - MPAM architecture ID register */ +#define MPAMF_AIDR_ARCH_MAJOR_REV GENMASK(7, 4) +#define MPAMF_AIDR_ARCH_MINOR_REV GENMASK(3, 0) + +/* MPAMCFG_PART_SEL - MPAM partition configuration selection register */ +#define MPAMCFG_PART_SEL_PARTID_SEL GENMASK(15, 0) +#define MPAMCFG_PART_SEL_INTERNAL BIT(16) +#define MPAMCFG_PART_SEL_RIS GENMASK(27, 24) + +/* MPAMCFG_CMAX - MPAM cache portion bitmap partition configuration register */ +#define MPAMCFG_CMAX_CMAX GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MIN - MPAM memory minimum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MIN_MIN GENMASK(15, 0) + +/* + * MPAMCFG_MBW_MAX - MPAM memory maximum bandwidth partitioning configuration + * register + */ +#define MPAMCFG_MBW_MAX_MAX GENMASK(15, 0) +#define MPAMCFG_MBW_MAX_HARDLIM BIT(31) + +/* + * MPAMCFG_MBW_WINWD - MPAM memory bandwidth partitioning window width + * register + */ +#define MPAMCFG_MBW_WINWD_US_FRAC GENMASK(7, 0) +#define MPAMCFG_MBW_WINWD_US_INT GENMASK(23, 8) + + +/* MPAMCFG_PRI - MPAM priority partitioning configuration register */ +#define MPAMCFG_PRI_INTPRI GENMASK(15, 0) +#define MPAMCFG_PRI_DSPRI GENMASK(31, 16) + +/* + * MPAMCFG_MBW_PROP - Memory bandwidth proportional stride partitioning + * configuration register + */ +#define MPAMCFG_MBW_PROP_STRIDEM1 GENMASK(15, 0) +#define MPAMCFG_MBW_PROP_EN BIT(31) + +/* + * MPAMCFG_INTPARTID - MPAM internal partition narrowing configuration register + */ +#define MPAMCFG_INTPARTID_INTPARTID GENMASK(15, 0) +#define MPAMCFG_INTPARTID_INTERNAL BIT(16) + +/* MSMON_CFG_MON_SEL - Memory system performance monitor selection register */ +#define MSMON_CFG_MON_SEL_MON_SEL GENMASK(7, 0) +#define MSMON_CFG_MON_SEL_RIS GENMASK(27, 24) + +/* MPAMF_ESR - MPAM Error Status Register */ +#define MPAMF_ESR_PARTID_OR_MON GENMASK(15, 0) +#define MPAMF_ESR_PMG GENMASK(23, 16) 
+#define MPAMF_ESR_ERRCODE GENMASK(27, 24) +#define MPAMF_ESR_OVRWR BIT(31) +#define MPAMF_ESR_RIS GENMASK(35, 32) + +/* MPAMF_ECR - MPAM Error Control Register */ +#define MPAMF_ECR_INTEN BIT(0) + +/* Error conditions in accessing memory mapped registers */ +#define MPAM_ERRCODE_NONE 0 +#define MPAM_ERRCODE_PARTID_SEL_RANGE 1 +#define MPAM_ERRCODE_REQ_PARTID_RANGE 2 +#define MPAM_ERRCODE_MSMONCFG_ID_RANGE 3 +#define MPAM_ERRCODE_REQ_PMG_RANGE 4 +#define MPAM_ERRCODE_MONITOR_RANGE 5 +#define MPAM_ERRCODE_INTPARTID_RANGE 6 +#define MPAM_ERRCODE_UNEXPECTED_INTERNAL 7 + +/* + * MSMON_CFG_CSU_FLT - Memory system performance monitor configure cache storage + * usage monitor filter register + */ +#define MSMON_CFG_CSU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_CSU_FLT_PMG GENMASK(23, 16) + +/* + * MSMON_CFG_CSU_CTL - Memory system performance monitor configure cache storage + * usage monitor control register + * MSMON_CFG_MBWU_CTL - Memory system performance monitor configure memory + * bandwidth usage monitor control register + */ +#define MSMON_CFG_x_CTL_TYPE GENMASK(7, 0) +#define MSMON_CFG_x_CTL_MATCH_PARTID BIT(16) +#define MSMON_CFG_x_CTL_MATCH_PMG BIT(17) +#define MSMON_CFG_x_CTL_SCLEN BIT(19) +#define MSMON_CFG_x_CTL_SUBTYPE GENMASK(23, 20) +#define MSMON_CFG_x_CTL_OFLOW_FRZ BIT(24) +#define MSMON_CFG_x_CTL_OFLOW_INTR BIT(25) +#define MSMON_CFG_x_CTL_OFLOW_STATUS BIT(26) +#define MSMON_CFG_x_CTL_CAPT_RESET BIT(27) +#define MSMON_CFG_x_CTL_CAPT_EVNT GENMASK(30, 28) +#define MSMON_CFG_x_CTL_EN BIT(31) + +#define MSMON_CFG_MBWU_CTL_TYPE_MBWU 0x42 +#define MSMON_CFG_MBWU_CTL_TYPE_CSU 0x43 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_NONE 0 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_READ 1 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_WRITE 2 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_BOTH 3 + +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MAX 3 +#define MSMON_CFG_MBWU_CTL_SUBTYPE_MASK 0x3 + +/* + * MSMON_CFG_MBWU_FLT - Memory system performance monitor configure memory + * bandwidth usage monitor filter register + */ +#define MSMON_CFG_MBWU_FLT_PARTID GENMASK(15, 0) +#define MSMON_CFG_MBWU_FLT_PMG GENMASK(23, 16) +#define MSMON_CFG_MBWU_FLT_RWBW GENMASK(31, 30) + +/* + * MSMON_CSU - Memory system performance monitor cache storage usage monitor + * register + * MSMON_CSU_CAPTURE - Memory system performance monitor cache storage usage + * capture register + * MSMON_MBWU - Memory system performance monitor memory bandwidth usage + * monitor register + * MSMON_MBWU_CAPTURE - Memory system performance monitor memory bandwidth usage + * capture register + */ +#define MSMON___VALUE GENMASK(30, 0) +#define MSMON___NRDY BIT(31) +#define MSMON___NRDY_L BIT(63) +#define MSMON___L_VALUE GENMASK(43, 0) +#define MSMON___LWD_VALUE GENMASK(62, 0) + +/* + * MSMON_CAPT_EVNT - Memory system performance monitoring capture event + * generation register + */ +#define MSMON_CAPT_EVNT_NOW BIT(0) + +/* Used for PTG Yitian710 specific MB monitoring feature */ +#define MBWU_MASK GENMASK(23, 0) +#define MBWU_WINWD_MAX GENMASK(22, 0) +#define MBWU_GET(v) ((v) & MBWU_MASK) +#define MPAMF_CUST_MBWC_OFFSET 0x08 +#define MPAMF_CUST_WINDW_OFFSET 0x0C + +#endif /* MPAM_INTERNAL_H */ diff --git a/drivers/platform/mpam/mpam_resctrl.c b/drivers/platform/mpam/mpam_resctrl.c new file mode 100644 index 0000000000000000000000000000000000000000..98b3b1baa91e1319289e6a2050e08519f3d84e1b --- /dev/null +++ b/drivers/platform/mpam/mpam_resctrl.c @@ -0,0 +1,1267 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. 
+ +#define pr_fmt(fmt) "mpam: resctrl: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mpam_internal.h" + +u64 mpam_resctrl_default_group; + +DECLARE_WAIT_QUEUE_HEAD(resctrl_mon_ctx_waiters); + +/* + * The classes we've picked to map to resctrl resources. + * Class pointer may be NULL. + */ +static struct mpam_resctrl_res mpam_resctrl_exports[RDT_NUM_RESOURCES]; + +static bool exposed_alloc_capable; +static bool exposed_mon_capable; +static struct mpam_class *mbm_local_class; +static struct mpam_class *mbm_total_class; +static struct mpam_class *mbm_bps_class; + +/* + * MPAM emulates CDP by setting different PARTID in the I/D fields of MPAM1_EL1. + * This applies globally to all traffic the CPU generates. + */ +static bool cdp_enabled; + +/* + * If resctrl_init() succeeded, resctrl_exit() can be used to remove support + * for the filesystem in the event of an error. + */ +static bool resctrl_enabled; + +/* + * mpam_resctrl_pick_caches() needs to know the size of the caches. cacheinfo + * populates this from a device_initcall(). mpam_resctrl_setup() must wait. + */ +static bool cacheinfo_ready; +static DECLARE_WAIT_QUEUE_HEAD(wait_cacheinfo_ready); + +/* A dummy mon context to use when the monitors were allocated up front */ +u32 __mon_is_rmid_idx = USE_RMID_IDX; +void *mon_is_rmid_idx = &__mon_is_rmid_idx; + +bool resctrl_arch_alloc_capable(void) +{ + return exposed_alloc_capable; +} + +bool resctrl_arch_mon_capable(void) +{ + return exposed_mon_capable; +} + +bool resctrl_arch_is_mbm_local_enabled(void) +{ + return mbm_local_class; +} + +bool resctrl_arch_is_mbm_total_enabled(void) +{ + return mbm_total_class; +} + +bool resctrl_arch_is_mbm_bps_enabled(void) +{ + return mbm_bps_class; +} + +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level rid) +{ + switch (rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + return cdp_enabled; + case RDT_RESOURCE_MBA: + default: + /* + * x86's MBA control doesn't support CDP, so user-space doesn't + * expect it. + */ + return false; + } +} + +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable) +{ + u64 regval; + u32 partid, partid_i, partid_d; + + cdp_enabled = enable; + + partid = RESCTRL_RESERVED_CLOSID; + + if (enable) { + partid_d = resctrl_get_config_index(partid, CDP_DATA); + partid_i = resctrl_get_config_index(partid, CDP_CODE); + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid_d) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid_i); + } else { + regval = FIELD_PREP(MPAM_SYSREG_PARTID_D, partid) | + FIELD_PREP(MPAM_SYSREG_PARTID_I, partid); + } + + WRITE_ONCE(mpam_resctrl_default_group, regval); + + return 0; +} + +static bool mpam_resctrl_hide_cdp(enum resctrl_res_level rid) +{ + return cdp_enabled && !resctrl_arch_get_cdp_enabled(rid); +} + +/* + * An MSC may raise an error interrupt if it sees an out of range partid/pmg, + * and goes on to truncate the value. Regardless of what the hardware supports, + * only the system wide safe value can be used.
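+ * For example (hypothetical platform): if one MSC implements 64 PARTIDs
+ * but another only 16, mpam_partid_max is 15 and resctrl is told there
+ * are 16 closids, as that is all the hardware can safely support.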
+ */ +u32 resctrl_arch_get_num_closid(struct rdt_resource *ignored) +{ + return mpam_partid_max + 1; +} + +u32 resctrl_arch_system_num_rmid_idx(void) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 num_partid = resctrl_arch_get_num_closid(NULL); + + return num_partid << closid_shift; +} + +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + return (closid << closid_shift) | rmid; +} + +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid) +{ + u8 closid_shift = fls(mpam_pmg_max); + u32 pmg_mask = ~(~0 << closid_shift); + + if (WARN_ON_ONCE(closid_shift > 8)) + closid_shift = 8; + + *closid = idx >> closid_shift; + *rmid = idx & pmg_mask; +} + +void resctrl_arch_sched_in(struct task_struct *tsk) +{ + lockdep_assert_preemption_disabled(); + + mpam_thread_switch(tsk); +} + +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg) +{ + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(pmg > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_cpu_defaults(cpu, closid, closid, pmg, pmg); + } else { + /* + * When CDP is enabled, resctrl halves the closid range and we + * use odd/even partid for one closid. + */ + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_cpu_defaults(cpu, partid_d, partid_i, pmg, pmg); + } +} + +void resctrl_arch_sync_cpu_defaults(void *info) +{ + struct resctrl_cpu_sync *r = info; + + lockdep_assert_preemption_disabled(); + + if (r) { + resctrl_arch_set_cpu_default_closid_rmid(smp_processor_id(), + r->closid, r->rmid); + } + + resctrl_arch_sched_in(current); +} + +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + if (WARN_ON_ONCE(closid > U16_MAX) || WARN_ON_ONCE(rmid > U8_MAX)) + return; + + if (!cdp_enabled) { + mpam_set_task_partid_pmg(tsk, closid, closid, rmid, rmid); + } else { + u32 partid_d = resctrl_get_config_index(closid, CDP_DATA); + u32 partid_i = resctrl_get_config_index(closid, CDP_CODE); + + mpam_set_task_partid_pmg(tsk, partid_d, partid_i, rmid, rmid); + } +} + +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return tsk_closid == closid; +} + +/* The task's pmg is not unique, the partid must be considered too */ +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid) +{ + u64 regval = mpam_get_regval(tsk); + u32 tsk_closid = FIELD_GET(MPAM_SYSREG_PARTID_D, regval); + u32 tsk_rmid = FIELD_GET(MPAM_SYSREG_PMG_D, regval); + + if (cdp_enabled) + tsk_closid >>= 1; + + return (tsk_closid == closid) && (tsk_rmid == rmid); +} + +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l) +{ + if (l >= RDT_NUM_RESOURCES) + return NULL; + + return &mpam_resctrl_exports[l].resctrl_res; +} + +static void *resctrl_arch_mon_ctx_alloc_no_wait(struct rdt_resource *r, + int evtid) +{ + int mon; + struct mpam_resctrl_res *res; + u32 *ret = kmalloc(sizeof(*ret), GFP_KERNEL); + + if (!ret) + return ERR_PTR(-ENOMEM); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + mon = mpam_alloc_csu_mon(res->class); + if (mon < 0) { + /* e.g. -ENOSPC: the caller waits for a monitor to be freed */ + kfree(ret); + return ERR_PTR(mon); + } + *ret = mon; + return ret; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + kfree(ret); + return mon_is_rmid_idx; + case QOS_MC_MBM_BPS_EVENT_ID: + if
(mpam_current_machine == MPAM_YITIAN710) { + kfree(ret); + return mon_is_rmid_idx; + } + kfree(ret); + return ERR_PTR(-EOPNOTSUPP); + } + + kfree(ret); + return ERR_PTR(-EOPNOTSUPP); +} + +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid) +{ + DEFINE_WAIT(wait); + void *ret; + + might_sleep(); + + do { + prepare_to_wait(&resctrl_mon_ctx_waiters, &wait, + TASK_INTERRUPTIBLE); + ret = resctrl_arch_mon_ctx_alloc_no_wait(r, evtid); + if (PTR_ERR(ret) == -ENOSPC) + schedule(); + } while (PTR_ERR(ret) == -ENOSPC && !signal_pending(current)); + finish_wait(&resctrl_mon_ctx_waiters, &wait); + + return ret; +} + +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, + void *arch_mon_ctx) +{ + struct mpam_resctrl_res *res; + u32 mon = *(u32 *)arch_mon_ctx; + + if (mon == USE_RMID_IDX) + return; + kfree(arch_mon_ctx); + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + + switch (evtid) { + case QOS_L3_OCCUP_EVENT_ID: + mpam_free_csu_mon(res->class, mon); + wake_up(&resctrl_mon_ctx_waiters); + return; + case QOS_L3_MBM_TOTAL_EVENT_ID: + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_MC_MBM_BPS_EVENT_ID: + return; + } +} + +static enum mon_filter_options resctrl_evt_config_to_mpam(u32 local_evt_cfg) +{ + switch (local_evt_cfg) { + case READS_TO_LOCAL_MEM: + return COUNT_READ; + case NON_TEMP_WRITE_TO_LOCAL_MEM: + return COUNT_WRITE; + default: + return COUNT_BOTH; + } +} + +int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx) +{ + int err; + u64 cdp_val; + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + u32 mon = *(u32 *)arch_mon_ctx; + enum mpam_device_features type; + + resctrl_arch_rmid_read_context_check(); + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + switch (eventid) { + case QOS_L3_OCCUP_EVENT_ID: + type = mpam_feat_msmon_csu; + break; + case QOS_L3_MBM_LOCAL_EVENT_ID: + case QOS_L3_MBM_TOTAL_EVENT_ID: + type = mpam_feat_msmon_mbwu; + break; + case QOS_MC_MBM_BPS_EVENT_ID: + if (mpam_current_machine != MPAM_YITIAN710) + return -EINVAL; + type = mpam_feat_impl_msmon_mbwu; + break; + default: + return -EINVAL; + } + + if (mon == USE_RMID_IDX) + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + else + cfg.mon = mon; + + cfg.match_pmg = true; + cfg.pmg = rmid; + cfg.opts = resctrl_evt_config_to_mpam(dom->mbm_local_evt_cfg); + + if (cdp_enabled) { + cfg.partid = closid << 1; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + if (err) + return err; + + cfg.partid += 1; + err = mpam_msmon_read(dom->comp, &cfg, type, &cdp_val); + if (!err) + *val += cdp_val; + } else { + cfg.partid = closid; + err = mpam_msmon_read(dom->comp, &cfg, type, val); + } + + return err; +} + +void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid, enum resctrl_event_id eventid) +{ + struct mon_cfg cfg; + struct mpam_resctrl_dom *dom; + + if (eventid != QOS_L3_MBM_LOCAL_EVENT_ID) + return; + + cfg.mon = resctrl_arch_rmid_idx_encode(closid, rmid); + cfg.match_pmg = true; + cfg.pmg = rmid; + + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cdp_enabled) { + cfg.partid = closid << 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + + cfg.partid += 1; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } else { + cfg.partid = closid; + mpam_msmon_reset_mbwu(dom->comp, &cfg); + } +} + +/* + * The rmid realloc threshold should be for the smallest cache exposed to + * resctrl.
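+ * As a worked example (hypothetical sizes): with a 32MB L3,
+ * mpam_partid_max = 63 and mpam_pmg_max = 1, there are 64 << 1 = 128
+ * unique partid*pmg values, so the realloc threshold becomes 256KB.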
+ */ +static void update_rmid_limits(unsigned int size) +{ + u32 num_unique_pmg = resctrl_arch_system_num_rmid_idx(); + + if (WARN_ON_ONCE(!size)) + return; + + if (resctrl_rmid_realloc_limit && size > resctrl_rmid_realloc_limit) + return; + + resctrl_rmid_realloc_limit = size; + resctrl_rmid_realloc_threshold = size / num_unique_pmg; +} + +static bool cache_has_usable_cpor(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_cpor_part, cprops)) + return false; + + /* TODO: Scaling is not yet supported */ + return (class->props.cpbm_wd <= RESCTRL_MAX_CBM); +} + +static bool cache_has_usable_csu(struct mpam_class *class) +{ + struct mpam_props *cprops; + + if (!class) + return false; + + cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_csu, cprops)) + return false; + + /* + * CSU counters settle on the value, so we can get away with + * having only one. + */ + if (!cprops->num_csu_mon) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +bool resctrl_arch_is_llc_occupancy_enabled(void) +{ + return cache_has_usable_csu(mpam_resctrl_exports[RDT_RESOURCE_L3].class); +} + +static bool class_has_usable_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_msmon_mbwu, cprops)) + return false; + + /* + * resctrl expects the bandwidth counters to be free running, + * which means we need as many monitors as resctrl has + * control/monitor groups. + */ + if (cprops->num_mbwu_mon < resctrl_arch_system_num_rmid_idx()) + return false; + + return (mpam_partid_max > 1) || (mpam_pmg_max != 0); +} + +static bool class_has_usable_impl_mbwu(struct mpam_class *class) +{ + struct mpam_props *cprops = &class->props; + + if (!mpam_has_feature(mpam_feat_impl_msmon_mbwu, cprops)) + return false; + + return true; +} + +static bool mba_class_use_mbw_part(struct mpam_props *cprops) +{ + /* TODO: Scaling is not yet supported */ + return (mpam_has_feature(mpam_feat_mbw_part, cprops) && + cprops->mbw_pbm_bits < MAX_MBA_BW); +} + +static bool class_has_usable_mba(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops) || + mpam_has_feature(mpam_feat_mbw_max, cprops)) + return true; + + return false; +} + +/* + * Calculate the percentage change from each implemented bit in the control + * This can return 0 when BWA_WD is greater than 6. (100 / (1<<7) == 0) + */ +static u32 get_mba_granularity(struct mpam_props *cprops) +{ + if (mba_class_use_mbw_part(cprops)) { + return MAX_MBA_BW / cprops->mbw_pbm_bits; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + /* + * bwa_wd is the number of bits implemented in the 0.xxx + * fixed point fraction. 1 bit is 50%, 2 is 25% etc. + */ + return max_t(u32, 1, (MAX_MBA_BW / BIT(cprops->bwa_wd))); + } + + return 0; +} + +static u32 mbw_pbm_to_percent(unsigned long mbw_pbm, struct mpam_props *cprops) +{ + u32 bit, result = 0, granularity = get_mba_granularity(cprops); + + for_each_set_bit(bit, &mbw_pbm, cprops->mbw_pbm_bits % 32) { + result += granularity; + } + + return result; +} + +static u32 mbw_max_to_percent(u16 mbw_max, struct mpam_props *cprops) +{ + u8 bit; + u32 divisor = 2, value = 0; + + for (bit = 15; bit; bit--) { + if (mbw_max & BIT(bit)) + /* + * Left shift by 16 bits to preserve the precision of + * the division operation. + */ + value += (MAX_MBA_BW << 16) / divisor; + divisor <<= 1; + } + + /* Use the upper bound of the fixed-point fraction. 
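+ * For example (hypothetical control value): with bwa_wd = 2 and
+ * mbw_max = 0xC000, the loop above accumulates 50% + 25%, and the
+ * correction below adds MAX_MBA_BW >> 2 = 25%, reading back as 100%.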
*/ + value = (value + (MAX_MBA_BW << (16 - cprops->bwa_wd))) >> 16; + + return value; +} + +static u32 percent_to_mbw_pbm(u8 pc, struct mpam_props *cprops) +{ + u32 granularity = get_mba_granularity(cprops); + u8 num_bits = pc / granularity; + + if (!num_bits) + return 0; + + /* TODO: pick bits at random to avoid contention */ + return (1 << num_bits) - 1; +} + +static u16 percent_to_mbw_max(u8 pc, struct mpam_props *cprops) +{ + u8 bit; + u32 granularity, pc_ls, divisor = 2, value = 0; + + if (WARN_ON_ONCE(cprops->bwa_wd > 15)) + return MAX_MBA_BW; + + /* Set the pc value to be a multiple of granularity. */ + granularity = get_mba_granularity(cprops); + pc = roundup(pc, (u8) granularity); + if (pc > 100) + pc = 100; + + /* + * Left shift by 16 bits to preserve the precision of the division + * operation. + */ + pc_ls = (u32) pc << 16; + + for (bit = 15; bit; bit--) { + if (pc_ls >= (MAX_MBA_BW << 16) / divisor) { + pc_ls -= (MAX_MBA_BW << 16) / divisor; + value |= BIT(bit); + } + divisor <<= 1; + + if (!pc_ls || !((MAX_MBA_BW << 16) / divisor)) + break; + } + + value &= GENMASK(15, 15 - cprops->bwa_wd + 1); + + return value; +} + +/* Test whether we can export MPAM_CLASS_CACHE:{2,3}? */ +static void mpam_resctrl_pick_caches(void) +{ + int idx; + unsigned int cache_size; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + lockdep_assert_cpus_held(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; + bool has_cpor = cache_has_usable_cpor(class); + + if (class->type != MPAM_CLASS_CACHE) { + pr_debug("pick_caches: Class is not a cache\n"); + continue; + } + + if (class->level != 2 && class->level != 3) { + pr_debug("pick_caches: not L2 or L3\n"); + continue; + } + + if (class->level == 2 && !has_cpor) { + pr_debug("pick_caches: L2 missing CPOR\n"); + continue; + } else if (!has_cpor && !cache_has_usable_csu(class)) { + pr_debug("pick_caches: Cache lacks both CPOR and CSU\n"); + continue; + } + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) { + pr_debug("pick_caches: Class has missing CPUs\n"); + continue; + } + + /* Assume cache levels are the same size for all CPUs... */ + cache_size = get_cpu_cacheinfo_size(smp_processor_id(), class->level); + if (!cache_size) { + pr_debug("pick_caches: Could not read cache size\n"); + continue; + } + + if (mpam_has_feature(mpam_feat_msmon_csu, cprops)) + update_rmid_limits(cache_size); + + if (class->level == 2) { + res = &mpam_resctrl_exports[RDT_RESOURCE_L2]; + res->resctrl_res.name = "L2"; + } else { + res = &mpam_resctrl_exports[RDT_RESOURCE_L3]; + res->resctrl_res.name = "L3"; + } + res->class = class; + } + srcu_read_unlock(&mpam_srcu, idx); +} + +static void mpam_resctrl_pick_mba(void) +{ + struct mpam_class *class, *candidate_class = NULL; + struct mpam_resctrl_res *res; + int idx; + + lockdep_assert_cpus_held(); + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) { + struct mpam_props *cprops = &class->props; + + if (class->level < 3) + continue; + + if (!class_has_usable_mba(cprops)) + continue; + + if (!cpumask_equal(&class->affinity, cpu_possible_mask)) + continue; + + /* + * mba_sc reads the mbm_local counter, and waggles the MBA controls. + * mbm_local is implicitly part of the L3, pick a resource to be MBA + * that is as close as possible to the L3.
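+ * (Hypothetical example: with usable MBA controls at both level 3 and
+ * level 4, the level 3 class is chosen, being closest to the L3.)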
+		 */
+		if (!candidate_class || class->level < candidate_class->level)
+			candidate_class = class;
+	}
+	srcu_read_unlock(&mpam_srcu, idx);
+
+	if (candidate_class) {
+		res = &mpam_resctrl_exports[RDT_RESOURCE_MBA];
+		res->class = candidate_class;
+		res->resctrl_res.name = "MB";
+	}
+}
+
+bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
+{
+	struct mpam_props *cprops;
+
+	switch (evt) {
+	case QOS_L3_MBM_LOCAL_EVENT_ID:
+		if (!mbm_local_class)
+			return false;
+		cprops = &mbm_local_class->props;
+
+		return mpam_has_feature(mpam_feat_msmon_mbwu_rwbw, cprops);
+	default:
+		return false;
+	}
+}
+
+void resctrl_arch_mon_event_config_read(void *info)
+{
+	struct mpam_resctrl_dom *dom;
+	struct resctrl_mon_config_info *mon_info = info;
+
+	dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom);
+	mon_info->mon_config = dom->mbm_local_evt_cfg & MAX_EVT_CONFIG_BITS;
+}
+
+void resctrl_arch_mon_event_config_write(void *info)
+{
+	struct mpam_resctrl_dom *dom;
+	struct resctrl_mon_config_info *mon_info = info;
+
+	if (mon_info->mon_config & ~MPAM_RESTRL_EVT_CONFIG_VALID) {
+		mon_info->err = -EOPNOTSUPP;
+		return;
+	}
+
+	dom = container_of(mon_info->d, struct mpam_resctrl_dom, resctrl_dom);
+	dom->mbm_local_evt_cfg = mon_info->mon_config & MPAM_RESTRL_EVT_CONFIG_VALID;
+}
+
+void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
+{
+	struct mpam_resctrl_dom *dom;
+
+	dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom);
+	dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID;
+	mpam_msmon_reset_all_mbwu(dom->comp);
+}
+
+static int mpam_resctrl_resource_init(struct mpam_resctrl_res *res)
+{
+	struct mpam_class *class = res->class;
+	struct rdt_resource *r = &res->resctrl_res;
+	bool has_mbwu = class_has_usable_mbwu(class);
+
+	/* Is this one of the two well-known caches? */
+	if (res->resctrl_res.rid == RDT_RESOURCE_L2 ||
+	    res->resctrl_res.rid == RDT_RESOURCE_L3) {
+		bool has_csu = cache_has_usable_csu(class);
+
+		r->cache_level = class->level;
+
+		/* TODO: Scaling is not yet supported */
+		r->cache.cbm_len = class->props.cpbm_wd;
+		r->cache.arch_has_sparse_bitmasks = true;
+
+		/* mpam_devices will reject empty bitmaps */
+		r->cache.min_cbm_bits = 1;
+
+		/* TODO: kill these properties off as they are derivatives */
+		r->format_str = "%d=%0*x";
+		r->fflags = RFTYPE_RES_CACHE;
+		r->default_ctrl = BIT_MASK(class->props.cpbm_wd) - 1;
+		r->data_width = (class->props.cpbm_wd + 3) / 4;
+
+		/*
+		 * Which bits are shared with other ...things...
+		 * Unknown devices use partid-0 which uses all the bitmap
+		 * fields. Until we configure the SMMU and GIC not to do this,
+		 * 'all the bits' is the correct answer here.
+		 */
+		r->cache.shareable_bits = r->default_ctrl;
+
+		if (mpam_has_feature(mpam_feat_cpor_part, &class->props)) {
+			r->alloc_capable = true;
+			exposed_alloc_capable = true;
+		}
+
+		/*
+		 * MBWU counters may be 'local' or 'total' depending on where
+		 * they are in the topology. Counters on caches are assumed to
+		 * be local. If it's on the memory controller, it's assumed to
+		 * be global.
+		 */
+		if (has_mbwu && class->level >= 3) {
+			mbm_local_class = class;
+			r->mon_capable = true;
+		}
+
+		/*
+		 * CSU counters only make sense on a cache. The file is called
+		 * llc_occupancy, but it's expected to be on the L3.
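+		 * A CSU counter on the L2 would have no resctrl file to
+		 * back it, so L2 occupancy monitors are left unexposed.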
+		 */
+		if (has_csu && class->type == MPAM_CLASS_CACHE &&
+		    class->level == 3) {
+			r->mon_capable = true;
+		}
+	} else if (res->resctrl_res.rid == RDT_RESOURCE_MBA) {
+		struct mpam_props *cprops = &class->props;
+
+		/* TODO: kill these properties off as they are derivatives */
+		r->format_str = "%d=%*u";
+		r->fflags = RFTYPE_RES_MB;
+		r->default_ctrl = MAX_MBA_BW;
+		r->data_width = 3;
+
+		r->membw.delay_linear = true;
+		r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
+		r->membw.bw_gran = get_mba_granularity(cprops);
+		r->membw.min_bw = r->membw.bw_gran;
+
+		/* Round up to at least 1% */
+		if (!r->membw.bw_gran)
+			r->membw.bw_gran = 1;
+
+		if (class_has_usable_mba(cprops)) {
+			r->alloc_capable = true;
+			exposed_alloc_capable = true;
+		}
+
+		if (has_mbwu && class->type == MPAM_CLASS_MEMORY) {
+			mbm_total_class = class;
+			r->mon_capable = true;
+		} else if (class_has_usable_impl_mbwu(class)) {
+			r->mon_capable = true;
+			if (mpam_current_machine == MPAM_YITIAN710)
+				mbm_bps_class = class;
+		}
+	}
+
+	if (r->mon_capable) {
+		exposed_mon_capable = true;
+
+		/*
+		 * Unfortunately, num_rmid doesn't mean anything for
+		 * mpam, and it's exposed to user-space!
+		 * num_rmid is supposed to mean the number of groups
+		 * that can be created, both control and monitor groups.
+		 * For mpam, each control group has its own pmg/rmid
+		 * space.
+		 */
+		r->num_rmid = 1;
+	}
+
+	return 0;
+}
+
+int mpam_resctrl_setup(void)
+{
+	int err = 0;
+	struct mpam_resctrl_res *res;
+	enum resctrl_res_level i;
+
+	wait_event(wait_cacheinfo_ready, cacheinfo_ready);
+
+	cpus_read_lock();
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		res = &mpam_resctrl_exports[i];
+		INIT_LIST_HEAD(&res->resctrl_res.domains);
+		INIT_LIST_HEAD(&res->resctrl_res.evt_list);
+		res->resctrl_res.rid = i;
+	}
+
+	mpam_resctrl_pick_caches();
+	mpam_resctrl_pick_mba();
+	/* TODO: mpam_resctrl_pick_counters(); */
+
+	for (i = 0; i < RDT_NUM_RESOURCES; i++) {
+		res = &mpam_resctrl_exports[i];
+		if (!res->class)
+			continue;	// dummy resource
+
+		err = mpam_resctrl_resource_init(res);
+		if (err)
+			break;
+	}
+	cpus_read_unlock();
+
+	if (!err && !exposed_alloc_capable && !exposed_mon_capable)
+		err = -EOPNOTSUPP;
+
+	if (!err) {
+		if (!is_power_of_2(mpam_pmg_max + 1)) {
+			/*
+			 * If not all the partid*pmg values are valid indexes,
+			 * resctrl may allocate pmg that don't exist. This
+			 * should cause an error interrupt.
+			 */
+			pr_warn("Number of PMG is not a power of 2! 
resctrl may misbehave"); + } + + err = resctrl_init(); + if (!err) + WRITE_ONCE(resctrl_enabled, true); + } + + return err; +} + +void mpam_resctrl_exit(void) +{ + if (!READ_ONCE(resctrl_enabled)) + return; + + WRITE_ONCE(resctrl_enabled, false); + resctrl_exit(); +} + +u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type type) +{ + u32 partid; + struct mpam_config *cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + enum mpam_device_features configured_by; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return r->default_ctrl; + + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + if (mpam_resctrl_hide_cdp(r->rid)) + partid = resctrl_get_config_index(closid, CDP_CODE); + else + partid = resctrl_get_config_index(closid, type); + cfg = &dom->comp->cfg[partid]; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + configured_by = mpam_feat_cpor_part; + break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + configured_by = mpam_feat_mbw_part; + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + configured_by = mpam_feat_mbw_max; + break; + } + fallthrough; + default: + return -EINVAL; + } + + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r) || + !mpam_has_feature(configured_by, cfg)) + return r->default_ctrl; + + switch (configured_by) { + case mpam_feat_cpor_part: + /* TODO: Scaling is not yet supported */ + return cfg->cpbm; + case mpam_feat_mbw_part: + /* TODO: Scaling is not yet supported */ + return mbw_pbm_to_percent(cfg->mbw_pbm, cprops); + case mpam_feat_mbw_max: + return mbw_max_to_percent(cfg->mbw_max, cprops); + default: + return -EINVAL; + } +} + +int resctrl_arch_update_one(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, enum resctrl_conf_type t, u32 cfg_val) +{ + int err; + u32 partid; + struct mpam_config cfg; + struct mpam_props *cprops; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + /* + * NOTE: don't check the CPU as mpam_apply_config() doesn't care, + * and resctrl_arch_update_domains() depends on this. + */ + res = container_of(r, struct mpam_resctrl_res, resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + cprops = &res->class->props; + + partid = resctrl_get_config_index(closid, t); + if (!r->alloc_capable || partid >= resctrl_arch_get_num_closid(r)) + return -EINVAL; + + switch (r->rid) { + case RDT_RESOURCE_L2: + case RDT_RESOURCE_L3: + /* TODO: Scaling is not yet supported */ + cfg.cpbm = cfg_val; + mpam_set_feature(mpam_feat_cpor_part, &cfg); + break; + case RDT_RESOURCE_MBA: + if (mba_class_use_mbw_part(cprops)) { + cfg.mbw_pbm = percent_to_mbw_pbm(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_part, &cfg); + break; + } else if (mpam_has_feature(mpam_feat_mbw_max, cprops)) { + cfg.mbw_max = percent_to_mbw_max(cfg_val, cprops); + mpam_set_feature(mpam_feat_mbw_max, &cfg); + break; + } + fallthrough; + default: + return -EINVAL; + } + + /* + * When CDP is enabled, but the resource doesn't support it, we need to + * apply the same configuration to the other partid. 
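+	 * resctrl hands us only one of the two slots of the closid, so
+	 * write both the CODE and DATA partids below to keep the pair
+	 * coherent.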
+ */ + if (mpam_resctrl_hide_cdp(r->rid)) { + partid = resctrl_get_config_index(closid, CDP_CODE); + err = mpam_apply_config(dom->comp, partid, &cfg); + if (err) + return err; + + partid = resctrl_get_config_index(closid, CDP_DATA); + return mpam_apply_config(dom->comp, partid, &cfg); + + } else { + return mpam_apply_config(dom->comp, partid, &cfg); + } +} + +/* TODO: this is IPI heavy */ +int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid) +{ + int err = 0; + struct rdt_domain *d; + enum resctrl_conf_type t; + struct resctrl_staged_config *cfg; + + lockdep_assert_cpus_held(); + lockdep_assert_irqs_enabled(); + + list_for_each_entry(d, &r->domains, list) { + for (t = 0; t < CDP_NUM_TYPES; t++) { + cfg = &d->staged_config[t]; + if (!cfg->have_new_ctrl) + continue; + + err = resctrl_arch_update_one(r, d, closid, t, + cfg->new_ctrl); + if (err) + return err; + } + } + + return err; +} + +void resctrl_arch_reset_resources(void) +{ + int i, idx; + struct mpam_class *class; + struct mpam_resctrl_res *res; + + lockdep_assert_cpus_held(); + + if (!mpam_is_enabled()) + return; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + if (!res->resctrl_res.alloc_capable) + continue; + + idx = srcu_read_lock(&mpam_srcu); + list_for_each_entry_rcu(class, &mpam_classes, classes_list) + mpam_reset_class(class); + srcu_read_unlock(&mpam_srcu, idx); + } +} + +static struct mpam_resctrl_dom * +mpam_resctrl_alloc_domain(unsigned int cpu, struct mpam_resctrl_res *res) +{ + struct mpam_resctrl_dom *dom; + struct mpam_class *class = res->class; + struct mpam_component *comp_iter, *comp; + + comp = NULL; + list_for_each_entry(comp_iter, &class->components, class_list) { + if (cpumask_test_cpu(cpu, &comp_iter->affinity)) { + comp = comp_iter; + break; + } + } + + /* cpu with unknown exported component? 
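+	 * Every CPU in the class's affinity mask should belong to exactly
+	 * one component, so reaching here means the firmware tables
+	 * described a CPU that was never placed in a component.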
*/ + if (WARN_ON_ONCE(!comp)) + return ERR_PTR(-EINVAL); + + dom = kzalloc_node(sizeof(*dom), GFP_KERNEL, cpu_to_node(cpu)); + if (!dom) + return ERR_PTR(-ENOMEM); + + dom->comp = comp; + INIT_LIST_HEAD(&dom->resctrl_dom.list); + dom->resctrl_dom.id = comp->comp_id; + dom->mbm_local_evt_cfg = MPAM_RESTRL_EVT_CONFIG_VALID; + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + + /* TODO: this list should be sorted */ + list_add_tail(&dom->resctrl_dom.list, &res->resctrl_res.domains); + + return dom; +} + +/* Like resctrl_get_domain_from_cpu(), but for offline CPUs */ +static struct mpam_resctrl_dom * +mpam_get_domain_from_cpu(int cpu, struct mpam_resctrl_res *res) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &res->resctrl_res.domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + if (cpumask_test_cpu(cpu, &dom->comp->affinity)) + return dom; + } + + return NULL; +} + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id) +{ + struct rdt_domain *d; + struct mpam_resctrl_dom *dom; + + lockdep_assert_cpus_held(); + + list_for_each_entry(d, &r->domains, list) { + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + if (dom->comp->comp_id == id) + return &dom->resctrl_dom; + } + + return NULL; +} + +int mpam_resctrl_online_cpu(unsigned int cpu) +{ + int i, err; + struct mpam_resctrl_dom *dom; + struct mpam_resctrl_res *res; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy_resource; + + dom = mpam_get_domain_from_cpu(cpu, res); + if (dom) { + cpumask_set_cpu(cpu, &dom->resctrl_dom.cpu_mask); + continue; + } + + dom = mpam_resctrl_alloc_domain(cpu, res); + if (IS_ERR(dom)) + return PTR_ERR(dom); + err = resctrl_online_domain(&res->resctrl_res, &dom->resctrl_dom); + if (err) + return err; + } + + resctrl_online_cpu(cpu); + return 0; +} + +int mpam_resctrl_offline_cpu(unsigned int cpu) +{ + int i; + struct rdt_domain *d; + struct mpam_resctrl_res *res; + struct mpam_resctrl_dom *dom; + + resctrl_offline_cpu(cpu); + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + res = &mpam_resctrl_exports[i]; + + if (!res->class) + continue; // dummy resource + + d = resctrl_get_domain_from_cpu(cpu, &res->resctrl_res); + dom = container_of(d, struct mpam_resctrl_dom, resctrl_dom); + + /* The last one standing was ahead of us... 
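+		 * i.e. no domain matched this CPU, which should only happen
+		 * if the domain was already freed by another CPU's offline
+		 * path. Skip it rather than touch a missing domain.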
*/ + if (WARN_ON_ONCE(!d)) + continue; + + cpumask_clear_cpu(cpu, &d->cpu_mask); + + if (!cpumask_empty(&d->cpu_mask)) + continue; + + resctrl_offline_domain(&res->resctrl_res, &dom->resctrl_dom); + list_del(&d->list); + kfree(dom); + } + + return 0; +} + +static int __init __cacheinfo_ready(void) +{ + cacheinfo_ready = true; + wake_up(&wait_cacheinfo_ready); + + return 0; +} +device_initcall_sync(__cacheinfo_ready); diff --git a/drivers/platform/sw64/Makefile b/drivers/platform/sw64/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..28922224fb1763030b979bb8cd2ce204d41dfd80 --- /dev/null +++ b/drivers/platform/sw64/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-$(CONFIG_PLATFORM_XUELANG) += legacy_xuelang.o diff --git a/drivers/platform/sw64/legacy_xuelang.c b/drivers/platform/sw64/legacy_xuelang.c new file mode 100644 index 0000000000000000000000000000000000000000..8a63d9edf9f230a6137c28e641c966aa5b72d16b --- /dev/null +++ b/drivers/platform/sw64/legacy_xuelang.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +#include +#include + +static void vt_mode_kill_arch(int mode) +{ + hcall(HCALL_SET_CLOCKEVENT, 0, 0, 0); + + switch (mode) { + case LINUX_REBOOT_CMD_RESTART: + hcall(HCALL_RESTART, 0, 0, 0); + mb(); + break; + case LINUX_REBOOT_CMD_HALT: + case LINUX_REBOOT_CMD_POWER_OFF: + hcall(HCALL_SHUTDOWN, 0, 0, 0); + mb(); + break; + default: + break; + } +} + +void sw64_halt(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_HALT); +} + +void sw64_poweroff(void) +{ + if (is_in_host()) + cpld_write(0x64, 0x00, 0xf0); + else + vt_mode_kill_arch(LINUX_REBOOT_CMD_POWER_OFF); +} + +void sw64_restart(void) +{ + if (is_in_host()) { + fix_jm585_reset(); + cpld_write(0x64, 0x00, 0xc3); + } else + vt_mode_kill_arch(LINUX_REBOOT_CMD_RESTART); +} + +static int sw64_reset_init(void) +{ +#ifdef CONFIG_EFI + if (BIOS_SUPPORT_RESET_CLALLBACK((void *)bios_version)) + return 0; +#endif + pm_restart = sw64_restart; + pm_power_off = sw64_poweroff; + pm_halt = sw64_halt; + return 0; +} +subsys_initcall(sw64_reset_init); diff --git a/drivers/platform/x86/intel/ifs/core.c b/drivers/platform/x86/intel/ifs/core.c index 306f886b52d2084e90c03108d489713c8050b652..7b11198d85a19a98864cca0bc0e434072351b497 100644 --- a/drivers/platform/x86/intel/ifs/core.c +++ b/drivers/platform/x86/intel/ifs/core.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright(c) 2022 Intel Corporation. 
*/ +#include #include #include #include @@ -10,13 +11,16 @@ #include "ifs.h" -#define X86_MATCH(model) \ +#define X86_MATCH(model, array_gen) \ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, \ - INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, NULL) + INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen) static const struct x86_cpu_id ifs_cpu_ids[] __initconst = { - X86_MATCH(SAPPHIRERAPIDS_X), - X86_MATCH(EMERALDRAPIDS_X), + X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0), + X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0), + X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0), + X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1), {} }; MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids); @@ -94,6 +98,9 @@ static int __init ifs_init(void) for (i = 0; i < IFS_NUMTESTS; i++) { if (!(msrval & BIT(ifs_devices[i].test_caps->integrity_cap_bit))) continue; + ifs_devices[i].rw_data.generation = FIELD_GET(MSR_INTEGRITY_CAPS_SAF_GEN_MASK, + msrval); + ifs_devices[i].rw_data.array_gen = (u32)m->driver_data; ret = misc_register(&ifs_devices[i].misc); if (ret) goto err_exit; diff --git a/drivers/platform/x86/intel/ifs/ifs.h b/drivers/platform/x86/intel/ifs/ifs.h index 93191855890f2cddde4b28bab5bf73a23dabccd7..56b9f3e3cf76a02fa28a1035d9833235449fe9d3 100644 --- a/drivers/platform/x86/intel/ifs/ifs.h +++ b/drivers/platform/x86/intel/ifs/ifs.h @@ -137,6 +137,10 @@ #define MSR_CHUNKS_AUTHENTICATION_STATUS 0x000002c5 #define MSR_ACTIVATE_SCAN 0x000002c6 #define MSR_SCAN_STATUS 0x000002c7 +#define MSR_ARRAY_TRIGGER 0x000002d6 +#define MSR_ARRAY_STATUS 0x000002d7 +#define MSR_SAF_CTRL 0x000004f0 + #define SCAN_NOT_TESTED 0 #define SCAN_TEST_PASS 1 #define SCAN_TEST_FAIL 2 @@ -144,6 +148,9 @@ #define IFS_TYPE_SAF 0 #define IFS_TYPE_ARRAY_BIST 1 +#define ARRAY_GEN0 0 +#define ARRAY_GEN1 1 + /* MSR_SCAN_HASHES_STATUS bit fields */ union ifs_scan_hashes_status { u64 data; @@ -158,6 +165,19 @@ union ifs_scan_hashes_status { }; }; +union ifs_scan_hashes_status_gen2 { + u64 data; + struct { + u16 chunk_size; + u16 num_chunks; + u32 error_code :8; + u32 chunks_in_stride :9; + u32 rsvd :2; + u32 max_core_limit :12; + u32 valid :1; + }; +}; + /* MSR_CHUNKS_AUTH_STATUS bit fields */ union ifs_chunks_auth_status { u64 data; @@ -170,13 +190,31 @@ union ifs_chunks_auth_status { }; }; +union ifs_chunks_auth_status_gen2 { + u64 data; + struct { + u16 valid_chunks; + u16 total_chunks; + u32 error_code :8; + u32 rsvd2 :24; + }; +}; + /* MSR_ACTIVATE_SCAN bit fields */ union ifs_scan { u64 data; struct { - u32 start :8; - u32 stop :8; - u32 rsvd :16; + union { + struct { + u8 start; + u8 stop; + u16 rsvd; + } gen0; + struct { + u16 start; + u16 stop; + } gen2; + }; u32 delay :31; u32 sigmce :1; }; @@ -186,9 +224,17 @@ union ifs_scan { union ifs_status { u64 data; struct { - u32 chunk_num :8; - u32 chunk_stop_index :8; - u32 rsvd1 :16; + union { + struct { + u8 chunk_num; + u8 chunk_stop_index; + u16 rsvd1; + } gen0; + struct { + u16 chunk_num; + u16 chunk_stop_index; + } gen2; + }; u32 error_code :8; u32 rsvd2 :22; u32 control_error :1; @@ -229,6 +275,9 @@ struct ifs_test_caps { * @status: it holds simple status pass/fail/untested * @scan_details: opaque scan status code from h/w * @cur_batch: number indicating the currently loaded test file + * @generation: IFS test generation enumerated by hardware + * @chunk_size: size of a test chunk + * @array_gen: test generation of array test */ struct ifs_data { int loaded_version; @@ -238,6 +287,9 @@ struct ifs_data { int status; u64 scan_details; u32 cur_batch; + u32 generation; + u32 
chunk_size; + u32 array_gen; }; struct ifs_work { diff --git a/drivers/platform/x86/intel/ifs/load.c b/drivers/platform/x86/intel/ifs/load.c index cefd0d886cfd4d61747d6fae9fd8270b2f2e5320..2cf3b4a8813f9b30cb5a79aaf2ee6acee2474c68 100644 --- a/drivers/platform/x86/intel/ifs/load.c +++ b/drivers/platform/x86/intel/ifs/load.c @@ -2,6 +2,7 @@ /* Copyright(c) 2022 Intel Corporation. */ #include +#include #include #include @@ -26,6 +27,11 @@ union meta_data { #define IFS_HEADER_SIZE (sizeof(struct microcode_header_intel)) #define META_TYPE_IFS 1 +#define INVALIDATE_STRIDE 0x1UL +#define IFS_GEN_STRIDE_AWARE 2 +#define AUTH_INTERRUPTED_ERROR 5 +#define IFS_AUTH_RETRY_CT 10 + static struct microcode_header_intel *ifs_header_ptr; /* pointer to the ifs image header */ static u64 ifs_hash_ptr; /* Address of ifs metadata (hash) */ static u64 ifs_test_image_ptr; /* 256B aligned address of test pattern */ @@ -44,7 +50,10 @@ static const char * const scan_hash_status[] = { static const char * const scan_authentication_status[] = { [0] = "No error reported", [1] = "Attempt to authenticate a chunk which is already marked as authentic", - [2] = "Chunk authentication error. The hash of chunk did not match expected value" + [2] = "Chunk authentication error. The hash of chunk did not match expected value", + [3] = "Reserved", + [4] = "Chunk outside the current stride", + [5] = "Authentication flow interrupted", }; #define MC_HEADER_META_TYPE_END (0) @@ -80,6 +89,23 @@ static struct metadata_header *find_meta_data(void *ucode, unsigned int meta_typ return NULL; } +static void hashcopy_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_hash_status)) + dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); + else + dev_err(dev, "Hash copy error : %s\n", scan_hash_status[err_code]); +} + +static void auth_err_message(struct device *dev, u32 err_code) +{ + if (err_code >= ARRAY_SIZE(scan_authentication_status)) + dev_err(dev, "invalid error code 0x%x for authentication\n", err_code); + else + dev_err(dev, "Chunk authentication error : %s\n", + scan_authentication_status[err_code]); +} + /* * To copy scan hashes and authenticate test chunks, the initiating cpu must point * to the EDX:EAX to the test image in linear address. @@ -109,11 +135,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (!hashes_status.valid) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_hash_status)) { - dev_err(dev, "invalid error code 0x%x for hash copy\n", err_code); - goto done; - } - dev_err(dev, "Hash copy error : %s", scan_hash_status[err_code]); + hashcopy_err_message(dev, err_code); goto done; } @@ -133,13 +155,7 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) if (err_code) { ifsd->loading_error = true; - if (err_code >= ARRAY_SIZE(scan_authentication_status)) { - dev_err(dev, - "invalid error code 0x%x for authentication\n", err_code); - goto done; - } - dev_err(dev, "Chunk authentication error %s\n", - scan_authentication_status[err_code]); + auth_err_message(dev, err_code); goto done; } } @@ -147,6 +163,102 @@ static void copy_hashes_authenticate_chunks(struct work_struct *work) complete(&ifs_done); } +static int get_num_chunks(int gen, union ifs_scan_hashes_status_gen2 status) +{ + return gen >= IFS_GEN_STRIDE_AWARE ? 
status.chunks_in_stride : status.num_chunks; +} + +static bool need_copy_scan_hashes(struct ifs_data *ifsd) +{ + return !ifsd->loaded || + ifsd->generation < IFS_GEN_STRIDE_AWARE || + ifsd->loaded_version != ifs_header_ptr->rev; +} + +static int copy_hashes_authenticate_chunks_gen2(struct device *dev) +{ + union ifs_scan_hashes_status_gen2 hashes_status; + union ifs_chunks_auth_status_gen2 chunk_status; + u32 err_code, valid_chunks, total_chunks; + int i, num_chunks, chunk_size; + union meta_data *ifs_meta; + int starting_chunk_nr; + struct ifs_data *ifsd; + u64 linear_addr, base; + u64 chunk_table[2]; + int retry_count; + + ifsd = ifs_get_data(dev); + + if (need_copy_scan_hashes(ifsd)) { + wrmsrl(MSR_COPY_SCAN_HASHES, ifs_hash_ptr); + rdmsrl(MSR_SCAN_HASHES_STATUS, hashes_status.data); + + /* enumerate the scan image information */ + chunk_size = hashes_status.chunk_size * SZ_1K; + err_code = hashes_status.error_code; + + num_chunks = get_num_chunks(ifsd->generation, hashes_status); + + if (!hashes_status.valid) { + hashcopy_err_message(dev, err_code); + return -EIO; + } + ifsd->loaded_version = ifs_header_ptr->rev; + ifsd->chunk_size = chunk_size; + } else { + num_chunks = ifsd->valid_chunks; + chunk_size = ifsd->chunk_size; + } + + if (ifsd->generation >= IFS_GEN_STRIDE_AWARE) { + wrmsrl(MSR_SAF_CTRL, INVALIDATE_STRIDE); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + if (chunk_status.valid_chunks != 0) { + dev_err(dev, "Couldn't invalidate installed stride - %d\n", + chunk_status.valid_chunks); + return -EIO; + } + } + + base = ifs_test_image_ptr; + ifs_meta = (union meta_data *)find_meta_data(ifs_header_ptr, META_TYPE_IFS); + starting_chunk_nr = ifs_meta->starting_chunk; + + /* scan data authentication and copy chunks to secured memory */ + for (i = 0; i < num_chunks; i++) { + retry_count = IFS_AUTH_RETRY_CT; + linear_addr = base + i * chunk_size; + + chunk_table[0] = starting_chunk_nr + i; + chunk_table[1] = linear_addr; + do { + wrmsrl(MSR_AUTHENTICATE_AND_COPY_CHUNK, (u64)chunk_table); + rdmsrl(MSR_CHUNKS_AUTHENTICATION_STATUS, chunk_status.data); + err_code = chunk_status.error_code; + } while (err_code == AUTH_INTERRUPTED_ERROR && --retry_count); + + if (err_code) { + ifsd->loading_error = true; + auth_err_message(dev, err_code); + return -EIO; + } + } + + valid_chunks = chunk_status.valid_chunks; + total_chunks = chunk_status.total_chunks; + + if (valid_chunks != total_chunks) { + ifsd->loading_error = true; + dev_err(dev, "Couldn't authenticate all the chunks. 
Authenticated %d total %d.\n", + valid_chunks, total_chunks); + return -EIO; + } + ifsd->valid_chunks = valid_chunks; + + return 0; +} + static int validate_ifs_metadata(struct device *dev) { struct ifs_data *ifsd = ifs_get_data(dev); @@ -179,6 +291,13 @@ static int validate_ifs_metadata(struct device *dev) return ret; } + if (ifs_meta->chunks_per_stride && + (ifs_meta->starting_chunk % ifs_meta->chunks_per_stride != 0)) { + dev_warn(dev, "Starting chunk num %u not a multiple of chunks_per_stride %u\n", + ifs_meta->starting_chunk, ifs_meta->chunks_per_stride); + return ret; + } + return 0; } @@ -199,7 +318,9 @@ static int scan_chunks_sanity_check(struct device *dev) return ret; ifsd->loading_error = false; - ifsd->loaded_version = ifs_header_ptr->rev; + + if (ifsd->generation > 0) + return copy_hashes_authenticate_chunks_gen2(dev); /* copy the scan hash and authenticate per package */ cpus_read_lock(); @@ -219,6 +340,7 @@ static int scan_chunks_sanity_check(struct device *dev) ifs_pkg_auth[curr_pkg] = 1; } ret = 0; + ifsd->loaded_version = ifs_header_ptr->rev; out: cpus_read_unlock(); @@ -227,7 +349,7 @@ static int scan_chunks_sanity_check(struct device *dev) static int image_sanity_check(struct device *dev, const struct microcode_header_intel *data) { - struct ucode_cpu_info uci; + struct cpu_signature sig; /* Provide a specific error message when loading an older/unsupported image */ if (data->hdrver != MC_HEADER_TYPE_IFS) { @@ -240,11 +362,9 @@ static int image_sanity_check(struct device *dev, const struct microcode_header_ return -EINVAL; } - intel_cpu_collect_info(&uci); + intel_collect_cpu_info(&sig); - if (!intel_find_matching_signature((void *)data, - uci.cpu_sig.sig, - uci.cpu_sig.pf)) { + if (!intel_find_matching_signature((void *)data, &sig)) { dev_err(dev, "cpu signature, processor flags not matching\n"); return -EINVAL; } @@ -260,6 +380,7 @@ int ifs_load_firmware(struct device *dev) { const struct ifs_test_caps *test = ifs_get_test_caps(dev); struct ifs_data *ifsd = ifs_get_data(dev); + unsigned int expected_size; const struct firmware *fw; char scan_path[64]; int ret = -EINVAL; @@ -274,6 +395,14 @@ int ifs_load_firmware(struct device *dev) goto done; } + expected_size = ((struct microcode_header_intel *)fw->data)->totalsize; + if (fw->size != expected_size) { + dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n", + expected_size, fw->size); + ret = -EINVAL; + goto release; + } + ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data); if (ret) goto release; diff --git a/drivers/platform/x86/intel/ifs/runtest.c b/drivers/platform/x86/intel/ifs/runtest.c index 43c864add778f82259a5969550f569cc393e276a..13ecd55c66680339af31b10484cf1929041f0a0e 100644 --- a/drivers/platform/x86/intel/ifs/runtest.c +++ b/drivers/platform/x86/intel/ifs/runtest.c @@ -40,6 +40,8 @@ enum ifs_status_err_code { IFS_UNASSIGNED_ERROR_CODE = 7, IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT = 8, IFS_INTERRUPTED_DURING_EXECUTION = 9, + IFS_UNASSIGNED_ERROR_CODE_0xA = 0xA, + IFS_CORRUPTED_CHUNK = 0xB, }; static const char * const scan_test_status[] = { @@ -55,6 +57,8 @@ static const char * const scan_test_status[] = { [IFS_EXCEED_NUMBER_OF_THREADS_CONCURRENT] = "Exceeded number of Logical Processors (LP) allowed to run Scan-At-Field concurrently", [IFS_INTERRUPTED_DURING_EXECUTION] = "Interrupt occurred prior to SCAN start", + [IFS_UNASSIGNED_ERROR_CODE_0xA] = "Unassigned error code 0xA", + [IFS_CORRUPTED_CHUNK] = "Scan operation aborted due to corrupted image. 
Try reloading", }; static void message_not_tested(struct device *dev, int cpu, union ifs_status status) @@ -123,6 +127,8 @@ static bool can_restart(union ifs_status status) case IFS_MISMATCH_ARGUMENTS_BETWEEN_THREADS: case IFS_CORE_NOT_CAPABLE_CURRENTLY: case IFS_UNASSIGNED_ERROR_CODE: + case IFS_UNASSIGNED_ERROR_CODE_0xA: + case IFS_CORRUPTED_CHUNK: break; } return false; @@ -171,21 +177,31 @@ static void ifs_test_core(int cpu, struct device *dev) union ifs_status status; unsigned long timeout; struct ifs_data *ifsd; + int to_start, to_stop; + int status_chunk; u64 msrvals[2]; int retries; ifsd = ifs_get_data(dev); - activate.rsvd = 0; + activate.gen0.rsvd = 0; activate.delay = IFS_THREAD_WAIT; activate.sigmce = 0; - activate.start = 0; - activate.stop = ifsd->valid_chunks - 1; + to_start = 0; + to_stop = ifsd->valid_chunks - 1; + + if (ifsd->generation) { + activate.gen2.start = to_start; + activate.gen2.stop = to_stop; + } else { + activate.gen0.start = to_start; + activate.gen0.stop = to_stop; + } timeout = jiffies + HZ / 2; retries = MAX_IFS_RETRIES; - while (activate.start <= activate.stop) { + while (to_start <= to_stop) { if (time_after(jiffies, timeout)) { status.error_code = IFS_SW_TIMEOUT; break; @@ -196,13 +212,14 @@ static void ifs_test_core(int cpu, struct device *dev) status.data = msrvals[1]; - trace_ifs_status(cpu, activate, status); + trace_ifs_status(cpu, to_start, to_stop, status.data); /* Some cases can be retried, give up for others */ if (!can_restart(status)) break; - if (status.chunk_num == activate.start) { + status_chunk = ifsd->generation ? status.gen2.chunk_num : status.gen0.chunk_num; + if (status_chunk == to_start) { /* Check for forward progress */ if (--retries == 0) { if (status.error_code == IFS_NO_ERROR) @@ -211,7 +228,11 @@ static void ifs_test_core(int cpu, struct device *dev) } } else { retries = MAX_IFS_RETRIES; - activate.start = status.chunk_num; + if (ifsd->generation) + activate.gen2.start = status_chunk; + else + activate.gen0.start = status_chunk; + to_start = status_chunk; } } @@ -308,6 +329,38 @@ static void ifs_array_test_core(int cpu, struct device *dev) ifsd->status = SCAN_TEST_PASS; } +#define ARRAY_GEN1_TEST_ALL_ARRAYS 0x0ULL +#define ARRAY_GEN1_STATUS_FAIL 0x1ULL + +static int do_array_test_gen1(void *status) +{ + int cpu = smp_processor_id(); + int first; + + first = cpumask_first(cpu_smt_mask(cpu)); + + if (cpu == first) { + wrmsrl(MSR_ARRAY_TRIGGER, ARRAY_GEN1_TEST_ALL_ARRAYS); + rdmsrl(MSR_ARRAY_STATUS, *((u64 *)status)); + } + + return 0; +} + +static void ifs_array_test_gen1(int cpu, struct device *dev) +{ + struct ifs_data *ifsd = ifs_get_data(dev); + u64 status = 0; + + stop_core_cpuslocked(cpu, do_array_test_gen1, &status); + ifsd->scan_details = status; + + if (status & ARRAY_GEN1_STATUS_FAIL) + ifsd->status = SCAN_TEST_FAIL; + else + ifsd->status = SCAN_TEST_PASS; +} + /* * Initiate per core test. It wakes up work queue threads on the target cpu and * its sibling cpu. 
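+ * All threads of a core must take the test together, which is why the
+ * work is queued to every sibling rather than run on a single CPU.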
Once all sibling threads wake up, the scan test gets executed and @@ -336,7 +389,10 @@ int do_core_test(int cpu, struct device *dev) ifs_test_core(cpu, dev); break; case IFS_TYPE_ARRAY_BIST: - ifs_array_test_core(cpu, dev); + if (ifsd->array_gen == ARRAY_GEN0) + ifs_array_test_core(cpu, dev); + else + ifs_array_test_gen1(cpu, dev); break; default: ret = -EINVAL; diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index d7502433c78aa318a1a8baf1b835d1d4e1e639f3..179c99a0e70661b0ca810e4147815cce4fd34c35 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -984,6 +984,13 @@ config RTC_DRV_ALPHA Direct support for the real-time clock found on every Alpha system, specifically MC146818 compatibles. If in doubt, say Y. +config RTC_DRV_SW64_VIRT + bool "SW64 Hypervisor based RTC" + depends on SW64 + default y + help + Get support for the Hypervisor based RTC on SW64 systems. + config RTC_DRV_DS1216 tristate "Dallas DS1216" depends on SNI_RM diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index fd209883ee2efd118c992f010fde9935c186c816..7711f79787acce325da642b9b9d3c03ea81c4c66 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_RTC_CLASS) += rtc-core.o obj-$(CONFIG_RTC_MC146818_LIB) += rtc-mc146818-lib.o rtc-core-y := class.o interface.o +ifdef CONFIG_RTC_DRV_SW64_VIRT +rtc-core-y += rtc-sw64-virt-platform.o +endif + rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += proc.o @@ -168,6 +172,7 @@ obj-$(CONFIG_RTC_DRV_ST_LPC) += rtc-st-lpc.o obj-$(CONFIG_RTC_DRV_STM32) += rtc-stm32.o obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o obj-$(CONFIG_RTC_DRV_SUN4V) += rtc-sun4v.o +obj-$(CONFIG_RTC_DRV_SW64_VIRT) += rtc-sw64-virt.o obj-$(CONFIG_RTC_DRV_SUN6I) += rtc-sun6i.o obj-$(CONFIG_RTC_DRV_SUNPLUS) += rtc-sunplus.o obj-$(CONFIG_RTC_DRV_SUNXI) += rtc-sunxi.o diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index f1c09f1db044c8481fca7ddb8801c952954e246e..dd472f08879158c1f8c1be5f0f3a3291e83400d1 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -8,6 +8,22 @@ #include #endif +#ifdef CONFIG_X86 +static inline bool follow_mc146818_divider_reset(void) +{ + if ((boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) && + (boot_cpu_data.x86 <= 7 && boot_cpu_data.x86_model <= 59)) + return false; + return true; +} +#else +static inline bool follow_mc146818_divider_reset(void) +{ + return true; +} +#endif + /* * Execute a function while the UIP (Update-in-progress) bit of the RTC is * unset. 
@@ -259,12 +275,13 @@ int mc146818_set_time(struct rtc_time *time) spin_lock_irqsave(&rtc_lock, flags); save_control = CMOS_READ(RTC_CONTROL); CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); - save_freq_select = CMOS_READ(RTC_FREQ_SELECT); - if (apply_amd_register_a_behavior()) - CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); - else - CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); - + if (follow_mc146818_divider_reset()) { + save_freq_select = CMOS_READ(RTC_FREQ_SELECT); + if (apply_amd_register_a_behavior()) + CMOS_WRITE((save_freq_select & ~RTC_AMD_BANK_SELECT), RTC_FREQ_SELECT); + else + CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT); + } #ifdef CONFIG_MACH_DECSTATION CMOS_WRITE(real_yrs, RTC_DEC_YEAR); #endif @@ -281,7 +298,8 @@ int mc146818_set_time(struct rtc_time *time) #endif CMOS_WRITE(save_control, RTC_CONTROL); - CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); + if (follow_mc146818_divider_reset()) + CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); spin_unlock_irqrestore(&rtc_lock, flags); diff --git a/drivers/rtc/rtc-sw64-guest.c b/drivers/rtc/rtc-sw64-guest.c new file mode 100644 index 0000000000000000000000000000000000000000..5d86ce20a1fbb8f493cb8dfeda0b917e3e8d1f2a --- /dev/null +++ b/drivers/rtc/rtc-sw64-guest.c @@ -0,0 +1,54 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) + +static int sw_guest_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + rtc_time64_to_tm(*ioaddr, tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw_guest_ops = { + .read_time = sw_guest_read_time, +}; + +static int __init rtc_sw_guest_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw_guest", + &rtc_sw_guest_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw_guest_driver = { + .driver = { + .name = "rtc_sw_guest", + }, +}; + +module_platform_driver_probe(rtc_sw_guest_driver, rtc_sw_guest_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("SW GUEST RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw_guest"); diff --git a/drivers/rtc/rtc-sw64-virt-platform.c b/drivers/rtc/rtc-sw64-virt-platform.c new file mode 100644 index 0000000000000000000000000000000000000000..3db9ff2f0e646b17463d5550f6f431cfbbbf9e88 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt-platform.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include + +static struct platform_device rtc_sw64_virt_device = { + .name = "rtc_sw64_virt", + .id = -1, +}; + +static int __init rtc_sw64_virt_init(void) +{ + if (is_in_host()) + return 0; + + if (platform_device_register(&rtc_sw64_virt_device) < 0) + pr_err("unable to register rtc device...\n"); + /* not necessarily an error */ + return 0; +} +module_init(rtc_sw64_virt_init); diff --git a/drivers/rtc/rtc-sw64-virt.c b/drivers/rtc/rtc-sw64-virt.c new file mode 100644 index 0000000000000000000000000000000000000000..23c93d7ddbae7281d04f4f86137018cc297b77f4 --- /dev/null +++ b/drivers/rtc/rtc-sw64-virt.c @@ -0,0 +1,77 @@ +// SPDX-License-Identifier: GPL-2.0 +/* rtc-sw64-virt.c: Hypervisor 
based RTC for SW64 systems. + * + * Copyright (C) 2021 Lu Feifei + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +#define RTC_IO_ADDR (0x804910000000ULL) +unsigned long vtime_old, vtime_new; + +static int sw64_virt_read_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + unsigned long vtime_now; + long vtime_offset; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + if (!vtime_new) { + rtc_time64_to_tm(*ioaddr, tm); + } else { + vtime_now = *ioaddr; + vtime_offset = vtime_new - vtime_old; + vtime_now += vtime_offset; + rtc_time64_to_tm(vtime_now, tm); + } + return 0; +} + +static int sw64_virt_set_time(struct device *dev, struct rtc_time *tm) +{ + unsigned long *ioaddr; + + ioaddr = ioremap(RTC_IO_ADDR, sizeof(long)); + vtime_old = *ioaddr; + + vtime_new = rtc_tm_to_time64(tm); + return 0; +} + +static const struct rtc_class_ops rtc_sw64_virt_ops = { + .read_time = sw64_virt_read_time, + .set_time = sw64_virt_set_time, +}; + +static int __init rtc_sw64_virt_probe(struct platform_device *pdev) +{ + struct rtc_device *rtc; + + rtc = devm_rtc_device_register(&pdev->dev, "sw64_virt", + &rtc_sw64_virt_ops, THIS_MODULE); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + + platform_set_drvdata(pdev, rtc); + return 0; +} + +static struct platform_driver rtc_sw64_virt_driver = { + .driver = { + .name = "rtc_sw64_virt", + }, +}; + +module_platform_driver_probe(rtc_sw64_virt_driver, rtc_sw64_virt_probe); + +MODULE_AUTHOR("Lu Feifei "); +MODULE_DESCRIPTION("Sunway virtual RTC driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:rtc_sw64_virt"); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 8a83f3fc2b865e7c6a430168c86b2f5fb5c5822e..d4b97f0a50131abac54b0a8652932e66de4acb3a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -3589,6 +3589,14 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, d_val.u.high != cpu_to_le32(UINT_MAX)) { smid = le16_to_cpu(reply_desc->SMID); +#ifdef CONFIG_SUBARCH_C3B + if (smid == 0xffff) { + smid = d_val.u.low >> 16; + if (smid == 0xffff) + break; + } +#endif + cmd_fusion = fusion->cmd_list[smid - 1]; scsi_io_req = (struct MPI2_RAID_SCSI_IO_REQUEST *) cmd_fusion->io_request; diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index bcbf840cd41c80aeff7616da9685602973408ff4..61cbbf6515346b3785a6d0d20dc797d733e4a1ce 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1178,6 +1178,12 @@ config SPI_AMD # # Add new SPI master controllers in alphabetical order above this line # +config SPI_CHIP3 + tristate "Memory-mapped io interface driver for SUNWAY CHIP3 SPI core" + depends on UNCORE_XUELANG + help + general driver for SPI controller core from DesignWare + comment "SPI Multiplexer support" diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 6af54842b9fa4f2b95355ada8418ad54173649b3..26bf16fcf890c229e60e7191a10f4c62ca505986 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ spi-dw-y := spi-dw-core.o spi-dw-$(CONFIG_SPI_DW_DMA) += spi-dw-dma.o obj-$(CONFIG_SPI_DW_BT1) += spi-dw-bt1.o obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o +obj-$(CONFIG_SPI_CHIP3) += spi-chip3.o spi-chip3-mmio.o obj-$(CONFIG_SPI_DW_PCI) += spi-dw-pci.o obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o obj-$(CONFIG_SPI_FALCON) += spi-falcon.o diff --git a/drivers/spi/spi-chip3-mmio.c b/drivers/spi/spi-chip3-mmio.c new file mode 100644 index 
0000000000000000000000000000000000000000..a907f13d4ae5cf6fd3047febd81ca2594320bb5f --- /dev/null +++ b/drivers/spi/spi-chip3-mmio.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Memory-mapped interface driver for SUNWAY CHIP3 SPI Core + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +#define DRIVER_NAME "sunway_chip3_spi" + +struct chip3_spi_mmio { + struct chip3_spi dws; + struct clk *clk; + void *priv; +}; + +static int chip3_spi_mmio_probe(struct platform_device *pdev) +{ + int (*init_func)(struct platform_device *pdev, + struct chip3_spi_mmio *dwsmmio); + struct chip3_spi_mmio *dwsmmio; + struct chip3_spi *dws; + struct resource *mem; + int ret; + int num_cs; + + dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct chip3_spi_mmio), + GFP_KERNEL); + if (!dwsmmio) + return -ENOMEM; + + dws = &dwsmmio->dws; + + /* Get basic io resource and map it */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dws->regs = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dws->regs)) { + dev_err(&pdev->dev, "SPI region map failed\n"); + return PTR_ERR(dws->regs); + } + + dws->irq = platform_get_irq(pdev, 0); + if (dws->irq < 0) { + dev_err(&pdev->dev, "no irq resource?\n"); + return dws->irq; /* -ENXIO */ + } + + dwsmmio->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(dwsmmio->clk)) + return PTR_ERR(dwsmmio->clk); + ret = clk_prepare_enable(dwsmmio->clk); + if (ret) + return ret; + + dws->bus_num = pdev->id; + dws->max_freq = clk_get_rate(dwsmmio->clk); + + device_property_read_u32(&pdev->dev, "reg-io-width", + &dws->reg_io_width); + + num_cs = 4; + device_property_read_u32(&pdev->dev, "num-cs", &num_cs); + dws->num_cs = num_cs; + + if (pdev->dev.of_node) { + int i; + + for (i = 0; i < dws->num_cs; i++) { + int cs_gpio = of_get_named_gpio(pdev->dev.of_node, + "cs-gpios", i); + + if (cs_gpio == -EPROBE_DEFER) { + ret = cs_gpio; + goto out; + } + + if (gpio_is_valid(cs_gpio)) { + ret = devm_gpio_request(&pdev->dev, cs_gpio, + dev_name(&pdev->dev)); + if (ret) + goto out; + } + } + } + + init_func = device_get_match_data(&pdev->dev); + if (init_func) { + ret = init_func(pdev, dwsmmio); + if (ret) + goto out; + } + + ret = chip3_spi_add_host(&pdev->dev, dws); + if (ret) + goto out; + + platform_set_drvdata(pdev, dwsmmio); + + return 0; +out: + clk_disable_unprepare(dwsmmio->clk); + return ret; +} + +static int chip3_spi_mmio_remove(struct platform_device *pdev) +{ + struct chip3_spi_mmio *dwsmmio = platform_get_drvdata(pdev); + + chip3_spi_remove_host(&dwsmmio->dws); + clk_disable_unprepare(dwsmmio->clk); + + return 0; +} + +static const struct of_device_id chip3_spi_mmio_of_match[] = { + { .compatible = "sunway,chip3-spi", }, + { /* end of table */} +}; +MODULE_DEVICE_TABLE(of, chip3_spi_mmio_of_match); + +static struct platform_driver chip3_spi_mmio_driver = { + .probe = chip3_spi_mmio_probe, + .remove = chip3_spi_mmio_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = chip3_spi_mmio_of_match, + }, +}; +module_platform_driver(chip3_spi_mmio_driver); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Memory-mapped I/O interface driver for Sunway CHIP3"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.c b/drivers/spi/spi-chip3.c new file mode 100644 index 0000000000000000000000000000000000000000..8186c84eca8c3bdd411ff4dfd19fdc21f7b30a00 --- /dev/null +++ b/drivers/spi/spi-chip3.c @@ -0,0 +1,404 @@ +// 
SPDX-License-Identifier: GPL-2.0 +/* + * SUNWAY CHIP3 SPI core controller driver + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "spi-chip3.h" + +/* Slave spi_dev related */ +struct chip_data { + u8 tmode; /* TR/TO/RO/EEPROM */ + u8 type; /* SPI/SSP/MicroWire */ + + u8 poll_mode; /* 1 means use poll mode */ + + u16 clk_div; /* baud rate divider */ + u32 speed_hz; /* baud rate */ + void (*cs_control)(u32 command); +}; + +static void chip3_spi_handle_err(struct spi_controller *master, + struct spi_message *msg) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + + spi_reset_chip(dws); +} + +static size_t chip3_spi_max_length(struct spi_device *spi) +{ + struct chip3_spi *dws = spi_controller_get_devdata(spi->master); + + return dws->fifo_len; +} + +static int chip3_spi_transfer_one_message(struct spi_controller *master, + struct spi_message *m) +{ + struct chip3_spi *dws = spi_controller_get_devdata(master); + struct spi_transfer *t = NULL; + u16 clk_div; + u32 freq; + u32 speed_hz; + u32 status; + u32 len = 0; + int ret = 0; + int i = 0; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. */ + freq = clamp(m->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_RECEIVE); + + spi_enable_chip(dws, 1); + + list_for_each_entry(t, &m->transfers, transfer_list) { + len += t->len; + /* Judge if data is overflow */ + if (len > dws->fifo_len) { + pr_err("SPI transfer overflow.\n"); + m->actual_length = 0; + m->status = -EIO; + ret = -EIO; + goto way_out; + } + + if (t->tx_buf) + memcpy(&dws->buf[len], t->tx_buf, t->len); + else + memset(&dws->buf[len], 0, t->len); + } + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(m->spi->chip_select)); + + do { + status = chip3_readl(dws, CHIP3_SPI_SR); + } while (status & SR_BUSY); + + list_for_each_entry(t, &m->transfers, transfer_list) { + if (t->rx_buf) { + for (i = 0; i < t->len; i++, t->rx_buf += 1) + *(u8 *)t->rx_buf = chip3_readl(dws, CHIP3_SPI_DR); + } else { + for (i = 0; i < t->len; i++) + chip3_readl(dws, CHIP3_SPI_DR); + } + } + + m->actual_length = len; + m->status = 0; + spi_finalize_current_message(master); + +way_out: + return ret; +} + +static int chip3_spi_adjust_mem_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller); + size_t len; + + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + + op->data.nbytes = min((size_t)op->data.nbytes, (dws->fifo_len - len)); + if (!op->data.nbytes) + return -EINVAL; + + return 0; +} + +static int chip3_spi_init_mem_buf(struct chip3_spi *dws, + const struct spi_mem_op *op) +{ + int ret = 0; + int i, j, len; + + /* Calculate the total length of the transfer. */ + len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes; + + /* Judge if data is overflow */ + if (len + op->data.nbytes > dws->fifo_len) { + ret = -EIO; + goto way_out; + } + + /* + * Collect the operation code, address and dummy bytes into the single + * buffer. 
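+	 * The buffer is later pushed into the TX FIFO and clocked out in
+	 * one burst, so chip select only needs to be asserted once.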
If it's a transfer with data to be sent, also copy it into + * the single buffer. + */ + for (i = 0; i < sizeof(op->cmd.opcode); i++) + dws->buf[i] = op->cmd.opcode; + for (j = 0; j < op->addr.nbytes; i++, j++) + dws->buf[i] = op->addr.val >> (8 * (op->addr.nbytes - i)); + for (j = 0; j < op->dummy.nbytes; i++, j++) + dws->buf[i] = 0xff; + + if (op->data.dir == SPI_MEM_DATA_OUT) { + memcpy(&dws->buf[i], op->data.buf.out, op->data.nbytes); + len += op->data.nbytes; + } + + dws->tx_len = len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + dws->rx = op->data.buf.in; + dws->rx_len = op->data.nbytes; + } else { + dws->rx = NULL; + dws->rx_len = 0; + } + +way_out: + return ret; +} + +static int chip3_spi_exec_mem_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct chip3_spi *dws = spi_controller_get_devdata(mem->spi->controller); + u16 clk_div; + int ret = 0; + int i; + unsigned short value; + u32 freq; + u32 speed_hz; + + ret = chip3_spi_init_mem_buf(dws, op); + if (ret) + return ret; + + spi_enable_chip(dws, 0); + + /* Handle per transfer options for bpw and speed. */ + freq = clamp(mem->spi->max_speed_hz, 0U, dws->max_freq); + clk_div = (DIV_ROUND_UP(dws->max_freq, freq) + 1) & 0xfffe; + speed_hz = dws->max_freq / clk_div; + + if (dws->current_freq != speed_hz) { + spi_set_clk(dws, clk_div); + dws->current_freq = speed_hz; + } + + dws->n_bytes = 1; + + /* For poll mode just disable all interrupts */ + spi_mask_intr(dws, 0xff); + + if ((dws->tx_len != 0) && (dws->rx_len != 0)) { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_EEPROM_READ); + chip3_writel(dws, CHIP3_SPI_CTRL1, (dws->rx_len - 1)); + } else { + chip3_writel(dws, CHIP3_SPI_CTRL0, SPI_TRANSMIT_ONLY); + } + + spi_enable_chip(dws, 1); + + chip3_writel(dws, CHIP3_SPI_SER, 0x0); + for (i = 0; i < dws->tx_len; i++) + chip3_writel(dws, CHIP3_SPI_DR, dws->buf[i]); + chip3_writel(dws, CHIP3_SPI_SER, BIT(mem->spi->chip_select)); + + value = chip3_readl(dws, CHIP3_SPI_SR); + while (value & SR_BUSY) + value = chip3_readl(dws, CHIP3_SPI_SR); + + for (i = 0; i < dws->rx_len; dws->rx += dws->n_bytes, i++) + *(u8 *)dws->rx = chip3_readl(dws, CHIP3_SPI_DR); + + return ret; +} + +/* This may be called twice for each spi dev */ +static int chip3_spi_setup(struct spi_device *spi) +{ + struct chip3_spi_chip *chip_info = NULL; + struct chip_data *chip; + u32 poll_mode = 0; + struct device_node *np = spi->dev.of_node; + + /* Only alloc on first setup */ + chip = spi_get_ctldata(spi); + if (!chip) { + chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); + if (!chip) + return -ENOMEM; + spi_set_ctldata(spi, chip); + } + + /* + * Protocol drivers may change the chip settings, so... 
+ * if chip_info exists, use it + */ + chip_info = spi->controller_data; + + /* chip_info doesn't always exist */ + if (chip_info) { + if (chip_info->cs_control) + chip->cs_control = chip_info->cs_control; + + chip->poll_mode = chip_info->poll_mode; + chip->type = chip_info->type; + } else { + if (np) { + of_property_read_u32(np, "poll_mode", &poll_mode); + chip->poll_mode = poll_mode; + } + + } + + chip->tmode = SPI_TMOD_TR; + return 0; +} + +static void chip3_spi_cleanup(struct spi_device *spi) +{ + struct chip_data *chip = spi_get_ctldata(spi); + + kfree(chip); + spi_set_ctldata(spi, NULL); +} + +/* Restart the controller, disable all interrupts, clean rx fifo */ +static void spi_hw_init(struct device *dev, struct chip3_spi *dws) +{ + spi_reset_chip(dws); + + /* + * Try to detect the FIFO depth if not set by interface driver, + * the depth could be from 2 to 256 from HW spec + */ + if (!dws->fifo_len) { + u32 fifo; + + for (fifo = 1; fifo < 256; fifo++) { + chip3_writel(dws, CHIP3_SPI_TXFLTR, fifo); + if (fifo != chip3_readl(dws, CHIP3_SPI_TXFLTR)) + break; + } + chip3_writel(dws, CHIP3_SPI_TXFLTR, 0); + + dws->fifo_len = (fifo == 1) ? 0 : fifo; + dev_info(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); + } +} + +static const struct spi_controller_mem_ops chip3_mem_ops = { + .adjust_op_size = chip3_spi_adjust_mem_op_size, + .exec_op = chip3_spi_exec_mem_op, +}; + + +int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws) +{ + struct spi_controller *master; + int ret; + + BUG_ON(dws == NULL); + + master = spi_alloc_master(dev, 0); + if (!master) + return -ENOMEM; + + dws->master = master; + dws->type = SSI_MOTO_SPI; + + spi_controller_set_devdata(master, dws); + + master->mode_bits = SPI_CPOL | SPI_CPHA; + master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); + master->bus_num = dws->bus_num; + master->num_chipselect = dws->num_cs; + master->setup = chip3_spi_setup; + master->cleanup = chip3_spi_cleanup; + master->transfer_one_message = chip3_spi_transfer_one_message; + master->handle_err = chip3_spi_handle_err; + master->max_speed_hz = dws->max_freq; + master->dev.of_node = dev->of_node; + master->flags = SPI_CONTROLLER_GPIO_SS; + master->max_transfer_size = chip3_spi_max_length; + master->max_message_size = chip3_spi_max_length; + + master->mem_ops = &chip3_mem_ops; + + /* Basic HW init */ + spi_hw_init(dev, dws); + + ret = devm_spi_register_controller(dev, master); + if (ret) { + dev_err(&master->dev, "problem registering spi master\n"); + spi_enable_chip(dws, 0); + free_irq(dws->irq, master); + } + + return 0; +} +EXPORT_SYMBOL_GPL(chip3_spi_add_host); + +void chip3_spi_remove_host(struct chip3_spi *dws) +{ + spi_shutdown_chip(dws); + + free_irq(dws->irq, dws->master); +} +EXPORT_SYMBOL_GPL(chip3_spi_remove_host); + +int chip3_spi_suspend_host(struct chip3_spi *dws) +{ + int ret; + + ret = spi_controller_suspend(dws->master); + if (ret) + return ret; + + spi_shutdown_chip(dws); + return 0; +} +EXPORT_SYMBOL_GPL(chip3_spi_suspend_host); + +int chip3_spi_resume_host(struct chip3_spi *dws) +{ + int ret; + + spi_hw_init(&dws->master->dev, dws); + ret = spi_controller_resume(dws->master); + if (ret) + dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret); + return ret; +} +EXPORT_SYMBOL_GPL(chip3_spi_resume_host); + +MODULE_AUTHOR("Platform@wxiat.com"); +MODULE_DESCRIPTION("Driver for Sunway CHIP3 SPI controller core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/spi/spi-chip3.h b/drivers/spi/spi-chip3.h new file mode 100644 index 
0000000000000000000000000000000000000000..88e49a9091a5d6bb8265fd0b8dc513d8e582b582 --- /dev/null +++ b/drivers/spi/spi-chip3.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef CHIP3_SPI_HEADER_H +#define CHIP3_SPI_HEADER_H + +#include +#include +#include +#include + +/* Register offsets */ +#define CHIP3_SPI_CTRL0 (0x00<<7) +#define CHIP3_SPI_CTRL1 (0x04<<7) +#define CHIP3_SPI_SSIENR (0x08<<7) +#define CHIP3_SPI_MWCR (0x0c<<7) +#define CHIP3_SPI_SER (0x10<<7) +#define CHIP3_SPI_BAUDR (0x14<<7) +#define CHIP3_SPI_TXFLTR (0x18<<7) +#define CHIP3_SPI_RXFLTR (0x1c<<7) +#define CHIP3_SPI_TXFLR (0x20<<7) +#define CHIP3_SPI_RXFLR (0x24<<7) +#define CHIP3_SPI_SR (0x28<<7) +#define CHIP3_SPI_IMR (0x2c<<7) +#define CHIP3_SPI_ISR (0x30<<7) +#define CHIP3_SPI_RISR (0x34<<7) +#define CHIP3_SPI_TXOICR (0x38<<7) +#define CHIP3_SPI_RXOICR (0x3c<<7) +#define CHIP3_SPI_RXUICR (0x40<<7) +#define CHIP3_SPI_MSTICR (0x44<<7) +#define CHIP3_SPI_ICR (0x48<<7) +#define CHIP3_SPI_DMACR (0x4c<<7) +#define CHIP3_SPI_DMATDLR (0x50<<7) +#define CHIP3_SPI_DMARDLR (0x54<<7) +#define CHIP3_SPI_IDR (0x58<<7) +#define CHIP3_SPI_VERSION (0x5c<<7) +#define CHIP3_SPI_DR (0x60<<7) + +/* Bit fields in CTRLR0 */ +#define SPI_DFS_OFFSET 0 + +#define SPI_FRF_OFFSET 4 +#define SPI_FRF_SPI 0x0 +#define SPI_FRF_SSP 0x1 +#define SPI_FRF_MICROWIRE 0x2 +#define SPI_FRF_RESV 0x3 + +#define SPI_MODE_OFFSET 6 +#define SPI_SCPH_OFFSET 6 +#define SPI_SCOL_OFFSET 7 + +#define SPI_TMOD_OFFSET 8 +#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) +#define SPI_TMOD_TR 0x0 /* xmit & recv */ +#define SPI_TMOD_TO 0x1 /* xmit only */ +#define SPI_TMOD_RO 0x2 /* recv only */ +#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */ + +#define SPI_SLVOE_OFFSET 10 +#define SPI_SRL_OFFSET 11 +#define SPI_CFS_OFFSET 12 + +/* Bit fields in SR, 7 bits */ +#define SR_MASK 0x7f /* cover 7 bits */ +#define SR_BUSY (1 << 0) +#define SR_TF_NOT_FULL (1 << 1) +#define SR_TF_EMPT (1 << 2) +#define SR_RF_NOT_EMPT (1 << 3) +#define SR_RF_FULL (1 << 4) +#define SR_TX_ERR (1 << 5) +#define SR_DCOL (1 << 6) + +/* Bit fields in ISR, IMR, RISR, 7 bits */ +#define SPI_INT_TXEI (1 << 0) +#define SPI_INT_TXOI (1 << 1) +#define SPI_INT_RXUI (1 << 2) +#define SPI_INT_RXOI (1 << 3) +#define SPI_INT_RXFI (1 << 4) +#define SPI_INT_MSTI (1 << 5) + +/* Bit fields in DMACR */ +#define SPI_DMA_RDMAE (1 << 0) +#define SPI_DMA_TDMAE (1 << 1) + +/* TX RX interrupt level threshold, max can be 256 */ +#define SPI_INT_THRESHOLD 32 + +/* The depth of the FIFO buffer is 256, so the max transfer length is 256. */ +#define MAX_LEN 256 + +/* The mode of spi controller. 
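+ * Each value programs CTRL0 in a single write, assuming the DW-style
+ * layout above: DFS = 7 (8-bit frames), Motorola SPI format, clock
+ * mode 3, with TMOD[9:8] selecting the direction: 00 transmit and
+ * receive, 01 transmit only, 11 EEPROM read.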
*/ +#define SPI_TRANSMIT_RECEIVE 0x0c7 +#define SPI_EEPROM_READ 0x3c7 +#define SPI_TRANSMIT_ONLY 0x1c7 + +enum chip3_ssi_type { + SSI_MOTO_SPI = 0, + SSI_TI_SSP, + SSI_NS_MICROWIRE, +}; + +struct chip3_spi; + +struct chip3_spi { + struct spi_controller *master; + enum chip3_ssi_type type; + + void __iomem *regs; + unsigned long paddr; + int irq; + u32 fifo_len; /* depth of the FIFO buffer */ + u32 max_freq; /* max bus freq supported */ + + u32 reg_io_width; /* DR I/O width in bytes */ + u16 bus_num; + u16 num_cs; /* supported slave numbers */ + void (*set_cs)(struct spi_device *spi, bool enable); + + /* Current message transfer state info */ + size_t len; + void *tx; + unsigned int tx_len; + void *rx; + unsigned int rx_len; + u8 n_bytes; /* current is a 1/2 bytes op */ + u32 current_freq; /* frequency in hz */ + + u8 buf[MAX_LEN]; + + /* Bus interface info */ + void *priv; +#ifdef CONFIG_DEBUG_FS + struct dentry *debugfs; +#endif +}; + +static inline u32 chip3_readl(struct chip3_spi *dws, u32 offset) +{ + return __raw_readl(dws->regs + offset); +} + +static inline u16 chip3_readw(struct chip3_spi *dws, u32 offset) +{ + return __raw_readw(dws->regs + offset); +} + +static inline void chip3_writel(struct chip3_spi *dws, u32 offset, u32 val) +{ + __raw_writel(val, dws->regs + offset); +} + +static inline void chip3_writew(struct chip3_spi *dws, u32 offset, u16 val) +{ + __raw_writew(val, dws->regs + offset); +} + +static inline u32 chip3_read_io_reg(struct chip3_spi *dws, u32 offset) +{ + switch (dws->reg_io_width) { + case 2: + return chip3_readw(dws, offset); + case 4: + default: + return chip3_readl(dws, offset); + } +} + +static inline void chip3_write_io_reg(struct chip3_spi *dws, u32 offset, u32 val) +{ + switch (dws->reg_io_width) { + case 2: + chip3_writew(dws, offset, val); + break; + case 4: + default: + chip3_writel(dws, offset, val); + break; + } +} + +static inline void spi_enable_chip(struct chip3_spi *dws, int enable) +{ + chip3_writel(dws, CHIP3_SPI_SSIENR, (enable ? 1 : 0)); +} + +static inline void spi_set_clk(struct chip3_spi *dws, u16 div) +{ + chip3_writel(dws, CHIP3_SPI_BAUDR, div); +} + +/* Disable IRQ bits */ +static inline void spi_mask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) & ~mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* Enable IRQ bits */ +static inline void spi_umask_intr(struct chip3_spi *dws, u32 mask) +{ + u32 new_mask; + + new_mask = chip3_readl(dws, CHIP3_SPI_IMR) | mask; + chip3_writel(dws, CHIP3_SPI_IMR, new_mask); +} + +/* + * This does disable the SPI controller, interrupts, and re-enable the + * controller back. Transmit and receive FIFO buffers are cleared when the + * device is disabled. + */ +static inline void spi_reset_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_mask_intr(dws, 0xff); + spi_enable_chip(dws, 1); +} + +static inline void spi_shutdown_chip(struct chip3_spi *dws) +{ + spi_enable_chip(dws, 0); + spi_set_clk(dws, 0); +} + +/* + * Each SPI slave device to work with chip3_api controller should + * has such a structure claiming its working mode (poll or PIO/DMA), + * which can be save in the "controller_data" member of the + * struct spi_device. 
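+ *
+ * A hypothetical board-file sketch (device names and values are
+ * illustrative only, not part of this driver) of how the structure can
+ * be attached through spi_board_info.controller_data:
+ *
+ *	static struct chip3_spi_chip flash_chip = {
+ *		.poll_mode	= 1,		/* poll instead of IRQ */
+ *		.type		= SSI_MOTO_SPI,	/* Motorola SPI framing */
+ *	};
+ *
+ *	static struct spi_board_info board_info[] __initdata = {
+ *		{
+ *			.modalias	 = "spi-nor",
+ *			.max_speed_hz	 = 10000000,
+ *			.bus_num	 = 0,
+ *			.chip_select	 = 0,
+ *			.controller_data = &flash_chip,
+ *		},
+ *	};
+ *
+ * registered at init time with
+ * spi_register_board_info(board_info, ARRAY_SIZE(board_info)).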
+ */ +struct chip3_spi_chip { + u8 poll_mode; /* 1 for controller polling mode */ + u8 type; /* SPI/SSP/MicroWire */ + u8 chip_select; + void (*cs_control)(u32 command); +}; + +extern int chip3_spi_add_host(struct device *dev, struct chip3_spi *dws); +extern void chip3_spi_remove_host(struct chip3_spi *dws); +extern int chip3_spi_suspend_host(struct chip3_spi *dws); +extern int chip3_spi_resume_host(struct chip3_spi *dws); + +/* platform related setup */ +extern int chip3_spi_mid_init(struct chip3_spi *dws); /* Intel MID platforms */ +#endif /* CHIP3_SPI_HEADER_H */ diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4ec99a55ac305a4829e894030226dacb404b0d2d..8ec4667c48d497d505a256494d573245a461b841 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -54,6 +54,9 @@ module_param_named(can_queue, tcm_loop_can_queue, uint, 0644); static unsigned int tcm_loop_cmd_per_lun = 1024; module_param_named(cmd_per_lun, tcm_loop_cmd_per_lun, uint, 0644); +static unsigned short tcm_loop_sg_tablesize = 256; +module_param_named(sg_tablesize, tcm_loop_sg_tablesize, ushort, 0644); + /* * Called from struct target_core_fabric_ops->check_stop_free() */ @@ -301,7 +304,6 @@ static const struct scsi_host_template tcm_loop_driver_template = { .eh_device_reset_handler = tcm_loop_device_reset, .eh_target_reset_handler = tcm_loop_target_reset, .this_id = -1, - .sg_tablesize = 256, .max_sectors = 0xFFFF, .dma_boundary = PAGE_SIZE - 1, .module = THIS_MODULE, @@ -339,6 +341,7 @@ static int tcm_loop_driver_probe(struct device *dev) sh->nr_hw_queues = tcm_loop_nr_hw_queues; sh->can_queue = tcm_loop_can_queue; sh->cmd_per_lun = tcm_loop_cmd_per_lun; + sh->sg_tablesize = tcm_loop_sg_tablesize; host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 22cc6cac0ba2b5704873aaa73bab75ea37cb235a..16e0b27d08ea20491a0b351edc10cc8cfc58b339 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include #include #include @@ -73,6 +75,7 @@ */ #define DATA_PAGES_PER_BLK_DEF 1 #define DATA_AREA_PAGES_DEF (256 * 1024) +#define ZC_DATA_AREA_PAGES_DEF (256 * 1024) #define TCMU_MBS_TO_PAGES(_mbs) ((size_t)_mbs << (20 - PAGE_SHIFT)) #define TCMU_PAGES_TO_MBS(_pages) (_pages >> (20 - PAGE_SHIFT)) @@ -123,6 +126,8 @@ struct tcmu_dev { #define TCMU_DEV_BIT_BLOCKED 2 #define TCMU_DEV_BIT_TMR_NOTIFY 3 #define TCMU_DEV_BIT_PLUGGED 4 +#define TCMU_DEV_BIT_READ_BYPASS_DATA_AREA 5 +#define TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA 6 unsigned long flags; struct uio_info uio_info; @@ -139,10 +144,12 @@ struct tcmu_dev { /* Must add data_off and mb_addr to get the address */ size_t data_off; int data_area_mb; + uint32_t zc_max_blocks; uint32_t max_blocks; size_t mmap_pages; struct mutex cmdr_lock; + struct rw_semaphore i_mmap_sem; struct list_head qfull_queue; struct list_head tmr_queue; @@ -153,6 +160,12 @@ struct tcmu_dev { uint32_t data_pages_per_blk; uint32_t data_blk_size; + uint32_t zc_dbi_max; + uint32_t zc_dbi_thresh; + unsigned long *zc_data_bitmap; + uint32_t read_zc_size; + uint32_t write_zc_size; + struct xarray commands; struct timer_list cmd_timer; @@ -178,6 +191,12 @@ struct tcmu_cmd { struct tcmu_dev *tcmu_dev; struct list_head queue_entry; + /* for zero_copy */ + struct mm_struct *vma_vm_mm; + struct 
vm_area_struct *vma; + struct iovec *iov; + int iov_cnt; + uint16_t cmd_id; /* Can't use se_cmd when cleaning up expired cmds, because if @@ -193,7 +212,11 @@ struct tcmu_cmd { #define TCMU_CMD_BIT_EXPIRED 0 #define TCMU_CMD_BIT_KEEP_BUF 1 +#define TCMU_CMD_BIT_ZEROCOPY 2 +#define TCMU_CMD_BIT_BYPASS_DATA_AREA 3 unsigned long flags; + + struct mutex cmd_lock; }; struct tcmu_tmr { @@ -497,10 +520,38 @@ static struct genl_family tcmu_genl_family __ro_after_init = { static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len) { struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + unsigned long *data_bitmap; uint32_t i; + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + data_bitmap = udev->zc_data_bitmap; + else + data_bitmap = udev->data_bitmap; + for (i = 0; i < len; i++) - clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap); + clear_bit(tcmu_cmd->dbi[i], data_bitmap); +} + +static inline int tcmu_get_zc_empty_block(struct tcmu_dev *udev, + struct tcmu_cmd *tcmu_cmd, + int prev_dbi, int *iov_cnt) +{ + int dbi; + + dbi = find_first_zero_bit(udev->zc_data_bitmap, udev->zc_dbi_thresh); + if (dbi == udev->zc_dbi_thresh) + return -1; + + if (dbi > udev->zc_dbi_max) + udev->zc_dbi_max = dbi; + + set_bit(dbi, udev->zc_data_bitmap); + tcmu_cmd_set_dbi(tcmu_cmd, dbi); + + if (dbi != prev_dbi + 1) + *iov_cnt += 1; + + return dbi; } static inline int tcmu_get_empty_block(struct tcmu_dev *udev, @@ -552,7 +603,8 @@ static inline int tcmu_get_empty_block(struct tcmu_dev *udev, } static int tcmu_get_empty_blocks(struct tcmu_dev *udev, - struct tcmu_cmd *tcmu_cmd, int length) + struct tcmu_cmd *tcmu_cmd, int length, + bool zero_copy) { /* start value of dbi + 1 must not be a valid dbi */ int dbi = -2; @@ -561,7 +613,10 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, for (; length > 0; length -= blk_size) { blk_data_len = min_t(uint32_t, length, blk_size); - dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, + if (zero_copy) + dbi = tcmu_get_zc_empty_block(udev, tcmu_cmd, dbi, &iov_cnt); + else + dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, blk_data_len, &iov_cnt); if (dbi < 0) return -1; @@ -569,8 +624,40 @@ static int tcmu_get_empty_blocks(struct tcmu_dev *udev, return iov_cnt; } +static void tcmu_cmd_zerocopy_unmap(struct tcmu_cmd *cmd) +{ + struct mm_struct *mm; + struct vm_area_struct *vma; + struct iovec *iov = cmd->iov; + unsigned long address; + int i; + + mm = cmd->vma_vm_mm; + vma = cmd->vma; + if (!mm) + return; + + if (mmget_not_zero(mm)) { + mmap_read_lock(mm); + for (i = 0; i < cmd->iov_cnt; i++) { + address = (unsigned long)iov->iov_base; + zap_page_range_single(vma, address, iov->iov_len, NULL); + iov++; + } + mmap_read_unlock(mm); + mmput(mm); + } + + cmd->vma_vm_mm = NULL; + cmd->vma = NULL; + mmdrop(mm); + kfree(cmd->iov); +} + static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) { + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + tcmu_cmd_zerocopy_unmap(tcmu_cmd); kfree(tcmu_cmd->dbi); kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); } @@ -630,11 +717,67 @@ static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd, dbi = new_block_to_iov(udev, cmd, iov, dbi, data_length); } +static void tcmu_set_cmd_bypass_data_area(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + + /* + * Zero copy is map sg pages to userspace, and bypass data area + * is copy data between sg pages and userspace buffer, so they + * are completely different. 
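+ *
+ * In other words: with TCMU_CMD_BIT_ZEROCOPY set, the daemon later maps
+ * the command's sg pages straight into its address space through
+ * TCMU_IOCTL_CMD_ZEROCOPY, while with TCMU_CMD_BIT_BYPASS_DATA_AREA the
+ * data stays in the kernel sg list and the daemon moves it explicitly
+ * with TCMU_IOCTL_CMD_COPY_TO_SGL / TCMU_IOCTL_CMD_COPY_FROM_SGL. A
+ * command takes at most one of the two paths, which is why zero copy
+ * wins below when both are configured.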
+ */ + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags)) + return; + + if (se_cmd->data_direction == DMA_FROM_DEVICE && + test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + + if (se_cmd->data_direction == DMA_TO_DEVICE && + test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + set_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); +} + +static void tcmu_set_cmd_do_zero_copy(struct tcmu_cmd *tcmu_cmd) +{ + struct tcmu_dev *udev = tcmu_cmd->tcmu_dev; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg = se_cmd->t_data_sg, *sg; + unsigned int data_nents = se_cmd->t_data_nents; + int i; + + if ((se_cmd->se_cmd_flags & SCF_BIDI) || !se_cmd->data_length || + !IS_ALIGNED(se_cmd->data_length, PAGE_SIZE)) + return; + + if ((se_cmd->data_direction == DMA_FROM_DEVICE) && + (!udev->read_zc_size || + se_cmd->data_length < (udev->read_zc_size << 10))) + return; + + if ((se_cmd->data_direction == DMA_TO_DEVICE) && + (!udev->write_zc_size || + se_cmd->data_length < (udev->write_zc_size << 10))) + return; + + /* Now, check every sg pages is aligned. */ + for_each_sg(data_sg, sg, data_nents, i) { + if ((sg->offset && !IS_ALIGNED(sg->offset, PAGE_SIZE)) || + !IS_ALIGNED(sg->length, PAGE_SIZE)) + break; + } + if (i == data_nents) + set_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); +} + static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; struct tcmu_dev *udev = TCMU_DEV(se_dev); struct tcmu_cmd *tcmu_cmd; + bool zero_copy; + bool bypass_data_area; tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_NOIO); if (!tcmu_cmd) @@ -643,13 +786,24 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd) INIT_LIST_HEAD(&tcmu_cmd->queue_entry); tcmu_cmd->se_cmd = se_cmd; tcmu_cmd->tcmu_dev = udev; - - tcmu_cmd_set_block_cnts(tcmu_cmd); - tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), - GFP_NOIO); - if (!tcmu_cmd->dbi) { - kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); - return NULL; + mutex_init(&tcmu_cmd->cmd_lock); + + tcmu_set_cmd_do_zero_copy(tcmu_cmd); + tcmu_set_cmd_bypass_data_area(tcmu_cmd); + + zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); + if (zero_copy || !bypass_data_area) { + tcmu_cmd_set_block_cnts(tcmu_cmd); + tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t), + GFP_NOIO); + if (!tcmu_cmd->dbi) { + kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); + return NULL; + } + } else { + tcmu_cmd->dbi_cnt = 0; + tcmu_cmd->dbi = NULL; } return tcmu_cmd; @@ -852,37 +1006,51 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size) * Called with ring lock held. 
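 *
 * The zero_copy flag selects which bookkeeping set is used below:
 * (zc_data_bitmap, zc_dbi_thresh, zc_max_blocks) instead of
 * (data_bitmap, dbi_thresh, max_blocks). A worked example, assuming the
 * defaults of 4K pages and data_pages_per_blk = 1: a 256 KiB data buffer
 * needs 64 blocks, so if fewer than 64 bits are free below the current
 * threshold, the threshold is raised by 64 (capped at max_blocks) before
 * the blocks are claimed.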
*/ static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd, - int *iov_bidi_cnt) + int *iov_bidi_cnt, bool zero_copy) { int space, iov_cnt = 0, ret = 0; + unsigned long *data_bitmap; + uint32_t *dbi_thresh, max_blocks; - if (!cmd->dbi_cnt) + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) goto wr_iov_cnts; + if (zero_copy) { + data_bitmap = udev->zc_data_bitmap; + dbi_thresh = &udev->zc_dbi_thresh; + max_blocks = udev->zc_max_blocks; + } else { + data_bitmap = udev->data_bitmap; + dbi_thresh = &udev->dbi_thresh; + max_blocks = udev->max_blocks; + } + /* try to check and get the data blocks as needed */ - space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh); + space = spc_bitmap_free(data_bitmap, *dbi_thresh); if (space < cmd->dbi_cnt) { - unsigned long blocks_left = - (udev->max_blocks - udev->dbi_thresh) + space; + unsigned long blocks_left = max_blocks - *dbi_thresh + space; if (blocks_left < cmd->dbi_cnt) { - pr_debug("no data space: only %lu available, but ask for %u\n", + pr_debug("no data space[%s]: only %lu available, but ask for %u\n", ++ zero_copy ? "zero copy" : "non zero copy", blocks_left * udev->data_blk_size, cmd->dbi_cnt * udev->data_blk_size); return -1; } - udev->dbi_thresh += cmd->dbi_cnt; - if (udev->dbi_thresh > udev->max_blocks) - udev->dbi_thresh = udev->max_blocks; + *dbi_thresh += cmd->dbi_cnt; + if (*dbi_thresh > max_blocks) + *dbi_thresh = max_blocks; } - iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length); + iov_cnt = tcmu_get_empty_blocks(udev, cmd, cmd->se_cmd->data_length, + zero_copy); if (iov_cnt < 0) return -1; if (cmd->dbi_bidi_cnt) { - ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi); + ret = tcmu_get_empty_blocks(udev, cmd, cmd->data_len_bidi, + zero_copy); if (ret < 0) return -1; } @@ -1023,6 +1191,8 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) uint32_t blk_size = udev->data_blk_size; /* size of data buffer needed */ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * blk_size; + bool zero_copy = test_bit(TCMU_CMD_BIT_ZEROCOPY, &tcmu_cmd->flags); + bool bypass_data_area = test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags); *scsi_err = TCM_NO_SENSE; @@ -1046,7 +1216,7 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) return -1; } - iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt); + iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt, zero_copy); if (iov_cnt < 0) goto free_and_queue; @@ -1095,16 +1265,18 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) tcmu_cmd_reset_dbi_cur(tcmu_cmd); iov = &entry->req.iov[0]; - if (se_cmd->data_direction == DMA_TO_DEVICE || - se_cmd->se_cmd_flags & SCF_BIDI) - scatter_data_area(udev, tcmu_cmd, &iov); - else - tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + if (zero_copy || !bypass_data_area) { + if (((se_cmd->data_direction == DMA_TO_DEVICE) && !zero_copy) || + se_cmd->se_cmd_flags & SCF_BIDI) + scatter_data_area(udev, tcmu_cmd, &iov); + else + tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length); + } entry->req.iov_cnt = iov_cnt - iov_bidi_cnt; /* Handle BIDI commands */ - if (se_cmd->se_cmd_flags & SCF_BIDI) { + if ((se_cmd->se_cmd_flags & SCF_BIDI) && !bypass_data_area) { iov++; tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi); entry->req.iov_bidi_cnt = iov_bidi_cnt; @@ -1113,6 +1285,21 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err) 
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer); entry->hdr.cmd_id = tcmu_cmd->cmd_id; + if (zero_copy) { + int i; + struct iovec *tiov; + + tiov = &entry->req.iov[0]; + for (i = 0; i < entry->req.iov_cnt; i++) { + tiov->iov_base = tiov->iov_base + + (TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT); + tiov++; + } + entry->hdr.kflags |= TCMU_KFLAG_ZERO_COPY; + } + + if (bypass_data_area) + entry->hdr.kflags |= TCMU_KFLAG_BYPASS_DATA_AREA; tcmu_hdr_set_len(&entry->hdr.len_op, command_size); @@ -1368,6 +1555,15 @@ static bool tcmu_handle_completion(struct tcmu_cmd *cmd, else se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL; } + + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) { + tcmu_cmd_zerocopy_unmap(cmd); + goto done; + } + + if (test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &cmd->flags)) + goto done; + if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ gather_data_area(udev, cmd, true, read_len); @@ -1522,11 +1718,15 @@ static void tcmu_check_expired_ring_cmd(struct tcmu_cmd *cmd) if (!time_after_eq(jiffies, cmd->deadline)) return; + mutex_lock(&cmd->cmd_lock); + if (test_bit(TCMU_CMD_BIT_ZEROCOPY, &cmd->flags)) + tcmu_cmd_zerocopy_unmap(cmd); set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags); list_del_init(&cmd->queue_entry); se_cmd = cmd->se_cmd; se_cmd->priv = NULL; cmd->se_cmd = NULL; + mutex_unlock(&cmd->cmd_lock); pr_debug("Timing out inflight cmd %u on dev %s.\n", cmd->cmd_id, cmd->tcmu_dev->name); @@ -1619,10 +1819,14 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) udev->data_pages_per_blk = DATA_PAGES_PER_BLK_DEF; udev->max_blocks = DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; + udev->zc_max_blocks = ZC_DATA_AREA_PAGES_DEF / udev->data_pages_per_blk; udev->cmdr_size = CMDR_SIZE_DEF; udev->data_area_mb = TCMU_PAGES_TO_MBS(DATA_AREA_PAGES_DEF); mutex_init(&udev->cmdr_lock); + udev->read_zc_size = 0; + udev->write_zc_size = 0; + init_rwsem(&udev->i_mmap_sem); INIT_LIST_HEAD(&udev->node); INIT_LIST_HEAD(&udev->timedout_entry); @@ -1740,6 +1944,7 @@ static void tcmu_dev_kref_release(struct kref *kref) tcmu_blocks_release(udev, 0, udev->dbi_max); bitmap_free(udev->data_bitmap); + bitmap_free(udev->zc_data_bitmap); mutex_unlock(&udev->cmdr_lock); pr_debug("dev_kref_release\n"); @@ -1840,12 +2045,12 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) { struct page *page; - mutex_lock(&udev->cmdr_lock); + down_read(&udev->i_mmap_sem); page = xa_load(&udev->data_pages, dpi); if (likely(page)) { get_page(page); lock_page(page); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return page; } @@ -1855,7 +2060,7 @@ static struct page *tcmu_try_get_data_page(struct tcmu_dev *udev, uint32_t dpi) */ pr_err("Invalid addr to data page mapping (dpi %u) on device %s\n", dpi, udev->name); - mutex_unlock(&udev->cmdr_lock); + up_read(&udev->i_mmap_sem); return NULL; } @@ -1928,7 +2133,7 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); - vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); + vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP); vma->vm_ops = &tcmu_vm_ops; vma->vm_private_data = udev; @@ -1942,6 +2147,109 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma) return 0; } +#define TCMU_ZEROCOPY_PAGE_BATCH 32 + +static inline int tcmu_zerocopy_one_seg(struct iovec *iov, + struct vm_area_struct *vma, + struct sg_page_iter *sgiter) +{ + struct 
page *pages[TCMU_ZEROCOPY_PAGE_BATCH]; + unsigned int len = iov->iov_len; + unsigned long address = (unsigned long)iov->iov_base; + unsigned long pages_remaining, pg_index = 0; + struct page *page; + int ret; + + while (len > 0) { + __sg_page_iter_next(sgiter); + page = sg_page_iter_page(sgiter); + pages[pg_index++] = page; + len -= PAGE_SIZE; + if (pg_index == TCMU_ZEROCOPY_PAGE_BATCH || !len) { + pages_remaining = pg_index; + ret = vm_insert_pages_mkspecial(vma, address, pages, + &pages_remaining); + if (ret < 0) { + pr_err("vm insert pages failed, error code: %d\n", ret); + return ret; + } + address = address + pg_index * PAGE_SIZE; + pg_index = 0; + } + } + + return 0; +} + +long tcmu_ioctl_cmd_zerocopy(struct tcmu_dev *udev, unsigned long arg) +{ + struct tcmu_cmd *cmd; + struct se_cmd *se_cmd; + struct scatterlist *data_sg; + unsigned int data_nents; + struct tcmu_cmd_zerocopy zc; + struct iovec *iov, *tiov; + struct sg_page_iter sgiter; + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int i, ret = 0; + + if (copy_from_user(&zc, (struct tcmu_cmd_zerocopy __user *)arg, sizeof(zc))) + return -EFAULT; + + if (zc.iov_cnt <= 0) + return -EINVAL; + + iov = kmalloc_array(zc.iov_cnt, sizeof(struct iovec), GFP_KERNEL); + if (!iov) + return -ENOMEM; + if (copy_from_user(iov, zc.iov, sizeof(struct iovec) * zc.iov_cnt)) { + kfree(iov); + return -EFAULT; + } + + mutex_lock(&udev->cmdr_lock); + mmap_read_lock(mm); + cmd = xa_load(&udev->commands, zc.cmd_id); + if (!cmd) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: cmd_id %d not found\n", zc.cmd_id); + goto out; + } + se_cmd = cmd->se_cmd; + + vma = find_vma(current->mm, (unsigned long)iov->iov_base); + if (!vma) { + ret = -EINVAL; + kfree(iov); + pr_err("tcmu zero copy: invalid iov_base\n"); + goto out; + } + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + __sg_page_iter_start(&sgiter, data_sg, data_nents, 0); + tiov = iov; + for (i = 0; i < zc.iov_cnt; i++) { + ret = tcmu_zerocopy_one_seg(tiov, vma, &sgiter); + if (ret < 0) { + kfree(iov); + goto out; + } + tiov++; + } + + cmd->iov = iov; + cmd->iov_cnt = zc.iov_cnt; + cmd->vma_vm_mm = vma->vm_mm; + cmd->vma = vma; + mmgrab(cmd->vma_vm_mm); +out: + mmap_read_unlock(mm); + mutex_unlock(&udev->cmdr_lock); + return ret; +} + static int tcmu_open(struct uio_info *info, struct inode *inode) { struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); @@ -2000,6 +2308,108 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) return 0; } +static long tcmu_do_copy_data(struct tcmu_cmd *tcmu_cmd, + struct iovec __user *uiovec, + unsigned int vcnt, + bool is_copy_to_sgl) +{ + struct iovec iovstack[UIO_FASTIOV]; + struct iovec *iov = iovstack; + struct iov_iter iter; + ssize_t ret; + struct se_cmd *se_cmd = tcmu_cmd->se_cmd; + struct scatterlist *data_sg, *sg; + int i; + unsigned int data_nents; + + if (se_cmd->se_cmd_flags & SCF_BIDI) { + data_sg = se_cmd->t_bidi_data_sg; + data_nents = se_cmd->t_bidi_data_nents; + } else { + data_sg = se_cmd->t_data_sg; + data_nents = se_cmd->t_data_nents; + } + + ret = import_iovec(is_copy_to_sgl ? 
ITER_SOURCE : ITER_DEST, + uiovec, vcnt, ARRAY_SIZE(iovstack), &iov, &iter); + if (ret < 0) { + pr_err("import iovec failed.\n"); + return -EFAULT; + } + + for_each_sg(data_sg, sg, data_nents, i) { + if (is_copy_to_sgl) + ret = copy_page_from_iter(sg_page(sg), sg->offset, sg->length, &iter); + else + ret = copy_page_to_iter(sg_page(sg), sg->offset, sg->length, &iter); + if (ret < 0) { + pr_err("copy failed.\n"); + break; + } + } + kfree(iov); + return ret < 0 ? -EFAULT : 0; +} + +static long tcmu_bypass_data_area_copy_data(struct tcmu_dev *udev, + unsigned long arg, + bool is_copy_to_sgl) +{ + struct tcmu_data_xfer __user *uxfer = (struct tcmu_data_xfer __user *)arg; + struct tcmu_data_xfer xfer; + struct tcmu_cmd *tcmu_cmd; + long ret; + + if (copy_from_user(&xfer, uxfer, sizeof(xfer))) + return -EFAULT; + + tcmu_cmd = xa_load(&udev->commands, xfer.cmd_id); + if (!tcmu_cmd) { + pr_err("Can not find tcmu command, cmd_id:%d\n", xfer.cmd_id); + set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags); + return -EFAULT; + } + + mutex_lock(&tcmu_cmd->cmd_lock); + if (!test_bit(TCMU_CMD_BIT_BYPASS_DATA_AREA, &tcmu_cmd->flags)) { + ret = -EINVAL; + goto out; + } + + if (test_bit(TCMU_CMD_BIT_EXPIRED, &tcmu_cmd->flags)) { + pr_err("Command is expired, cmd_id:%d\n", xfer.cmd_id); + ret = -EFAULT; + goto out; + } + + ret = tcmu_do_copy_data(tcmu_cmd, xfer.iovec, + xfer.iov_cnt, is_copy_to_sgl); +out: + mutex_unlock(&tcmu_cmd->cmd_lock); + return ret; +} + +static long tcmu_ioctl(struct uio_info *info, unsigned int cmd, unsigned long arg) +{ + struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info); + long ret; + + switch (cmd) { + case TCMU_IOCTL_CMD_COPY_TO_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, true); + break; + case TCMU_IOCTL_CMD_COPY_FROM_SGL: + ret = tcmu_bypass_data_area_copy_data(udev, arg, false); + break; + case TCMU_IOCTL_CMD_ZEROCOPY: + ret = tcmu_ioctl_cmd_zerocopy(udev, arg); + break; + default: + ret = -EINVAL; + } + return ret; +} + static int tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) { struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; @@ -2200,6 +2610,7 @@ static int tcmu_configure_device(struct se_device *dev) struct uio_info *info; struct tcmu_mailbox *mb; size_t data_size; + size_t zc_data_size; int ret = 0; ret = tcmu_update_uio_info(udev); @@ -2210,10 +2621,11 @@ static int tcmu_configure_device(struct se_device *dev) mutex_lock(&udev->cmdr_lock); udev->data_bitmap = bitmap_zalloc(udev->max_blocks, GFP_KERNEL); + udev->zc_data_bitmap = bitmap_zalloc(udev->zc_max_blocks, GFP_KERNEL); mutex_unlock(&udev->cmdr_lock); - if (!udev->data_bitmap) { + if (!udev->data_bitmap || !udev->zc_data_bitmap) { ret = -ENOMEM; - goto err_bitmap_alloc; + goto err_vzalloc; } mb = vzalloc(udev->cmdr_size + CMDR_OFF); @@ -2227,9 +2639,12 @@ static int tcmu_configure_device(struct se_device *dev) udev->cmdr = (void *)mb + CMDR_OFF; udev->data_off = udev->cmdr_size + CMDR_OFF; data_size = TCMU_MBS_TO_PAGES(udev->data_area_mb) << PAGE_SHIFT; - udev->mmap_pages = (data_size + udev->cmdr_size + CMDR_OFF) >> PAGE_SHIFT; + zc_data_size = (udev->zc_max_blocks * udev->data_pages_per_blk) << PAGE_SHIFT; + udev->mmap_pages = (data_size + zc_data_size + udev->cmdr_size + + CMDR_OFF) >> PAGE_SHIFT; udev->data_blk_size = udev->data_pages_per_blk * PAGE_SIZE; udev->dbi_thresh = 0; /* Default in Idle state */ + udev->zc_dbi_thresh = 0; /* Default in Idle state */ /* Initialise the mailbox of the ring buffer */ mb->version = TCMU_MAILBOX_VERSION; @@ -2247,7 +2662,8 @@ static int 
tcmu_configure_device(struct se_device *dev) info->mem[0].name = "tcm-user command & data buffer"; info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr; - info->mem[0].size = data_size + udev->cmdr_size + CMDR_OFF; + info->mem[0].size = data_size + zc_data_size + + udev->cmdr_size + CMDR_OFF; info->mem[0].memtype = UIO_MEM_NONE; info->irqcontrol = tcmu_irqcontrol; @@ -2256,6 +2672,7 @@ static int tcmu_configure_device(struct se_device *dev) info->mmap = tcmu_mmap; info->open = tcmu_open; info->release = tcmu_release; + info->ioctl = tcmu_ioctl; ret = uio_register_device(tcmu_root_device, info); if (ret) @@ -2302,7 +2719,8 @@ static int tcmu_configure_device(struct se_device *dev) err_vzalloc: bitmap_free(udev->data_bitmap); udev->data_bitmap = NULL; -err_bitmap_alloc: + kfree(udev->zc_data_bitmap); + udev->zc_data_bitmap = NULL; kfree(info->name); info->name = NULL; @@ -3137,6 +3555,130 @@ static ssize_t tcmu_free_kept_buf_store(struct config_item *item, const char *pa } CONFIGFS_ATTR_WO(tcmu_, free_kept_buf); +static ssize_t tcmu_read_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_read_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = strtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_READ_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, read_bypass_data_area); + +static ssize_t tcmu_write_bypass_data_area_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + if (test_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags)) + return snprintf(page, PAGE_SIZE, "%s\n", "true"); + else + return snprintf(page, PAGE_SIZE, "%s\n", "false"); +} + +static ssize_t tcmu_write_bypass_data_area_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + bool bypass_data_area; + int ret; + + ret = strtobool(page, &bypass_data_area); + if (ret < 0) + return ret; + + if (bypass_data_area) + set_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + else + clear_bit(TCMU_DEV_BIT_WRITE_BYPASS_DATA_AREA, &udev->flags); + + return count; +} +CONFIGFS_ATTR(tcmu_, write_bypass_data_area); + +static ssize_t tcmu_read_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->read_zc_size); +} + +static ssize_t tcmu_read_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct 
se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t read_zc_size; + int ret; + + ret = kstrtou32(page, 0, &read_zc_size); + if (ret < 0) + return ret; + + udev->read_zc_size = read_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, read_zc_size); + +static ssize_t tcmu_write_zc_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%ukb\n", udev->write_zc_size); +} + +static ssize_t tcmu_write_zc_size_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + uint32_t write_zc_size; + int ret; + + ret = kstrtou32(page, 0, &write_zc_size); + if (ret < 0) + return ret; + + udev->write_zc_size = write_zc_size; + + return count; +} +CONFIGFS_ATTR(tcmu_, write_zc_size); + static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_cmd_time_out, &tcmu_attr_qfull_time_out, @@ -3148,6 +3690,10 @@ static struct configfs_attribute *tcmu_attrib_attrs[] = { &tcmu_attr_emulate_write_cache, &tcmu_attr_tmr_notification, &tcmu_attr_nl_reply_supported, + &tcmu_attr_read_bypass_data_area, + &tcmu_attr_write_bypass_data_area, + &tcmu_attr_read_zc_size, + &tcmu_attr_write_zc_size, NULL, }; @@ -3212,6 +3758,7 @@ static void find_free_blocks(void) continue; } + down_write(&udev->i_mmap_sem); end = udev->dbi_max + 1; block = find_last_bit(udev->data_bitmap, end); if (block == udev->dbi_max) { @@ -3219,6 +3766,7 @@ static void find_free_blocks(void) * The last bit is dbi_max, so it is not possible * reclaim any blocks. */ + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); continue; } else if (block == end) { @@ -3246,6 +3794,7 @@ static void find_free_blocks(void) off = udev->data_off + (loff_t)start * udev->data_blk_size; unmap_mapping_range(udev->inode->i_mapping, off, 0, 1); + up_write(&udev->i_mmap_sem); mutex_unlock(&udev->cmdr_lock); total_pages_freed += pages_freed; diff --git a/drivers/tty/serial/8250/8250_sunway.c b/drivers/tty/serial/8250/8250_sunway.c new file mode 100644 index 0000000000000000000000000000000000000000..9e3db232c8325528c24d76be8138d5e83bb555ae --- /dev/null +++ b/drivers/tty/serial/8250/8250_sunway.c @@ -0,0 +1,786 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Synopsys SUNWAY 8250 driver. + * + * Copyright 2011 Picochip, Jamie Iles. + * Copyright 2013 Intel Corporation + * + * The Synopsys SUNWAY 8250 has an extra feature whereby it detects if the + * LCR is written whilst busy. If it is, then a busy detect interrupt is + * raised, the LCR needs to be rewritten and the uart status register read. 
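+ *
+ * In this driver that recovery is split between sunway8250_handle_irq(),
+ * which reads the status register (USR) when UART_IIR_BUSY is signalled,
+ * and sunway8250_check_lcr(), which re-writes the LCR (up to 1000 tries,
+ * forcing the FIFOs idle in between) until the value sticks.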
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "8250.h" + +/* Offsets for the DesignWare specific registers */ +#define SUNWAY_UART_USR 0x1f /* UART Status Register */ +#define SUNWAY_UART_DLF 0xc0 /* Divisor Latch Fraction Register */ +#define SUNWAY_UART_CPR 0xf4 /* Component Parameter Register */ +#define SUNWAY_UART_UCV 0xf8 /* UART Component Version */ + +/* Component Parameter Register bits */ +#define SUNWAY_UART_CPR_ABP_DATA_WIDTH (3 << 0) +#define SUNWAY_UART_CPR_AFCE_MODE (1 << 4) +#define SUNWAY_UART_CPR_THRE_MODE (1 << 5) +#define SUNWAY_UART_CPR_SIR_MODE (1 << 6) +#define SUNWAY_UART_CPR_SIR_LP_MODE (1 << 7) +#define SUNWAY_UART_CPR_ADDITIONAL_FEATURES (1 << 8) +#define SUNWAY_UART_CPR_FIFO_ACCESS (1 << 9) +#define SUNWAY_UART_CPR_FIFO_STAT (1 << 10) +#define SUNWAY_UART_CPR_SHADOW (1 << 11) +#define SUNWAY_UART_CPR_ENCODED_PARMS (1 << 12) +#define SUNWAY_UART_CPR_DMA_EXTRA (1 << 13) +#define SUNWAY_UART_CPR_FIFO_MODE (0xff << 16) +/* Helper for fifo size calculation */ +#define SUNWAY_UART_CPR_FIFO_SIZE(a) (((a >> 16) & 0xff) * 16) + +/* DesignWare specific register fields */ +#define SUNWAY_UART_MCR_SIRE BIT(6) + +struct sunway8250_data { + u8 usr_reg; + u8 dlf_size; + int line; + int msr_mask_on; + int msr_mask_off; + struct clk *clk; + struct clk *pclk; + struct reset_control *rst; + struct uart_8250_dma dma; + + unsigned int skip_autocfg:1; + unsigned int uart_16550_compatible:1; +}; + +static inline u32 sunway8250_readl_ext(struct uart_port *p, int offset) +{ + if (p->iotype == UPIO_MEM32BE) + return ioread32be(p->membase + offset); + return readl(p->membase + offset); +} + +static inline void sunway8250_writel_ext(struct uart_port *p, int offset, u32 reg) +{ + if (p->iotype == UPIO_MEM32BE) + iowrite32be(reg, p->membase + offset); + else + writel(reg, p->membase + offset); +} + +static inline int sunway8250_modify_msr(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Override any modem control signals if needed */ + if (offset == UART_MSR) { + value |= d->msr_mask_on; + value &= ~d->msr_mask_off; + } + + return value; +} + +static void sunway8250_force_idle(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + + serial8250_clear_and_reinit_fifos(up); + (void)p->serial_in(p, UART_RX); +} + +static void sunway8250_check_lcr(struct uart_port *p, int value) +{ + void __iomem *offset = p->membase + (UART_LCR << p->regshift); + int tries = 1000; + + /* Make sure LCR write wasn't ignored */ + while (tries--) { + unsigned int lcr = p->serial_in(p, UART_LCR); + + if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR)) + return; + + sunway8250_force_idle(p); + +#ifdef CONFIG_64BIT + if (p->type == PORT_OCTEON) { + __raw_writeq(value & 0xff, offset); + continue; + } +#endif + if (p->iotype == UPIO_MEM32) + writel(value, offset); + else if (p->iotype == UPIO_MEM32BE) + iowrite32be(value, offset); + else + writeb(value, offset); + } + /* + * FIXME: this deadlocks if port->lock is already held + * dev_err(p->dev, "Couldn't set LCR to %d\n", value); + */ +} + +/* Returns once the transmitter is empty or we run out of retries */ +static void sunway8250_tx_wait_empty(struct uart_port *p) +{ + unsigned int tries = 20000; + unsigned int delay_threshold = tries - 1000; + unsigned int lsr; + + while (tries--) { + lsr = readb(p->membase + (UART_LSR << p->regshift)); + if (lsr & UART_LSR_TEMT) 
+ break; + + /* + * The device is first given a chance to empty without delay, + * to avoid slowdowns at high bitrates. If after 1000 tries + * the buffer has still not emptied, allow more time for low- + * speed links. + */ + if (tries < delay_threshold) + udelay(1); + } +} + +static void sunway8250_serial_out38x(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + /* Allow the TX to drain before we reconfigure */ + if (offset == UART_LCR) + sunway8250_tx_wait_empty(p); + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + + +static void sunway8250_serial_out(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writeb(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in(struct uart_port *p, int offset) +{ + unsigned int value = readb(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +#ifdef CONFIG_64BIT +static unsigned int sunway8250_serial_inq(struct uart_port *p, int offset) +{ + unsigned int value; + + value = (u8)__raw_readq(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_outq(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + value &= 0xff; + __raw_writeq(value, p->membase + (offset << p->regshift)); + /* Read back to ensure register write ordering. */ + __raw_readq(p->membase + (UART_LCR << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} +#endif /* CONFIG_64BIT */ + +static void sunway8250_serial_out32(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + writel(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32(struct uart_port *p, int offset) +{ + unsigned int value = readl(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + +static void sunway8250_serial_out32be(struct uart_port *p, int offset, int value) +{ + struct sunway8250_data *d = p->private_data; + + iowrite32be(value, p->membase + (offset << p->regshift)); + + if (offset == UART_LCR && !d->uart_16550_compatible) + sunway8250_check_lcr(p, value); +} + +static unsigned int sunway8250_serial_in32be(struct uart_port *p, int offset) +{ + unsigned int value = ioread32be(p->membase + (offset << p->regshift)); + + return sunway8250_modify_msr(p, offset, value); +} + + +static int sunway8250_handle_irq(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + struct sunway8250_data *d = p->private_data; + unsigned int iir = p->serial_in(p, UART_IIR); + unsigned int status; + unsigned long flags; + + /* + * There are ways to get Designware-based UARTs into a state where + * they are asserting UART_IIR_RX_TIMEOUT but there is no actual + * data available. If we see such a case then we'll do a bogus + * read. If we don't do this then the "RX TIMEOUT" interrupt will + * fire forever. + * + * This problem has only been observed so far when not in DMA mode + * so we limit the workaround only to non-DMA mode. 
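+	 *
+	 * Concretely: UART_IIR_RX_TIMEOUT is IIR value 0x0c. When it is
+	 * seen with no data pending (LSR has neither DR nor BI set), the
+	 * dummy UART_RX read below clears the timeout condition so the
+	 * interrupt stops re-asserting. No data can be lost, since the
+	 * read only happens when the LSR says the FIFO is empty.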
+ */ + if (!up->dma && ((iir & 0x3f) == UART_IIR_RX_TIMEOUT)) { + spin_lock_irqsave(&p->lock, flags); + status = p->serial_in(p, UART_LSR); + + if (!(status & (UART_LSR_DR | UART_LSR_BI))) + (void) p->serial_in(p, UART_RX); + + spin_unlock_irqrestore(&p->lock, flags); + } + + if (serial8250_handle_irq(p, iir)) + return 1; + + if ((iir & UART_IIR_BUSY) == UART_IIR_BUSY) { + /* Clear the USR */ + (void)p->serial_in(p, d->usr_reg); + + return 1; + } + + return 0; +} + +static void +sunway8250_do_pm(struct uart_port *port, unsigned int state, unsigned int old) +{ + if (!state) + pm_runtime_get_sync(port->dev); + + serial8250_do_pm(port, state, old); + + if (state) + pm_runtime_put_sync_suspend(port->dev); +} + +static void sunway8250_set_termios(struct uart_port *p, struct ktermios *termios, + const struct ktermios *old) +{ + unsigned int baud = tty_termios_baud_rate(termios); + struct sunway8250_data *d = p->private_data; + long rate; + int ret; + + if (IS_ERR(d->clk)) + goto out; + + clk_disable_unprepare(d->clk); + rate = clk_round_rate(d->clk, baud * 16); + if (rate < 0) + ret = rate; + else if (rate == 0) + ret = -ENOENT; + else + ret = clk_set_rate(d->clk, rate); + clk_prepare_enable(d->clk); + + if (!ret) + p->uartclk = rate; + +out: + p->status &= ~UPSTAT_AUTOCTS; + if (termios->c_cflag & CRTSCTS) + p->status |= UPSTAT_AUTOCTS; + + serial8250_do_set_termios(p, termios, old); +} + +static void sunway8250_set_ldisc(struct uart_port *p, struct ktermios *termios) +{ + struct uart_8250_port *up = up_to_u8250p(p); + unsigned int mcr = p->serial_in(p, UART_MCR); + + if (up->capabilities & UART_CAP_IRDA) { + if (termios->c_line == N_IRDA) + mcr |= SUNWAY_UART_MCR_SIRE; + else + mcr &= ~SUNWAY_UART_MCR_SIRE; + + p->serial_out(p, UART_MCR, mcr); + } + serial8250_do_set_ldisc(p, termios); +} + +/* + * sunway8250_fallback_dma_filter will prevent the UART from getting just any free + * channel on platforms that have DMA engines, but don't have any channels + * assigned to the UART. + * + * REVISIT: This is a work around for limitation in the DMA Engine API. Once the + * core problem is fixed, this function is no longer needed. 
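+ *
+ * Returning false for every channel means dma_request_channel() can
+ * never hand the UART an arbitrary channel; the port then simply runs
+ * in PIO mode unless sunway8250_quirks() swaps in
+ * sunway8250_idma_filter() for platforms with a dedicated iDMA engine.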
+ */ +static bool sunway8250_fallback_dma_filter(struct dma_chan *chan, void *param) +{ + return false; +} + +static bool sunway8250_idma_filter(struct dma_chan *chan, void *param) +{ + return param == chan->device->dev; +} + +/* + * divisor = div(I) + div(F) + * "I" means integer, "F" means fractional + * quot = div(I) = clk / (16 * baud) + * frac = div(F) * 2^dlf_size + * + * let rem = clk % (16 * baud) + * we have: div(F) * (16 * baud) = rem + * so frac = 2^dlf_size * rem / (16 * baud) = (rem << dlf_size) / (16 * baud) + */ +static unsigned int sunway8250_get_divisor(struct uart_port *p, + unsigned int baud, + unsigned int *frac) +{ + unsigned int quot, rem, base_baud = baud * 16; + struct sunway8250_data *d = p->private_data; + + quot = p->uartclk / base_baud; + rem = p->uartclk % base_baud; + *frac = DIV_ROUND_CLOSEST(rem << d->dlf_size, base_baud); + + return quot; +} + +static void sunway8250_set_divisor(struct uart_port *p, unsigned int baud, + unsigned int quot, unsigned int quot_frac) +{ + sunway8250_writel_ext(p, SUNWAY_UART_DLF, quot_frac); + serial8250_do_set_divisor(p, baud, quot, quot_frac); +} + +static void sunway8250_quirks(struct uart_port *p, struct sunway8250_data *data) +{ + if (p->dev->of_node) { + struct device_node *np = p->dev->of_node; + int id; + + /* get index of serial line, if found in DT aliases */ + id = of_alias_get_id(np, "serial"); + if (id >= 0) + p->line = id; +#ifdef CONFIG_64BIT + if (of_device_is_compatible(np, "cavium,octeon-3860-uart")) { + p->serial_in = sunway8250_serial_inq; + p->serial_out = sunway8250_serial_outq; + p->flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_FIXED_TYPE; + p->type = PORT_OCTEON; + data->usr_reg = 0x27; + data->skip_autocfg = true; + } +#endif + if (of_device_is_big_endian(p->dev->of_node)) { + p->iotype = UPIO_MEM32BE; + p->serial_in = sunway8250_serial_in32be; + p->serial_out = sunway8250_serial_out32be; + } + if (of_device_is_compatible(np, "marvell,armada-38x-uart")) + p->serial_out = sunway8250_serial_out38x; + + } else if (acpi_dev_present("APMC0D08", NULL, -1)) { + p->iotype = UPIO_MEM32; + p->regshift = 2; + p->serial_in = sunway8250_serial_in32; + data->uart_16550_compatible = true; + } + + /* Platforms with iDMA 64-bit */ + if (platform_get_resource_byname(to_platform_device(p->dev), + IORESOURCE_MEM, "lpss_priv")) { + data->dma.rx_param = p->dev->parent; + data->dma.tx_param = p->dev->parent; + data->dma.fn = sunway8250_idma_filter; + } +} + +static void sunway8250_setup_port(struct uart_port *p) +{ + struct uart_8250_port *up = up_to_u8250p(p); + u32 reg; + + /* + * If the Component Version Register returns zero, we know that + * ADDITIONAL_FEATURES are not enabled. No need to go any further. 
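+	 *
+	 * A non-zero UCV holds an ASCII-encoded version in its top three
+	 * bytes, printed below as "%c.%c%c"; for instance a hypothetical
+	 * reading of 0x33313261 would be logged as version 3.12.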
+ */ + reg = sunway8250_readl_ext(p, SUNWAY_UART_UCV); + if (!reg) + return; + + dev_dbg(p->dev, "Designware UART version %c.%c%c\n", + (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + + sunway8250_writel_ext(p, SUNWAY_UART_DLF, ~0U); + reg = sunway8250_readl_ext(p, SUNWAY_UART_DLF); + sunway8250_writel_ext(p, SUNWAY_UART_DLF, 0); + + if (reg) { + struct sunway8250_data *d = p->private_data; + + d->dlf_size = fls(reg); + p->get_divisor = sunway8250_get_divisor; + p->set_divisor = sunway8250_set_divisor; + } + + reg = sunway8250_readl_ext(p, SUNWAY_UART_CPR); + if (!reg) + return; + + /* Select the type based on fifo */ + if (reg & SUNWAY_UART_CPR_FIFO_MODE) { + p->type = PORT_16550A; + p->flags |= UPF_FIXED_TYPE; + p->fifosize = SUNWAY_UART_CPR_FIFO_SIZE(reg); + up->capabilities = UART_CAP_FIFO; + } + + if (reg & SUNWAY_UART_CPR_AFCE_MODE) + up->capabilities |= UART_CAP_AFE; + + if (reg & SUNWAY_UART_CPR_SIR_MODE) + up->capabilities |= UART_CAP_IRDA; +} + +static int sunway8250_probe(struct platform_device *pdev) +{ + struct uart_8250_port uart = {}; + struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + int irq = platform_get_irq(pdev, 0); + struct uart_port *p = &uart.port; + struct device *dev = &pdev->dev; + struct sunway8250_data *data; + int err; + u32 val; + + if (!regs) { + dev_err(dev, "no registers defined\n"); + return -EINVAL; + } + + if (irq < 0) { + if (irq != -EPROBE_DEFER) + dev_err(dev, "cannot get irq\n"); + irq = 0; // Set serial poll mode + } + + spin_lock_init(&p->lock); + p->mapbase = regs->start; + p->irq = irq; + p->handle_irq = sunway8250_handle_irq; + p->pm = sunway8250_do_pm; + p->type = PORT_8250; + p->flags = UPF_SHARE_IRQ | UPF_FIXED_PORT; + p->dev = dev; + p->iotype = UPIO_MEM; + p->serial_in = sunway8250_serial_in; + p->serial_out = sunway8250_serial_out; + p->set_ldisc = sunway8250_set_ldisc; + p->set_termios = sunway8250_set_termios; + + p->membase = devm_ioremap(dev, regs->start, resource_size(regs)); + if (!p->membase) + return -ENOMEM; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dma.fn = sunway8250_fallback_dma_filter; + data->usr_reg = SUNWAY_UART_USR; + p->private_data = data; + + data->uart_16550_compatible = device_property_read_bool(dev, + "snps,uart-16550-compatible"); + + err = device_property_read_u32(dev, "reg-shift", &val); + if (!err) + p->regshift = val; + + err = device_property_read_u32(dev, "reg-io-width", &val); + if (!err && val == 4) { + p->iotype = UPIO_MEM32; + p->serial_in = sunway8250_serial_in32; + p->serial_out = sunway8250_serial_out32; + } + + if (device_property_read_bool(dev, "dcd-override")) { + /* Always report DCD as active */ + data->msr_mask_on |= UART_MSR_DCD; + data->msr_mask_off |= UART_MSR_DDCD; + } + + if (device_property_read_bool(dev, "dsr-override")) { + /* Always report DSR as active */ + data->msr_mask_on |= UART_MSR_DSR; + data->msr_mask_off |= UART_MSR_DDSR; + } + + if (device_property_read_bool(dev, "cts-override")) { + /* Always report CTS as active */ + data->msr_mask_on |= UART_MSR_CTS; + data->msr_mask_off |= UART_MSR_DCTS; + } + + if (device_property_read_bool(dev, "ri-override")) { + /* Always report Ring indicator as inactive */ + data->msr_mask_off |= UART_MSR_RI; + data->msr_mask_off |= UART_MSR_TERI; + } + + /* Always ask for fixed clock rate from a property. */ + device_property_read_u32(dev, "clock-frequency", &p->uartclk); + + /* If there is separate baudclk, get the rate from it. 
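+	 *
+	 * The lookup order is: a clock named "baudclk", then the device's
+	 * default (unnamed) clock; -EPROBE_DEFER from either aborts the
+	 * probe so it can be retried. If no clock provides a rate, the
+	 * "clock-frequency" property read above must have set uartclk,
+	 * otherwise the probe fails with -EINVAL.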
*/ + data->clk = devm_clk_get(dev, "baudclk"); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) != -EPROBE_DEFER) + data->clk = devm_clk_get(dev, NULL); + if (IS_ERR(data->clk) && PTR_ERR(data->clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (!IS_ERR_OR_NULL(data->clk)) { + err = clk_prepare_enable(data->clk); + if (err) + dev_warn(dev, "could not enable optional baudclk: %d\n", + err); + else + p->uartclk = clk_get_rate(data->clk); + } + + /* If no clock rate is defined, fail. */ + if (!p->uartclk) { + dev_err(dev, "clock rate not defined\n"); + err = -EINVAL; + goto err_clk; + } + + data->pclk = devm_clk_get(dev, "apb_pclk"); + if (IS_ERR(data->pclk) && PTR_ERR(data->pclk) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_clk; + } + if (!IS_ERR(data->pclk)) { + err = clk_prepare_enable(data->pclk); + if (err) { + dev_err(dev, "could not enable apb_pclk\n"); + goto err_clk; + } + } + + data->rst = devm_reset_control_get_optional_exclusive(dev, NULL); + if (IS_ERR(data->rst)) { + err = PTR_ERR(data->rst); + goto err_pclk; + } + reset_control_deassert(data->rst); + + sunway8250_quirks(p, data); + + /* If the Busy Functionality is not implemented, don't handle it */ + if (data->uart_16550_compatible) + p->handle_irq = NULL; + + if (!data->skip_autocfg) + sunway8250_setup_port(p); + + /* If we have a valid fifosize, try hooking up DMA */ + if (p->fifosize) { + data->dma.rxconf.src_maxburst = p->fifosize / 4; + data->dma.txconf.dst_maxburst = p->fifosize / 4; + uart.dma = &data->dma; + } + + data->line = serial8250_register_8250_port(&uart); + if (data->line < 0) { + err = data->line; + goto err_reset; + } + + platform_set_drvdata(pdev, data); + + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + + return 0; + +err_reset: + reset_control_assert(data->rst); + +err_pclk: + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + +err_clk: + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + return err; +} + +static int sunway8250_remove(struct platform_device *pdev) +{ + struct sunway8250_data *data = platform_get_drvdata(pdev); + + pm_runtime_get_sync(&pdev->dev); + + serial8250_unregister_port(data->line); + + reset_control_assert(data->rst); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sunway8250_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_suspend_port(data->line); + + return 0; +} + +static int sunway8250_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + serial8250_resume_port(data->line); + + return 0; +} +#endif /* CONFIG_PM_SLEEP */ + +#ifdef CONFIG_PM +static int sunway8250_runtime_suspend(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); + + if (!IS_ERR(data->pclk)) + clk_disable_unprepare(data->pclk); + + return 0; +} + +static int sunway8250_runtime_resume(struct device *dev) +{ + struct sunway8250_data *data = dev_get_drvdata(dev); + + if (!IS_ERR(data->pclk)) + clk_prepare_enable(data->pclk); + + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); + + return 0; +} +#endif + +static const struct dev_pm_ops sunway8250_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(sunway8250_suspend, sunway8250_resume) + SET_RUNTIME_PM_OPS(sunway8250_runtime_suspend, 
sunway8250_runtime_resume, NULL) +}; + +static const struct of_device_id sunway8250_of_match[] = { + { .compatible = "sw6,sunway-apb-uart" }, + { .compatible = "cavium,octeon-3860-uart" }, + { .compatible = "marvell,armada-38x-uart" }, + { .compatible = "renesas,rzn1-uart" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunway8250_of_match); + +static const struct acpi_device_id sunway8250_acpi_match[] = { + { "INT33C4", 0 }, + { "INT33C5", 0 }, + { "INT3434", 0 }, + { "INT3435", 0 }, + { "80860F0A", 0 }, + { "8086228A", 0 }, + { "APMC0D08", 0}, + { "AMD0020", 0 }, + { "AMDI0020", 0 }, + { "BRCM2032", 0 }, + { "HISI0031", 0 }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sunway8250_acpi_match); + +static struct platform_driver sunway8250_platform_driver = { + .driver = { + .name = "sunway-apb-uart", + .pm = &sunway8250_pm_ops, + .of_match_table = sunway8250_of_match, + .acpi_match_table = ACPI_PTR(sunway8250_acpi_match), + }, + .probe = sunway8250_probe, + .remove = sunway8250_remove, +}; + +module_platform_driver(sunway8250_platform_driver); + +MODULE_AUTHOR("Jamie Iles"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Synopsys DesignWare 8250 serial port driver"); +MODULE_ALIAS("platform:sunway-apb-uart"); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index ee17cf5c44c6b046227093d2892716e506d3e49d..e8edd9388d762807b493e1c92566084662b82fc8 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -407,6 +407,13 @@ config SERIAL_8250_DW Selecting this option will enable handling of the extra features present in the Synopsys DesignWare APB UART. +config SERIAL_8250_SUNWAY + tristate "Support for SW6B Builtin Synopsys DesignWare 8250 quirks" + depends on SERIAL_8250 && SW64 + help + Selecting this option will enable handling of the extra features + present in the Synopsys DesignWare APB UART of SW6. 
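+	  To compile this driver as a module, choose M here: the module
+	  will be called 8250_sunway.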
+ config SERIAL_8250_EM tristate "Support for Emma Mobile integrated serial port" depends on SERIAL_8250 && HAVE_CLK diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index 628b75be312ea531d3aed9674793f15f3659dfa6..8186ea891405db2c8c792ec59b74499d25eed546 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -32,6 +32,7 @@ obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o obj-$(CONFIG_SERIAL_8250_MEN_MCB) += 8250_men_mcb.o obj-$(CONFIG_SERIAL_8250_DFL) += 8250_dfl.o obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o +obj-$(CONFIG_SERIAL_8250_SUNWAY) += 8250_sunway.o obj-$(CONFIG_SERIAL_8250_EM) += 8250_em.o obj-$(CONFIG_SERIAL_8250_IOC3) += 8250_ioc3.o obj-$(CONFIG_SERIAL_8250_OMAP) += 8250_omap.o diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 62082d64ece00613a9d82eadd0552f6262cb9c10..65ec2ea39dcc7ef72a6a211071946e860cdbc559 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -12,6 +12,8 @@ * Base Functions */ +#include +#include #include #include #include @@ -218,7 +220,9 @@ static ssize_t name_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -228,7 +232,7 @@ static ssize_t name_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->name); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(name); @@ -239,7 +243,9 @@ static ssize_t version_show(struct device *dev, struct uio_device *idev = dev_get_drvdata(dev); int ret; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); @@ -249,7 +255,7 @@ static ssize_t version_show(struct device *dev, ret = sprintf(buf, "%s\n", idev->info->version); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } static DEVICE_ATTR_RO(version); @@ -489,16 +495,20 @@ static int uio_open(struct inode *inode, struct file *filep) listener->event_count = atomic_read(&idev->event); filep->private_data = listener; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + ret = -EINVAL; + goto err_infoopen; + } + if (!idev->info) { - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); ret = -EINVAL; goto err_infoopen; } if (idev->info->open) ret = idev->info->open(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) goto err_infoopen; @@ -531,10 +541,12 @@ static int uio_release(struct inode *inode, struct file *filep) struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); module_put(idev->owner); kfree(listener); @@ -548,10 +560,12 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait) struct uio_device *idev = listener->dev; __poll_t ret = 0; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info || !idev->info->irq) ret = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); if (ret) return 
ret; @@ -577,13 +591,17 @@ static ssize_t uio_read(struct file *filep, char __user *buf, add_wait_queue(&idev->wait, &wait); do { - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) { + retval = -EINVAL; + break; + } + if (!idev->info || !idev->info->irq) { retval = -EIO; - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); break; } - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); set_current_state(TASK_INTERRUPTIBLE); @@ -631,7 +649,9 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, if (copy_from_user(&irq_on, buf, count)) return -EFAULT; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { retval = -EINVAL; goto out; @@ -650,7 +670,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf, retval = idev->info->irqcontrol(idev->info, irq_on); out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return retval ? retval : sizeof(s32); } @@ -675,7 +695,9 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vm_fault_t ret = 0; int mi; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return VM_FAULT_SIGBUS; + if (!idev->info) { ret = VM_FAULT_SIGBUS; goto out; @@ -702,8 +724,7 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf) vmf->page = page; out: - mutex_unlock(&idev->info_lock); - + percpu_ref_put(&idev->info_ref); return ret; } @@ -772,7 +793,9 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) vma->vm_private_data = idev; - mutex_lock(&idev->info_lock); + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + if (!idev->info) { ret = -EINVAL; goto out; @@ -811,10 +834,30 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma) } out: - mutex_unlock(&idev->info_lock); + percpu_ref_put(&idev->info_ref); return ret; } +static long uio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct uio_listener *listener = filp->private_data; + struct uio_device *idev = listener->dev; + long retval = 0; + + if (!percpu_ref_tryget_live(&idev->info_ref)) + return -EINVAL; + + if (!idev->info || !idev->info->ioctl) { + retval = -EINVAL; + goto out; + } + + retval = idev->info->ioctl(idev->info, cmd, arg); +out: + percpu_ref_put(&idev->info_ref); + return retval; +} + static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, @@ -825,6 +868,8 @@ static const struct file_operations uio_fops = { .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, + .unlocked_ioctl = uio_ioctl, + .compat_ioctl = uio_ioctl, }; static int uio_major_init(void) @@ -907,6 +952,14 @@ static void uio_device_release(struct device *dev) kfree(idev); } +static void uio_info_free(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->free_done); +} + + /** * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device @@ -937,10 +990,18 @@ int __uio_register_device(struct module *owner, idev->owner = owner; idev->info = info; - mutex_init(&idev->info_lock); init_waitqueue_head(&idev->wait); atomic_set(&idev->event, 0); + ret = percpu_ref_init(&idev->info_ref, uio_info_free, 0, GFP_KERNEL); + if (ret) { + pr_err("percpu_ref init failed!\n"); + kfree(idev); + return ret; + } + init_completion(&idev->confirm_done); + init_completion(&idev->free_done); + ret = uio_get_minor(idev); if (ret) { 
kfree(idev); @@ -1036,6 +1097,13 @@ int __devm_uio_register_device(struct module *owner, } EXPORT_SYMBOL_GPL(__devm_uio_register_device); +static void uio_confirm_info(struct percpu_ref *ref) +{ + struct uio_device *idev = container_of(ref, struct uio_device, info_ref); + + complete(&idev->confirm_done); +} + /** * uio_unregister_device - unregister a industrial IO device * @info: UIO device capabilities @@ -1052,14 +1120,16 @@ void uio_unregister_device(struct uio_info *info) idev = info->uio_dev; minor = idev->minor; - mutex_lock(&idev->info_lock); + percpu_ref_kill_and_confirm(&idev->info_ref, uio_confirm_info); + wait_for_completion(&idev->confirm_done); + wait_for_completion(&idev->free_done); + /* now, we can set info to NULL */ uio_dev_del_attributes(idev); if (info->irq && info->irq != UIO_IRQ_CUSTOM) free_irq(info->irq, idev); idev->info = NULL; - mutex_unlock(&idev->info_lock); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); diff --git a/drivers/usb/core/Makefile b/drivers/usb/core/Makefile index 7d338e9c0657e9aa594c9e3b26f8828645e2067b..8ee58be1fb37ebae836645e3ac4b5c9a32dca086 100644 --- a/drivers/usb/core/Makefile +++ b/drivers/usb/core/Makefile @@ -9,7 +9,7 @@ usbcore-y += devio.o notify.o generic.o quirks.o devices.o usbcore-y += phy.o port.o usbcore-$(CONFIG_OF) += of.o -usbcore-$(CONFIG_USB_PCI) += hcd-pci.o +usbcore-$(CONFIG_USB_PCI) += hcd-pci.o usbcore-$(CONFIG_ACPI) += usb-acpi.o ifdef CONFIG_USB_ONBOARD_HUB diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c index 990280688b254d913d843bde17dae0a8be203778..df8f91e6a2c7f1aa14fbde177b8886396978da7f 100644 --- a/drivers/usb/core/hcd-pci.c +++ b/drivers/usb/core/hcd-pci.c @@ -48,6 +48,9 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); +#if IS_ENABLED(CONFIG_X86) + struct pci_driver *drv; +#endif /* * Iterate through other PCI functions in the same slot. @@ -60,6 +63,18 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, PCI_SLOT(companion->devfn) != slot) continue; +#if IS_ENABLED(CONFIG_X86) + if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR || + boot_cpu_data.x86_vendor == X86_VENDOR_ZHAOXIN) { + drv = companion->driver; + if (drv && + strncmp(drv->name, "uhci_hcd", sizeof("uhci_hcd") - 1) && + strncmp(drv->name, "ohci-pci", sizeof("ohci-pci") - 1) && + strncmp(drv->name, "ehci-pci", sizeof("ehci-pci") - 1)) + continue; + } +#endif + /* * Companion device should be either UHCI,OHCI or EHCI host * controller, otherwise skip. 
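
[Editorial note] The uio rework above replaces the per-device ``info_lock`` mutex with a percpu_ref: hot paths (open, read, poll, mmap, fault, ioctl) now pay only a per-CPU counter increment, and uio_unregister_device() waits until every in-flight user has dropped its reference before tearing ``info`` down. A minimal sketch of that pattern, with a toy ``struct foo`` standing in for ``struct uio_device`` (all names below are illustrative, not part of the patch)::

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	struct foo {
		struct percpu_ref ref;
		struct completion confirm_done;	/* ref switched to atomic mode */
		struct completion free_done;	/* last reference dropped */
		void *info;			/* state guarded by the ref */
	};

	static void foo_ref_free(struct percpu_ref *ref)
	{
		/* runs once the final percpu_ref_put() has happened */
		complete(&container_of(ref, struct foo, ref)->free_done);
	}

	static void foo_ref_confirm(struct percpu_ref *ref)
	{
		/* runs once the kill is visible to all CPUs */
		complete(&container_of(ref, struct foo, ref)->confirm_done);
	}

	static int foo_init(struct foo *f)
	{
		init_completion(&f->confirm_done);
		init_completion(&f->free_done);
		return percpu_ref_init(&f->ref, foo_ref_free, 0, GFP_KERNEL);
	}

	/* reader path: replaces mutex_lock()/mutex_unlock() of info_lock */
	static int foo_use(struct foo *f)
	{
		int ret = 0;

		if (!percpu_ref_tryget_live(&f->ref))
			return -EINVAL;		/* teardown already started */
		if (!f->info)
			ret = -EINVAL;
		/* ... use f->info ... */
		percpu_ref_put(&f->ref);
		return ret;
	}

	/* teardown: no reader is inside the section once this returns */
	static void foo_teardown(struct foo *f)
	{
		percpu_ref_kill_and_confirm(&f->ref, foo_ref_confirm);
		wait_for_completion(&f->confirm_done);
		wait_for_completion(&f->free_done);
		f->info = NULL;		/* safe: tryget_live() now always fails */
		percpu_ref_exit(&f->ref);
	}

The two completions mirror the patch: after percpu_ref_kill_and_confirm() no new tryget_live() can succeed, and ``free_done`` only fires when the last outstanding reference is dropped, so clearing ``info`` afterwards cannot race with a reader.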
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 2665832f9addff97c4ec520cb7f7bb66de82dac0..498497cace207c1d96841474e2b7d4fd28880aef 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -1283,3 +1283,130 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) } DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_SERIAL_USB, 8, quirk_usb_early_handoff); + +#ifdef CONFIG_SW64 +#include +#define XHCI_STS_FATAL (1 << 2) +#define XHCI_STS_EINT (1 << 3) +#define XHCI_STS_PORT (1 << 4) +#define XHCI_STS_SRE (1 << 10) +#define STS_RW1C_BITS (XHCI_STS_FATAL | XHCI_STS_EINT | XHCI_STS_PORT | XHCI_STS_SRE) + +static void +fixup_usb_xhci_reset(struct pci_dev *dev) +{ + void __iomem *op_reg_base; + int timeout; + u32 xhci_command; + u32 tmp, val; + void __iomem *base; + struct pci_controller *hose = pci_bus_to_pci_controller(dev->bus); + unsigned long offset; + int ext_cap_offset; + int retries = 3; + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); + + pci_read_config_dword(dev, PCI_BASE_ADDRESS_0, &tmp); + if (tmp & PCI_BASE_ADDRESS_MEM_TYPE_MASK) { + pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &val); + offset = (unsigned long)(val) << 32 | (tmp & (~0xf)); + } else + offset = (unsigned long)(tmp & (~0xf)); + + if (offset == 0) + return; + + base = (void *)__va(SW64_PCI_IO_BASE(hose->node, hose->index) | offset); + + ext_cap_offset = xhci_find_next_ext_cap(base, 0, XHCI_EXT_CAPS_LEGACY); + if (!ext_cap_offset) + goto hc_init; + + val = readl(base + ext_cap_offset); + + if ((dev->vendor == PCI_VENDOR_ID_TI && dev->device == 0x8241) || + (dev->vendor == PCI_VENDOR_ID_RENESAS + && dev->device == 0x0014)) { + val = (val | XHCI_HC_OS_OWNED) & ~XHCI_HC_BIOS_OWNED; + writel(val, base + ext_cap_offset); + } + + if (val & XHCI_HC_BIOS_OWNED) { + writel(val | XHCI_HC_OS_OWNED, base + ext_cap_offset); + + timeout = handshake(base + ext_cap_offset, XHCI_HC_BIOS_OWNED, + 0, 1000000, 10); + if (timeout) { + pr_err("xHCI BIOS handoff failed (BIOS bug ?) %08x\n", val); + writel(val & ~XHCI_HC_BIOS_OWNED, base + ext_cap_offset); + } + } + + val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + val &= XHCI_LEGACY_DISABLE_SMI; + val |= XHCI_LEGACY_SMI_EVENTS; + writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); + +hc_init: + if (dev->vendor == PCI_VENDOR_ID_INTEL) + usb_enable_intel_xhci_ports(dev); + + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); + + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_CNR, 0, + 5000000, 10); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW not ready after 5 sec (HC bug?) 
status = 0x%x\n", val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + +retry: + val = readl(op_reg_base + XHCI_STS_OFFSET); + val |= STS_RW1C_BITS; + writel(val, op_reg_base + XHCI_STS_OFFSET); + val = readl(op_reg_base + XHCI_STS_OFFSET); + + if ((val & STS_RW1C_BITS) && retries--) { + pr_err("clear USB Status Register (status = %#x) failed, retry\n", val); + goto retry; + } + + val = readl(op_reg_base + XHCI_CMD_OFFSET); + val &= ~(XHCI_CMD_RUN | XHCI_IRQS); + writel(val, op_reg_base + XHCI_CMD_OFFSET); + timeout = handshake(op_reg_base + XHCI_STS_OFFSET, XHCI_STS_HALT, 1, + XHCI_MAX_HALT_USEC, 125); + if (timeout) { + val = readl(op_reg_base + XHCI_STS_OFFSET); + pr_err("xHCI HW did not halt within %d usec status = 0x%x\n", + XHCI_MAX_HALT_USEC, val); + } + + xhci_command = readl(op_reg_base + XHCI_CMD_OFFSET); + xhci_command |= 0x2; + writel(xhci_command, op_reg_base + XHCI_CMD_OFFSET); + + timeout = handshake(op_reg_base + XHCI_CMD_OFFSET, + 0x2, 0, 10 * 1000 * 1000, 125); + if (timeout) + pr_err("xHCI BIOS handoff time out\n"); + + pci_read_config_dword(dev, PCI_COMMAND, &tmp); + tmp &= ~(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + pci_write_config_dword(dev, PCI_COMMAND, tmp); +} +DECLARE_PCI_FIXUP_CLASS_EARLY(PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_SERIAL_USB_XHCI, 0, fixup_usb_xhci_reset); +#endif diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index d6fc08e5db8fbd410c7b547782f44fa1eff23271..0a0a4702ef26d7f8adcb7848e575918f5e10b2d6 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -36,6 +36,7 @@ #define PCI_VENDOR_ID_ETRON 0x1b6f #define PCI_DEVICE_ID_EJ168 0x7023 +#define PCI_DEVICE_ID_EJ188 0x7052 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -461,6 +462,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_TRUST_TX_LENGTH; xhci->quirks |= XHCI_BROKEN_STREAMS; } + + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_EJ188) + xhci->quirks |= XHCI_NO_SOFT_RETRY; + if (pdev->vendor == PCI_VENDOR_ID_RENESAS && pdev->device == 0x0014) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig index 6bda6dbb48784b7047c467388575f9e35c8e2c0b..d80b6ffefd9d03ab173634778f911cf30de1057b 100644 --- a/drivers/vfio/Kconfig +++ b/drivers/vfio/Kconfig @@ -39,7 +39,7 @@ config VFIO_GROUP config VFIO_CONTAINER bool "Support for the VFIO container /dev/vfio/vfio" - select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64) + select VFIO_IOMMU_TYPE1 if MMU && (X86 || S390 || ARM || ARM64 || SW64) depends on VFIO_GROUP default y help diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig index c29754b65c0ec923387cfb8c1b7e0f47557d33cd..35b3ca2fb50a6861c3acdcea8ab7f666f89a924d 100644 --- a/drivers/video/fbdev/Kconfig +++ b/drivers/video/fbdev/Kconfig @@ -1950,6 +1950,19 @@ config FB_SM712 called sm712fb. If you want to compile it as a module, say M here and read . +config FB_LS2K500 + tristate "Loongson LS2K500 frame buffer support" + depends on FB && PCI + select FB_CFB_FILLRECT + select FB_CFB_COPYAREA + select FB_CFB_IMAGEBLIT + help + Frame buffer driver for the Loongson LS7A Platform-Bridge. 
+ + This driver is also available as a module. + If you want to compile it as a module, say M here and read + . + source "drivers/video/fbdev/omap/Kconfig" source "drivers/video/fbdev/omap2/Kconfig" source "drivers/video/fbdev/mmp/Kconfig" diff --git a/drivers/video/fbdev/Makefile b/drivers/video/fbdev/Makefile index 70569f7027ed7b2110c51b1d01f21858a51ee3ce..d3fbb185daa34599e14d397c7df4664774322e9c 100644 --- a/drivers/video/fbdev/Makefile +++ b/drivers/video/fbdev/Makefile @@ -128,3 +128,4 @@ obj-$(CONFIG_FB_SIMPLE) += simplefb.o # the test framebuffer is last obj-$(CONFIG_FB_VIRTUAL) += vfb.o +obj-$(CONFIG_FB_LS2K500) += ls2k500sfb.o diff --git a/drivers/video/fbdev/ls2k500sfb.c b/drivers/video/fbdev/ls2k500sfb.c new file mode 100644 index 0000000000000000000000000000000000000000..00a83ea7c1e3c3aee2a14140e0628879cfd35179 --- /dev/null +++ b/drivers/video/fbdev/ls2k500sfb.c @@ -0,0 +1,792 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * + * linux/drivers/video/ls2k500sfb.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive for + * more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char mode_option[32] = "1280x1024-32@2M"; +module_param_string(mode, mode_option, sizeof(mode_option), 0444); +static int useshell; +module_param(useshell, int, 0664); +static int totty = 18; +module_param(totty, int, 0664); +static int resetdelay = 60; +module_param(resetdelay, int, 0664); +static int resetbootwait = 10; +module_param(resetbootwait, int, 0664); +static int GPIO = 14; +module_param(GPIO, int, 0664); +struct ls2k500sfb_struct { + struct pci_dev *dev; + struct platform_device *pd; + struct workqueue_struct *wq; + struct work_struct work; + struct delayed_work redraw_work; + int running; + unsigned long reset_time; + char *penv; + char saved_env[16]; +}; + +static int saved_console; +static unsigned long mscycles; +static atomic_t waiting_for_pciebreak_ipi; + +static int switch_console(int console) +{ + struct file *filp; + + filp = filp_open("/dev/tty1", O_RDWR, 0); + if (IS_ERR(filp)) + return -ENODEV; + + vfs_ioctl(filp, VT_ACTIVATE, console + 1); + filp_close(filp, NULL); + return 0; +} +static void ls2k500sfb_pciebreak_func(void *unused) +{ + atomic_dec(&waiting_for_pciebreak_ipi); + + while (atomic_read(&waiting_for_pciebreak_ipi)) + cpu_relax(); +} + +static void pciebreak_smp_send_stop(int ms) +{ + /* Wait at most 100 msecond for the other cpus to stop */ + unsigned long max_cycles = mscycles * ms; + unsigned long start_time = get_cycles(); + + atomic_set(&waiting_for_pciebreak_ipi, num_online_cpus()); + smp_call_function(ls2k500sfb_pciebreak_func, NULL, false); + while ((atomic_read(&waiting_for_pciebreak_ipi) > 1) + && get_cycles() - start_time < max_cycles) { + cpu_relax(); + } + if (atomic_read(&waiting_for_pciebreak_ipi) > 1) + pr_emerg("Non-pciebreaking CPUs did not react to IPI\n"); +} +static void ls2k500sfb_redraw_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = + container_of(work, struct ls2k500sfb_struct, redraw_work.work); + /*restore resolution info */ + if (memcmp(priv->penv, priv->saved_env, sizeof(priv->saved_env))) + memcpy(priv->penv, priv->saved_env, 
sizeof(priv->saved_env)); + switch_console(saved_console); +} + +static unsigned long event_jiffies; +static void ls2k500sfb_events_fn(struct work_struct *work) +{ + struct ls2k500sfb_struct *priv = container_of(work, struct ls2k500sfb_struct, work); + struct pci_dev *pdev = priv->dev; + struct pci_dev *ppdev = pdev->bus->self; + uint32_t i, d, timeout, retry = 0; + static const uint32_t index[] = { + 0x10, 0x14, 0x18, 0x1c, 0x20, 0x24, 0x30, 0x3c, 0x54, 0x58, 0x78, 0x7c, 0x80, 4 + }; + + static uint32_t data[sizeof(index) / 4]; + static const uint32_t cindex[] = { 0x10, 0x3c, 4 }; + + static uint32_t cdata[sizeof(cindex) / 4]; + static uint32_t d80c, d71c, ctrl; + static void *p; + + if (!priv->running) { + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_read_config_dword(ppdev, index[i], &data[i]); + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_read_config_dword(pdev, cindex[i], &cdata[i]); + if (ppdev->vendor == 0x14) { + pci_read_config_dword(ppdev, 0x80c, &d80c); + d80c = (d80c & ~(3 << 17)) | (1 << 17); + + pci_read_config_dword(ppdev, 0x71c, &d71c); + d71c |= 1 << 26; + + p = pci_iomap(ppdev, 0, 0x100); + } + ctrl = readl(p); + return; + } + local_bh_disable(); + pciebreak_smp_send_stop(100); + wmb(); /* flush all write before we disable pcie window */ + pci_write_config_dword(ppdev, 0x18, 0); + pci_write_config_dword(ppdev, 0x1c, 0); + pci_write_config_dword(ppdev, 0x20, 0); + event_jiffies = jiffies; + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after change pcie window */ + local_bh_enable(); + if (ppdev->vendor == 0x14) { + timeout = 10000; + while (timeout) { + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) + break; + mdelay(1); + timeout--; + }; + if (!timeout) + pr_info("bar not clear 0\n"); + + pci_read_config_dword(ppdev, 0x0, &d); + pr_info("pcie port deviceid=0x%x recover begin\n", d); +retrain: + while (1) { + pci_write_config_dword(ppdev, index[0], data[0]); + pci_read_config_dword(ppdev, index[0], &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + while (1) { + for (i = 0; i < ARRAY_SIZE(index); i++) { + if (index[i] != 0x18 && index[i] != 0x1c && index[i] != 0x20) + pci_write_config_dword(ppdev, index[i], data[i]); + } + pci_write_config_dword(ppdev, 0x80c, d80c); + pci_write_config_dword(ppdev, 0x71c, d71c); + + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (d) + break; + mdelay(1); + } + + timeout = 10000; + + writel(ctrl | 0x8, p); + while (1) { + d = readl(p + 0xc); + if ((d & 0x11) == 0x11) { + break; + } else if (!timeout) { + pr_info("pcie train failed status=0x%x\n", d); + goto out; + } + mdelay(1); + timeout--; + } + + + pr_info("pcie recovered done\n"); + + if (!retry) { + /*wait u-boot ddr config */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + pci_read_config_dword(ppdev, 0x10, &d); + d &= ~0xf; + if (!d) { + retry = 1; + goto retrain; + } + } + } else { + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ*resetbootwait); + set_current_state(TASK_RUNNING); + } + local_bh_disable(); + pciebreak_smp_send_stop(10000); + wmb(); /* flush all write before we update pcie window */ + for (i = 0; i < ARRAY_SIZE(index); i++) + pci_write_config_dword(ppdev, index[i], data[i]); + + for (i = 0; i < ARRAY_SIZE(cindex); i++) + pci_write_config_dword(pdev, cindex[i], cdata[i]); + atomic_set(&waiting_for_pciebreak_ipi, 0); + wmb(); /* flush all write after we update pcie window */ + local_bh_enable(); + + + pr_info("redraw 
console\n"); + + saved_console = fg_console; + switch_console(fg_console > 0?fg_console - 1 : fg_console + 1); + queue_delayed_work(priv->wq, &priv->redraw_work, HZ); +out: + priv->running = 0; +} + +irqreturn_t ls2k500sfb_interrupt(int irq, void *arg) +{ + struct ls2k500sfb_struct *priv = arg; + struct pci_dev *pdev = priv->dev; + + if (irq == pdev->irq) + pr_info("ls2k500sfb pcie interrupt\n"); + else + pr_info("ls2k500sfb gpio interrupt\n"); + if (system_state != SYSTEM_RUNNING) + return IRQ_HANDLED; + + if (!priv->running) { + if (!resetdelay || time_after(jiffies, priv->reset_time + resetdelay * HZ)) { + priv->running = 1; + queue_work(priv->wq, &priv->work); + } + priv->reset_time = jiffies; + } + return IRQ_HANDLED; +} + +#ifdef CONFIG_LOONGARCH +#define GPIO_OEN ((void *)IO_BASE+0x1fe00000+0x500) +#define GPIO_FUNCEN ((void *)IO_BASE+0x1fe00000+0x504) +#define GPIO_OUT ((void *)IO_BASE+0x1fe00000+0x508) +#define GPIO_IN ((void *)IO_BASE+0x1fe00000+0x50c) +#define GPIO_INTPOL ((void *)IO_BASE+0x1fe00000+0x510) +#define GPIO_INTEN ((void *)IO_BASE+0x1fe00000+0x514) + +static int gpiochip_match_name(struct gpio_chip *chip, void *data) +{ + const char *name = data; + + return !strcmp(chip->label, name); +} +static int get_gpio_irq_from_acpi_table(int gpio) +{ + struct gpio_chip *chip; + struct gpio_desc *desc; + + chip = gpiochip_find("LOON0007:00", gpiochip_match_name); + if (!chip) + return -ENOENT; + desc = gpiochip_request_own_desc(chip, gpio, "reboot", GPIO_LOOKUP_FLAGS_DEFAULT, GPIOD_IN); + if (!desc) + return -ENOENT; + return gpiod_to_irq(desc); +} + +static int get_gpio_irq_from_acpi_gsi(int gpio) +{ + int gsi = 16 + (gpio & 7); + + return acpi_register_gsi(NULL, gsi, ACPI_EDGE_SENSITIVE, ACPI_ACTIVE_LOW); +} + +static int register_gpio_reboot_handler(struct ls2k500sfb_struct *priv) +{ + int irq = get_gpio_irq_from_acpi_table(GPIO); + + if (irq < 0) { + irq = get_gpio_irq_from_acpi_gsi(GPIO); + pr_notice("gsi gpio irq %d\n", irq); + } else + pr_notice("acpi gpio irq %d\n", irq); + writel(readl(GPIO_OEN) | (0x1 << GPIO), GPIO_OEN); + writel(readl(GPIO_FUNCEN) & ~(0x1 << GPIO), GPIO_FUNCEN); + writel(readl(GPIO_INTPOL) & ~(0x1 << GPIO), GPIO_INTPOL); + writel(readl(GPIO_INTEN) | (0x1 << GPIO), GPIO_INTEN); + if (request_irq(irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_FALLING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", irq); + return 0; +} +#endif + +static const struct fb_fix_screeninfo simplefb_fix = { + .id = "simple", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, +}; + +static const struct fb_var_screeninfo simplefb_var = { + .height = -1, + .width = -1, + .activate = FB_ACTIVATE_NOW, + .vmode = FB_VMODE_NONINTERLACED, +}; + +#define PSEUDO_PALETTE_SIZE 16 +struct simplefb_par { + char *penv; + char *preg; + u32 palette[PSEUDO_PALETTE_SIZE]; +}; + +static u_long get_line_length(int xres_virtual, int bpp) +{ + u_long length; + + length = xres_virtual * bpp; + length = (length + 31) & ~31; + length >>= 3; + return length; +} + +static int simplefb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + u_long line_length; + + /* + * FB_VMODE_CONUPDATE and FB_VMODE_SMOOTH_XPAN are equal! 
+ * as FB_VMODE_SMOOTH_XPAN is only used internally + */ + + if (var->vmode & FB_VMODE_CONUPDATE) { + var->vmode |= FB_VMODE_YWRAP; + var->xoffset = info->var.xoffset; + var->yoffset = info->var.yoffset; + } + + /* + * Some very basic checks + */ + if (!var->xres) + var->xres = 1; + if (!var->yres) + var->yres = 1; + if (var->xres > var->xres_virtual) + var->xres_virtual = var->xres; + if (var->yres > var->yres_virtual) + var->yres_virtual = var->yres; + if (var->bits_per_pixel <= 16) + var->bits_per_pixel = 16; + else if (var->bits_per_pixel <= 32) + var->bits_per_pixel = 32; + else + return -EINVAL; + + if (var->xres_virtual < var->xoffset + var->xres) + var->xres_virtual = var->xoffset + var->xres; + if (var->yres_virtual < var->yoffset + var->yres) + var->yres_virtual = var->yoffset + var->yres; + + /* + * Memory limit + */ + line_length = + get_line_length(var->xres_virtual, var->bits_per_pixel); + if (line_length * var->yres_virtual > info->fix.smem_len) + return -ENOMEM; + + /* + * Now that we checked it we alter var. The reason being is that the video + * mode passed in might not work but slight changes to it might make it + * work. This way we let the user know what is acceptable. + */ + switch (var->bits_per_pixel) { + case 16: /* BGR 565 */ + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + break; + case 32: /* BGRA 8888 */ + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + break; + } + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + + return 0; +} + +static int simplefb_set_par(struct fb_info *info) +{ + struct simplefb_par *par = info->par; + int reg_val; + + info->fix.line_length = get_line_length(info->var.xres_virtual, + info->var.bits_per_pixel); + sprintf(par->penv, "video=%dx%d-%d@2M", + info->var.xres_virtual, + info->var.yres_virtual, + info->var.bits_per_pixel); + + reg_val = readl(par->preg); + writel(reg_val + 1, par->preg); + + return 0; +} + +static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, + u_int transp, struct fb_info *info) +{ + u32 *pal = info->pseudo_palette; + u32 cr = red >> (16 - info->var.red.length); + u32 cg = green >> (16 - info->var.green.length); + u32 cb = blue >> (16 - info->var.blue.length); + u32 value; + + if (regno >= PSEUDO_PALETTE_SIZE) + return -EINVAL; + + value = (cr << info->var.red.offset) | + (cg << info->var.green.offset) | + (cb << info->var.blue.offset); + if (info->var.transp.length > 0) { + u32 mask = (1 << info->var.transp.length) - 1; + + mask <<= info->var.transp.offset; + value |= mask; + } + pal[regno] = value; + + return 0; +} + + +static void simplefb_destroy(struct fb_info *info) +{ + if (info->screen_base) + iounmap(info->screen_base); +} + +static const struct fb_ops simplefb_ops = { + .owner = THIS_MODULE, + .fb_destroy = simplefb_destroy, + .fb_setcolreg = simplefb_setcolreg, + .fb_fillrect = cfb_fillrect, + .fb_copyarea = cfb_copyarea, + .fb_imageblit = cfb_imageblit, + .fb_check_var = simplefb_check_var, + .fb_set_par = simplefb_set_par, +}; + +static struct simplefb_format simplefb_formats[] = SIMPLEFB_FORMATS; + +struct simplefb_params { + u32 width; + u32 height; + u32 stride; + struct simplefb_format *format; +}; + +static int 
simplefb_parse_pd(struct platform_device *pdev,
+		  struct simplefb_params *params)
+{
+	struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
+	int i;
+
+	params->width = pd->width;
+	params->height = pd->height;
+	params->stride = pd->stride;
+
+	params->format = NULL;
+	for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
+		if (strcmp(pd->format, simplefb_formats[i].name))
+			continue;
+
+		params->format = &simplefb_formats[i];
+		break;
+	}
+
+	if (!params->format) {
+		dev_err(&pdev->dev, "Invalid format value\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int simplefb_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct simplefb_params params;
+	struct fb_info *info;
+	struct simplefb_par *par;
+	struct resource *mem, *envmem, *regmem;
+
+	ret = simplefb_parse_pd(pdev, &params);
+	if (ret)
+		return ret;
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	envmem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	regmem = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	if (!mem || !envmem || !regmem) {
+		dev_err(&pdev->dev, "No memory resource\n");
+		return -EINVAL;
+	}
+
+	info = framebuffer_alloc(sizeof(struct simplefb_par), &pdev->dev);
+	if (!info)
+		return -ENOMEM;
+	platform_set_drvdata(pdev, info);
+
+	par = info->par;
+	par->penv = ioremap(envmem->start, resource_size(envmem));
+	par->preg = ioremap(regmem->start, resource_size(regmem));
+
+	info->fix = simplefb_fix;
+	info->fix.smem_start = mem->start;
+	info->fix.smem_len = resource_size(mem);
+	info->fix.line_length = params.stride;
+
+	info->var = simplefb_var;
+	info->var.xres = params.width;
+	info->var.yres = params.height;
+	info->var.xres_virtual = params.width;
+	info->var.yres_virtual = params.height;
+	info->var.bits_per_pixel = params.format->bits_per_pixel;
+	info->var.red = params.format->red;
+	info->var.green = params.format->green;
+	info->var.blue = params.format->blue;
+	info->var.transp = params.format->transp;
+
+	ret = devm_aperture_acquire_for_platform_device(pdev,
+							info->fix.smem_start,
+							info->fix.smem_len);
+	if (ret) {
+		dev_info(&pdev->dev, "cannot acquire aperture\n");
+		goto error_fb_release;
+	}
+
+	info->fbops = &simplefb_ops;
+	info->flags = 0;
+	info->screen_base = ioremap_wc(info->fix.smem_start,
+				       info->fix.smem_len);
+	if (!info->screen_base) {
+		ret = -ENOMEM;
+		goto error_fb_release;
+	}
+	info->pseudo_palette = par->palette;
+
+	dev_info(&pdev->dev, "framebuffer at 0x%lx, 0x%x bytes, mapped to 0x%p\n",
+		 info->fix.smem_start, info->fix.smem_len,
+		 info->screen_base);
+	dev_info(&pdev->dev, "format=%s, mode=%dx%dx%d, linelength=%d\n",
+		 params.format->name,
+		 info->var.xres, info->var.yres,
+		 info->var.bits_per_pixel, info->fix.line_length);
+
+	ret = register_framebuffer(info);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
+		goto error_fb_release;
+	} else
+		dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
+
+	local_irq_disable();
+	mscycles = get_cycles();
+	mdelay(1);
+	mscycles = get_cycles() - mscycles;
+	local_irq_enable();
+
+	return ret;
+error_fb_release:
+	framebuffer_release(info);
+	return ret;
+}
+
+static int simplefb_remove(struct platform_device *pdev)
+{
+	struct fb_info *info = platform_get_drvdata(pdev);
+
+	unregister_framebuffer(info);
+	framebuffer_release(info);
+
+	return 0;
+}
+
+static struct platform_driver simplefb_driver = {
+	.driver = {
+		.name = "virt-framebuffer",
+	},
+	.probe = simplefb_probe,
+	.remove = simplefb_remove,
+};
+
+static void *kcs_data[2] = {&event_jiffies,
&mscycles}; +static int ls2k500sfb_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + struct simplefb_platform_data mode; + struct resource res[3]; + struct platform_device *pd; + struct ls2k500sfb_struct *priv; + long phybase, videooffset, videomemorysize; + char *pmode = mode_option; + int depth; + char *penv; + int ret, i; + + if (!dev->bus->number || pci_enable_device(dev)) + return -ENODEV; + priv = kzalloc(sizeof(struct ls2k500sfb_struct), GFP_KERNEL); + priv->dev = dev; + + /* pcimem bar last 16M free, 2MB offset from free for framebuffer */ + phybase = pci_resource_start(dev, 0); + phybase += pci_resource_len(dev, 0) - 0x1000000; + penv = ioremap(phybase, 0x100000); + /*env at last 16M's beginning, first env is video */ + if (!strncmp(penv, "video=", 6)) + pmode = penv + 6; + + priv->penv = penv + 6; + memcpy(priv->saved_env, priv->penv, sizeof(priv->saved_env)); + + mode.width = simple_strtoul(pmode, &pmode, 0); + pmode++; + mode.height = simple_strtoul(pmode, &pmode, 0); + pmode++; + depth = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + pmode++; + videooffset = simple_strtoul(pmode, &pmode, 0); + if (pmode && pmode[0]) { + switch (pmode[0]) { + case 'M': + case 'm': + videooffset *= 0x100000; + break; + case 'K': + case 'k': + videooffset *= 1024; + break; + } + } + } else + videooffset = 0x200000; + mode.stride = mode.width * depth / 8; + mode.format = depth == 32 ? "a8r8g8b8" : "r5g6b5"; + + videomemorysize = 0x400000; + + memset(res, 0, sizeof(res)); + res[0].start = phybase + videooffset; + res[0].end = phybase + videooffset + videomemorysize - 1; + res[0].flags = IORESOURCE_MEM; + res[0].parent = &dev->resource[0]; + + res[1].start = phybase; + res[1].end = phybase + 64 - 1; + res[1].flags = IORESOURCE_MEM; + res[1].parent = &dev->resource[0]; + + res[2].start = phybase + 0x00f00014; + res[2].end = phybase + 0x00f0001c - 1; + res[2].flags = IORESOURCE_MEM; + res[2].parent = &dev->resource[0]; + + priv->pd = pd = platform_device_register_resndata(NULL, "virt-framebuffer", 0, + res, 3, &mode, sizeof(mode)); + + ret = platform_driver_register(&simplefb_driver); + if (ret) + return ret; + priv->wq = create_singlethread_workqueue("ls2k500sfb wq"); + INIT_WORK(&priv->work, ls2k500sfb_events_fn); + INIT_DELAYED_WORK(&priv->redraw_work, ls2k500sfb_redraw_fn); + + ls2k500sfb_events_fn(&priv->work); + if (request_irq(dev->irq, ls2k500sfb_interrupt, IRQF_SHARED | IRQF_TRIGGER_RISING, + "ls2k500sfb", priv)) + pr_err("request_irq(%d) failed\n", dev->irq); + #ifdef CONFIG_LOONGARCH + register_gpio_reboot_handler(priv); + #endif + pci_set_drvdata(dev, priv); + for (i = 0; i < 5; i++) { + res[0].start = phybase + 0x00f00000 + 0x1c*i; + res[0].end = phybase + 0x00f00000 + 0x1c*(i+1) - 1; + platform_device_register_resndata(NULL, "ipmi_ls2k500_si", i, res, 1, + kcs_data, sizeof(kcs_data)); + } + + return PTR_ERR_OR_ZERO(pd); +} + +static void ls2k500sfb_remove(struct pci_dev *dev) +{ + struct ls2k500sfb_struct *priv = pci_get_drvdata(dev); + + platform_device_del(priv->pd); +} + +static struct pci_device_id ls2k500sfb_devices[] = { + {PCI_DEVICE(0x14, 0x1a05)}, + {0, 0, 0, 0, 0, 0, 0} +}; +MODULE_DEVICE_TABLE(pci, ls2k500sfb_devices); + +static struct pci_driver ls2k500sfb_driver = { + .name = "ls2k500sfb", + .id_table = ls2k500sfb_devices, + .probe = ls2k500sfb_probe, + .remove = ls2k500sfb_remove, + .driver = { + .name = "ls2k500sfb", + }, +}; + +static int __init ls2k500sfb_init(void) +{ + return pci_register_driver(&ls2k500sfb_driver); +} + 
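/*
 * [Editorial sketch -- not part of the patch.] simplefb_probe() above
 * calibrates "mscycles" by timing mdelay(1) against the raw cycle
 * counter with local interrupts disabled:
 *
 *	local_irq_disable();
 *	mscycles = get_cycles();
 *	mdelay(1);
 *	mscycles = get_cycles() - mscycles;
 *	local_irq_enable();
 *
 * That makes mscycles the number of counter cycles per millisecond, so
 * pciebreak_smp_send_stop(ms) can bound its busy-wait in wall-clock
 * terms without touching timekeeping while the PCIe window is torn
 * down. For example, with a hypothetical 2 GHz counter, mscycles is
 * about 2,000,000, and a 100 ms budget becomes
 * max_cycles = 100 * 2,000,000 = 200,000,000 cycles.
 */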
+module_init(ls2k500sfb_init); + +#ifdef MODULE +static void __exit ls2k500sfb_exit(void) +{ + pci_unregister_driver(&ls2k500sfb_driver); +} + +module_exit(ls2k500sfb_exit); +#endif + +MODULE_LICENSE("GPL"); diff --git a/drivers/virt/Kconfig b/drivers/virt/Kconfig index f79ab13a5c28b0cd7a71d41ba8d3177914946351..b1c4efa00182e0f40a8fa199620da19fb615918a 100644 --- a/drivers/virt/Kconfig +++ b/drivers/virt/Kconfig @@ -54,4 +54,6 @@ source "drivers/virt/coco/sev-guest/Kconfig" source "drivers/virt/coco/tdx-guest/Kconfig" +source "drivers/virt/coco/csv-guest/Kconfig" + endif diff --git a/drivers/virt/Makefile b/drivers/virt/Makefile index e9aa6fc96fab7242a9963d9a834722af24e4b6a2..62681a26075878b0bef4d2513bf9f36120a4cdd2 100644 --- a/drivers/virt/Makefile +++ b/drivers/virt/Makefile @@ -12,3 +12,4 @@ obj-$(CONFIG_ACRN_HSM) += acrn/ obj-$(CONFIG_EFI_SECRET) += coco/efi_secret/ obj-$(CONFIG_SEV_GUEST) += coco/sev-guest/ obj-$(CONFIG_INTEL_TDX_GUEST) += coco/tdx-guest/ +obj-$(CONFIG_CSV_GUEST) += coco/csv-guest/ diff --git a/drivers/virt/coco/csv-guest/Kconfig b/drivers/virt/coco/csv-guest/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..4cbde598e66508482bbe210b8eb7676f2564eac3 --- /dev/null +++ b/drivers/virt/coco/csv-guest/Kconfig @@ -0,0 +1,12 @@ +config CSV_GUEST + tristate "HYGON CSV Guest driver" + default m + depends on AMD_MEM_ENCRYPT + help + CSV firmware provides the guest a mechanism to communicate with + the PSP without risk from a malicious hypervisor who wishes to read, + alter, drop or replay the messages sent. The driver provides + userspace interface to communicate with the PSP to request the + attestation report and more. + + If you choose 'M' here, this module will be called csv-guest. diff --git a/drivers/virt/coco/csv-guest/Makefile b/drivers/virt/coco/csv-guest/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..a1c3a1499fc6f6e997d1629df4e1677b31f1a28d --- /dev/null +++ b/drivers/virt/coco/csv-guest/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_CSV_GUEST) += csv-guest.o diff --git a/drivers/virt/coco/csv-guest/csv-guest.c b/drivers/virt/coco/csv-guest/csv-guest.c new file mode 100644 index 0000000000000000000000000000000000000000..7bd9abe7d8b646bcf4a4fbe938c2f7a1b5f4018e --- /dev/null +++ b/drivers/virt/coco/csv-guest/csv-guest.c @@ -0,0 +1,114 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Userspace interface for CSV guest driver + * + * Copyright (C) 2024 Hygon Info Technologies Ltd. 
+ *
+ * Author: fangbaoshun
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+#include "csv-guest.h"
+
+static long csv_get_report(void __user *argp)
+{
+	u8 *csv_report;
+	long ret;
+	struct csv_report_req req;
+
+	if (copy_from_user(&req, argp, sizeof(struct csv_report_req)))
+		return -EFAULT;
+
+	if (req.len < CSV_REPORT_INPUT_DATA_LEN)
+		return -EINVAL;
+
+	csv_report = kzalloc(req.len, GFP_KERNEL);
+	if (!csv_report) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* Save user input data */
+	if (copy_from_user(csv_report, req.report_data, CSV_REPORT_INPUT_DATA_LEN)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	/* Generate CSV_REPORT using "KVM_HC_VM_ATTESTATION" VMMCALL */
+	ret = kvm_hypercall2(KVM_HC_VM_ATTESTATION, __pa(csv_report), req.len);
+	if (ret)
+		goto out;
+
+	if (copy_to_user(req.report_data, csv_report, req.len))
+		ret = -EFAULT;
+
+out:
+	kfree(csv_report);
+	return ret;
+}
+
+static long csv_guest_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case CSV_CMD_GET_REPORT:
+		return csv_get_report((void __user *)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+static void mem_test_init(void)
+{
+	char head_str[] = "test mem encrypt";
+	u64 *va_addr = __va(0x0);
+
+	if (va_addr) {
+		memset(va_addr, 0x66, PAGE_SIZE);
+		memcpy(va_addr, head_str, sizeof(head_str));
+		clflush_cache_range(va_addr, PAGE_SIZE);
+	} else
+		pr_err("Failed to initialize 1 page for CSV memory test!\n");
+}
+
+static const struct file_operations csv_guest_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = csv_guest_ioctl,
+	.compat_ioctl = csv_guest_ioctl,
+};
+
+static struct miscdevice csv_guest_dev = {
+	.minor = MISC_DYNAMIC_MINOR,
+	.name = "csv-guest",
+	.fops = &csv_guest_fops,
+	.mode = 0777,
+};
+
+static int __init csv_guest_init(void)
+{
+	// This module only works on a CSV guest VM.
+	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+		return -ENODEV;
+
+	// Initialize 1 page for the CSV memory test.
+	mem_test_init();
+
+	return misc_register(&csv_guest_dev);
+}
+
+static void __exit csv_guest_exit(void)
+{
+	misc_deregister(&csv_guest_dev);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("HYGON CSV Guest Driver");
+module_init(csv_guest_init);
+module_exit(csv_guest_exit);
diff --git a/drivers/virt/coco/csv-guest/csv-guest.h b/drivers/virt/coco/csv-guest/csv-guest.h
new file mode 100644
index 0000000000000000000000000000000000000000..0342d5f16cb3fed9fa61b8fe529ff4e2b4b84399
--- /dev/null
+++ b/drivers/virt/coco/csv-guest/csv-guest.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Userspace interface for CSV guest driver
+ *
+ * Copyright (C) Hygon Info Technologies Ltd.
+ */
+
+#ifndef __VIRT_CSVGUEST_H__
+#define __VIRT_CSVGUEST_H__
+
+#include
+#include
+
+/* Length of the user input data used in the VMMCALL */
+#define CSV_REPORT_USER_DATA_LEN	64
+#define CSV_REPORT_MNONCE_LEN		16
+#define CSV_REPORT_HASH_LEN		32
+#define CSV_REPORT_INPUT_DATA_LEN	(CSV_REPORT_USER_DATA_LEN + CSV_REPORT_MNONCE_LEN \
+					 + CSV_REPORT_HASH_LEN)
+
+/**
+ * struct csv_report_req - Request struct for CSV_CMD_GET_REPORT IOCTL.
+ *
+ * @report_data: User buffer with the REPORT_DATA to be included in the
+ *		CSV_REPORT; also the buffer that receives the CSV_REPORT
+ *		output of VMMCALL[KVM_HC_VM_ATTESTATION].
+ * @len:	Length of the user buffer.
+ */
+struct csv_report_req {
+	u8 *report_data;
+	int len;
+};
+
+/*
+ * CSV_CMD_GET_REPORT - Get CSV_REPORT using VMMCALL[KVM_HC_VM_ATTESTATION]
+ *
+ * Return 0 on success, -EIO on VMMCALL execution failure, and
+ * standard errno on other general error cases.
+ */
+#define CSV_CMD_GET_REPORT	_IOWR('D', 1, struct csv_report_req)
+
+#endif /* __VIRT_CSVGUEST_H__ */
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index 0a53a61231c2944092d3ecac74d5caf40aaca2d3..da89d498d14eeea7874286ee8618a06f222ad655 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -117,7 +117,7 @@ config VIRTIO_BALLOON
 
 config VIRTIO_MEM
 	tristate "Virtio mem driver"
-	depends on X86_64 || ARM64
+	depends on X86_64 || ARM64 || SW64
 	depends on VIRTIO
 	depends on MEMORY_HOTPLUG
 	depends on MEMORY_HOTREMOVE
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 51d8f3299c105594487d504115351fa716b7cc01..f13e6016e795adc5d7d65696a65903f5db070b74 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -13,6 +13,7 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef DEBUG
@@ -251,6 +252,21 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
 	return (vq->indirect && total_sg > 1 && vq->vq.num_free);
 }
 
+static bool vring_force_dma_api;
+
+#ifdef MODULE
+module_param(vring_force_dma_api, bool, 0640);
+#else
+static int __init vring_dma_api_setup(char *str)
+{
+	vring_force_dma_api = true;
+	printk(KERN_INFO "Force vring dma api enabled\n");
+
+	/* __setup() handlers return 1 once the option has been consumed */
+	return 1;
+}
+__setup("vring_force_dma_api", vring_dma_api_setup);
+#endif
+
 /*
  * Modern virtio devices have feature bits to specify whether they need a
  * quirk and bypass the IOMMU. If not there, just use the DMA API.
@@ -279,6 +295,13 @@ static bool virtqueue_use_indirect(const struct vring_virtqueue *vq,
 static bool vring_use_dma_api(const struct virtio_device *vdev)
 {
+	/*
+	 * Prior to xdragon platform 20181230 release (e.g. 0930 release), we
+	 * need this hack to get ENI hotplug to work.
+ */ + if (vring_force_dma_api) + return true; + if (!virtio_has_dma_quirk(vdev)) return true; diff --git a/fs/Kconfig b/fs/Kconfig index aa7e03cc1941cb3e6145d95886b99b83b677a69b..1e3ed753b9fe0c7754e81367688f56ad77123fff 100644 --- a/fs/Kconfig +++ b/fs/Kconfig @@ -325,6 +325,7 @@ source "fs/omfs/Kconfig" source "fs/hpfs/Kconfig" source "fs/qnx4/Kconfig" source "fs/qnx6/Kconfig" +source "fs/resctrl/Kconfig" source "fs/romfs/Kconfig" source "fs/pstore/Kconfig" source "fs/sysv/Kconfig" diff --git a/fs/Makefile b/fs/Makefile index f9541f40be4e08fbdee72f39e8c0c7ef856fb3f6..b62375770deed116ab26f3ce7f37198b54d09c01 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -129,3 +129,4 @@ obj-$(CONFIG_EFIVAR_FS) += efivarfs/ obj-$(CONFIG_EROFS_FS) += erofs/ obj-$(CONFIG_VBOXSF_FS) += vboxsf/ obj-$(CONFIG_ZONEFS_FS) += zonefs/ +obj-$(CONFIG_RESCTRL_FS) += resctrl/ diff --git a/fs/cachefiles/cache.c b/fs/cachefiles/cache.c index 7077f72e6f4747c2a1f1bd6c898221d4bdaab380..f449f7340aad0811ae2cea3134731e1a2111f5ff 100644 --- a/fs/cachefiles/cache.c +++ b/fs/cachefiles/cache.c @@ -168,6 +168,8 @@ int cachefiles_add_cache(struct cachefiles_cache *cache) dput(root); error_open_root: cachefiles_end_secure(cache, saved_cred); + put_cred(cache->cache_cred); + cache->cache_cred = NULL; error_getsec: fscache_relinquish_cache(cache_cookie); cache->cache = NULL; diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index aa4efcabb5e37b155407e724d22572c34fde1d3f..6465e257423091d5183a6bf4c7963a2e8e900766 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -77,6 +77,7 @@ static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = { { "tag", cachefiles_daemon_tag }, #ifdef CONFIG_CACHEFILES_ONDEMAND { "copen", cachefiles_ondemand_copen }, + { "restore", cachefiles_ondemand_restore }, #endif { "", NULL } }; @@ -355,14 +356,24 @@ static __poll_t cachefiles_daemon_poll(struct file *file, struct poll_table_struct *poll) { struct cachefiles_cache *cache = file->private_data; + XA_STATE(xas, &cache->reqs, 0); + struct cachefiles_req *req; __poll_t mask; poll_wait(file, &cache->daemon_pollwq, poll); mask = 0; if (cachefiles_in_ondemand_mode(cache)) { - if (!xa_empty(&cache->reqs)) - mask |= EPOLLIN; + if (!xa_empty(&cache->reqs)) { + rcu_read_lock(); + xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { + if (!cachefiles_ondemand_is_reopening_read(req)) { + mask |= EPOLLIN; + break; + } + } + rcu_read_unlock(); + } } else { if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags)) mask |= EPOLLIN; @@ -805,6 +816,7 @@ static void cachefiles_daemon_unbind(struct cachefiles_cache *cache) cachefiles_put_directory(cache->graveyard); cachefiles_put_directory(cache->store); mntput(cache->mnt); + put_cred(cache->cache_cred); kfree(cache->rootdirname); kfree(cache->secctx); diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 40052bdb33655baa88191602dbb03d60d9d8dd1f..35ba2117a6f652c8903e32db08b10824bf537660 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -31,6 +31,11 @@ struct cachefiles_object *cachefiles_alloc_object(struct fscache_cookie *cookie) if (!object) return NULL; + if (cachefiles_ondemand_init_obj_info(object, volume)) { + kmem_cache_free(cachefiles_object_jar, object); + return NULL; + } + refcount_set(&object->ref, 1); spin_lock_init(&object->lock); @@ -88,7 +93,7 @@ void cachefiles_put_object(struct cachefiles_object *object, ASSERTCMP(object->file, ==, NULL); kfree(object->d_name); - + cachefiles_ondemand_deinit_obj_info(object); cache = 
object->volume->cache->cache; fscache_put_cookie(object->cookie, fscache_cookie_put_object); object->cookie = NULL; diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h index 2ad58c465208480e42106393ae01fd2f317389ee..4a87c9d714a9498b80599d15461c48f0ea1c3f68 100644 --- a/fs/cachefiles/internal.h +++ b/fs/cachefiles/internal.h @@ -44,6 +44,19 @@ struct cachefiles_volume { struct dentry *fanout[256]; /* Fanout subdirs */ }; +enum cachefiles_object_state { + CACHEFILES_ONDEMAND_OBJSTATE_CLOSE, /* Anonymous fd closed by daemon or initial state */ + CACHEFILES_ONDEMAND_OBJSTATE_OPEN, /* Anonymous fd associated with object is available */ + CACHEFILES_ONDEMAND_OBJSTATE_REOPENING, /* Object that was closed and is being reopened. */ +}; + +struct cachefiles_ondemand_info { + struct work_struct ondemand_work; + int ondemand_id; + enum cachefiles_object_state state; + struct cachefiles_object *object; +}; + /* * Backing file state. */ @@ -61,7 +74,7 @@ struct cachefiles_object { unsigned long flags; #define CACHEFILES_OBJECT_USING_TMPFILE 0 /* Have an unlinked tmpfile */ #ifdef CONFIG_CACHEFILES_ONDEMAND - int ondemand_id; + struct cachefiles_ondemand_info *ondemand; #endif }; @@ -290,12 +303,42 @@ extern ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, extern int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args); +extern int cachefiles_ondemand_restore(struct cachefiles_cache *cache, + char *args); + extern int cachefiles_ondemand_init_object(struct cachefiles_object *object); extern void cachefiles_ondemand_clean_object(struct cachefiles_object *object); extern int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len); +extern int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj, + struct cachefiles_volume *volume); +extern void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj); + +#define CACHEFILES_OBJECT_STATE_FUNCS(_state, _STATE) \ +static inline bool \ +cachefiles_ondemand_object_is_##_state(const struct cachefiles_object *object) \ +{ \ + return object->ondemand->state == CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ +} \ + \ +static inline void \ +cachefiles_ondemand_set_object_##_state(struct cachefiles_object *object) \ +{ \ + object->ondemand->state = CACHEFILES_ONDEMAND_OBJSTATE_##_STATE; \ +} + +CACHEFILES_OBJECT_STATE_FUNCS(open, OPEN); +CACHEFILES_OBJECT_STATE_FUNCS(close, CLOSE); +CACHEFILES_OBJECT_STATE_FUNCS(reopening, REOPENING); + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return cachefiles_ondemand_object_is_reopening(req->object) && + req->msg.opcode == CACHEFILES_OP_READ; +} + #else static inline ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) @@ -317,6 +360,20 @@ static inline int cachefiles_ondemand_read(struct cachefiles_object *object, { return -EOPNOTSUPP; } + +static inline int cachefiles_ondemand_init_obj_info(struct cachefiles_object *obj, + struct cachefiles_volume *volume) +{ + return 0; +} +static inline void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *obj) +{ +} + +static inline bool cachefiles_ondemand_is_reopening_read(struct cachefiles_req *req) +{ + return false; +} #endif /* diff --git a/fs/cachefiles/ondemand.c b/fs/cachefiles/ondemand.c index 0254ed39f68ceb75dcb2ec840af88b136ac21fe3..c15f2547222ebed20e569be7a04f3f366d1020d0 100644 --- a/fs/cachefiles/ondemand.c +++ b/fs/cachefiles/ondemand.c @@ -9,21 +9,19 @@ static int 
cachefiles_ondemand_fd_release(struct inode *inode, { struct cachefiles_object *object = file->private_data; struct cachefiles_cache *cache = object->volume->cache; - int object_id = object->ondemand_id; + struct cachefiles_ondemand_info *info = object->ondemand; + int object_id = info->ondemand_id; struct cachefiles_req *req; XA_STATE(xas, &cache->reqs, 0); xa_lock(&cache->reqs); - object->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + info->ondemand_id = CACHEFILES_ONDEMAND_ID_CLOSED; + cachefiles_ondemand_set_object_close(object); - /* - * Flush all pending READ requests since their completion depends on - * anon_fd. - */ - xas_for_each(&xas, req, ULONG_MAX) { + /* Only flush CACHEFILES_REQ_NEW marked req to avoid race with daemon_read */ + xas_for_each_marked(&xas, req, ULONG_MAX, CACHEFILES_REQ_NEW) { if (req->msg.object_id == object_id && - req->msg.opcode == CACHEFILES_OP_READ) { - req->error = -EIO; + req->msg.opcode == CACHEFILES_OP_CLOSE) { complete(&req->done); xas_store(&xas, NULL); } @@ -176,11 +174,37 @@ int cachefiles_ondemand_copen(struct cachefiles_cache *cache, char *args) set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags); trace_cachefiles_ondemand_copen(req->object, id, size); + cachefiles_ondemand_set_object_open(req->object); + wake_up_all(&cache->daemon_pollwq); + out: complete(&req->done); return ret; } +int cachefiles_ondemand_restore(struct cachefiles_cache *cache, char *args) +{ + struct cachefiles_req *req; + + XA_STATE(xas, &cache->reqs, 0); + + if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) + return -EOPNOTSUPP; + + /* + * Reset the requests to CACHEFILES_REQ_NEW state, so that the + * requests have been processed halfway before the crash of the + * user daemon could be reprocessed after the recovery. + */ + xas_lock(&xas); + xas_for_each(&xas, req, ULONG_MAX) + xas_set_mark(&xas, CACHEFILES_REQ_NEW); + xas_unlock(&xas); + + wake_up_all(&cache->daemon_pollwq); + return 0; +} + static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) { struct cachefiles_object *object; @@ -218,8 +242,7 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) load = (void *)req->msg.data; load->fd = fd; - req->msg.object_id = object_id; - object->ondemand_id = object_id; + object->ondemand->ondemand_id = object_id; cachefiles_get_unbind_pincount(cache); trace_cachefiles_ondemand_open(object, &req->msg, load); @@ -234,6 +257,43 @@ static int cachefiles_ondemand_get_fd(struct cachefiles_req *req) return ret; } +static void ondemand_object_worker(struct work_struct *work) +{ + struct cachefiles_ondemand_info *info = + container_of(work, struct cachefiles_ondemand_info, ondemand_work); + + cachefiles_ondemand_init_object(info->object); +} + +/* + * If there are any inflight or subsequent READ requests on the + * closed object, reopen it. + * Skip read requests whose related object is reopening. 
+ */ +static struct cachefiles_req *cachefiles_ondemand_select_req(struct xa_state *xas, + unsigned long xa_max) +{ + struct cachefiles_req *req; + struct cachefiles_object *object; + struct cachefiles_ondemand_info *info; + + xas_for_each_marked(xas, req, xa_max, CACHEFILES_REQ_NEW) { + if (req->msg.opcode != CACHEFILES_OP_READ) + return req; + object = req->object; + info = object->ondemand; + if (cachefiles_ondemand_object_is_close(object)) { + cachefiles_ondemand_set_object_reopening(object); + queue_work(fscache_wq, &info->ondemand_work); + continue; + } + if (cachefiles_ondemand_object_is_reopening(object)) + continue; + return req; + } + return NULL; +} + ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, char __user *_buffer, size_t buflen) { @@ -244,16 +304,16 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, int ret = 0; XA_STATE(xas, &cache->reqs, cache->req_id_next); + xa_lock(&cache->reqs); /* * Cyclically search for a request that has not ever been processed, * to prevent requests from being processed repeatedly, and make * request distribution fair. */ - xa_lock(&cache->reqs); - req = xas_find_marked(&xas, UINT_MAX, CACHEFILES_REQ_NEW); + req = cachefiles_ondemand_select_req(&xas, ULONG_MAX); if (!req && cache->req_id_next > 0) { xas_set(&xas, 0); - req = xas_find_marked(&xas, cache->req_id_next - 1, CACHEFILES_REQ_NEW); + req = cachefiles_ondemand_select_req(&xas, cache->req_id_next - 1); } if (!req) { xa_unlock(&cache->reqs); @@ -273,14 +333,18 @@ ssize_t cachefiles_ondemand_daemon_read(struct cachefiles_cache *cache, xa_unlock(&cache->reqs); id = xas.xa_index; - msg->msg_id = id; if (msg->opcode == CACHEFILES_OP_OPEN) { ret = cachefiles_ondemand_get_fd(req); - if (ret) + if (ret) { + cachefiles_ondemand_set_object_close(req->object); goto error; + } } + msg->msg_id = id; + msg->object_id = req->object->ondemand->ondemand_id; + if (copy_to_user(_buffer, msg, n) != 0) { ret = -EFAULT; goto err_put_fd; @@ -313,19 +377,23 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, void *private) { struct cachefiles_cache *cache = object->volume->cache; - struct cachefiles_req *req; + struct cachefiles_req *req = NULL; XA_STATE(xas, &cache->reqs, 0); int ret; if (!test_bit(CACHEFILES_ONDEMAND_MODE, &cache->flags)) return 0; - if (test_bit(CACHEFILES_DEAD, &cache->flags)) - return -EIO; + if (test_bit(CACHEFILES_DEAD, &cache->flags)) { + ret = -EIO; + goto out; + } req = kzalloc(sizeof(*req) + data_len, GFP_KERNEL); - if (!req) - return -ENOMEM; + if (!req) { + ret = -ENOMEM; + goto out; + } req->object = object; init_completion(&req->done); @@ -363,8 +431,9 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, /* coupled with the barrier in cachefiles_flush_reqs() */ smp_mb(); - if (opcode != CACHEFILES_OP_OPEN && object->ondemand_id <= 0) { - WARN_ON_ONCE(object->ondemand_id == 0); + if (opcode == CACHEFILES_OP_CLOSE && + !cachefiles_ondemand_object_is_open(object)) { + WARN_ON_ONCE(object->ondemand->ondemand_id == 0); xas_unlock(&xas); ret = -EIO; goto out; @@ -387,7 +456,15 @@ static int cachefiles_ondemand_send_req(struct cachefiles_object *object, wake_up_all(&cache->daemon_pollwq); wait_for_completion(&req->done); ret = req->error; + kfree(req); + return ret; out: + /* Reset the object to close state in error handling path. + * If error occurs after creating the anonymous fd, + * cachefiles_ondemand_fd_release() will set object to close. 
+ */ + if (opcode == CACHEFILES_OP_OPEN) + cachefiles_ondemand_set_object_close(object); kfree(req); return ret; } @@ -430,18 +507,10 @@ static int cachefiles_ondemand_init_close_req(struct cachefiles_req *req, void *private) { struct cachefiles_object *object = req->object; - int object_id = object->ondemand_id; - /* - * It's possible that object id is still 0 if the cookie looking up - * phase failed before OPEN request has ever been sent. Also avoid - * sending CLOSE request for CACHEFILES_ONDEMAND_ID_CLOSED, which means - * anon_fd has already been closed. - */ - if (object_id <= 0) + if (!cachefiles_ondemand_object_is_open(object)) return -ENOENT; - req->msg.object_id = object_id; trace_cachefiles_ondemand_close(object, &req->msg); return 0; } @@ -457,16 +526,7 @@ static int cachefiles_ondemand_init_read_req(struct cachefiles_req *req, struct cachefiles_object *object = req->object; struct cachefiles_read *load = (void *)req->msg.data; struct cachefiles_read_ctx *read_ctx = private; - int object_id = object->ondemand_id; - - /* Stop enqueuing requests when daemon has closed anon_fd. */ - if (object_id <= 0) { - WARN_ON_ONCE(object_id == 0); - pr_info_once("READ: anonymous fd closed prematurely.\n"); - return -EIO; - } - req->msg.object_id = object_id; load->off = read_ctx->off; load->len = read_ctx->len; trace_cachefiles_ondemand_read(object, &req->msg, load); @@ -479,13 +539,16 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object) struct fscache_volume *volume = object->volume->vcookie; size_t volume_key_size, cookie_key_size, data_len; + if (!object->ondemand) + return 0; + /* * CacheFiles will firstly check the cache file under the root cache * directory. If the coherency check failed, it will fallback to * creating a new tmpfile as the cache file. Reuse the previously * allocated object ID if any. 
*/ - if (object->ondemand_id > 0) + if (cachefiles_ondemand_object_is_open(object)) return 0; volume_key_size = volume->key[0] + 1; @@ -503,6 +566,28 @@ void cachefiles_ondemand_clean_object(struct cachefiles_object *object) cachefiles_ondemand_init_close_req, NULL); } +int cachefiles_ondemand_init_obj_info(struct cachefiles_object *object, + struct cachefiles_volume *volume) +{ + if (!cachefiles_in_ondemand_mode(volume->cache)) + return 0; + + object->ondemand = kzalloc(sizeof(struct cachefiles_ondemand_info), + GFP_KERNEL); + if (!object->ondemand) + return -ENOMEM; + + object->ondemand->object = object; + INIT_WORK(&object->ondemand->ondemand_work, ondemand_object_worker); + return 0; +} + +void cachefiles_ondemand_deinit_obj_info(struct cachefiles_object *object) +{ + kfree(object->ondemand); + object->ondemand = NULL; +} + int cachefiles_ondemand_read(struct cachefiles_object *object, loff_t pos, size_t len) { diff --git a/fs/d_path.c b/fs/d_path.c index 5f4da5c8d5db3e957b1034312e9a2a257b6018c9..df50090b6a0f78b696510a2624262657520235db 100644 --- a/fs/d_path.c +++ b/fs/d_path.c @@ -196,6 +196,58 @@ static int prepend_path(const struct path *path, return error; } +static int prepend_path_locked(const struct path *path, + const struct path *root, + struct prepend_buffer *p) +{ + struct prepend_buffer b; + unsigned seq = 0; + int error; + + rcu_read_lock(); +restart: + b = *p; + read_seqbegin_or_lock(&rename_lock, &seq); + error = __prepend_path(path->dentry, real_mount(path->mnt), root, &b); + if (!(seq & 1)) + rcu_read_unlock(); + if (need_seqretry(&rename_lock, seq)) { + seq = 1; + goto restart; + } + done_seqretry(&rename_lock, seq); + + if (unlikely(error == 3)) + b = *p; + + if (b.len == p->len) + prepend_char(&b, '/'); + + *p = b; + return error; +} + +/* + * d_absolute_path_locked - return the absolute path of a dentry + * + * @path: path to report + * @buf: buffer to return value in + * @buflen: buffer length + * + * Write absolute pathname like d_absolute_path() except with mount_lock held. + */ +char *d_absolute_path_locked(const struct path *path, char *buf, int buflen) +{ + struct path root = {}; + DECLARE_BUFFER(b, buf, buflen); + + prepend_char(&b, 0); + if (unlikely(prepend_path_locked(path, &root, &b) > 1)) + return ERR_PTR(-EINVAL); + return extract_string(&b); +} +EXPORT_SYMBOL(d_absolute_path_locked); + /** * __d_path - return the path of a dentry * @path: the dentry/vfsmount to report diff --git a/fs/erofs/Kconfig b/fs/erofs/Kconfig index f6dc961e6c2bc501bab4bc44f51877c6df92119d..1d318f85232de9361714471ac973762ed2e6b0e6 100644 --- a/fs/erofs/Kconfig +++ b/fs/erofs/Kconfig @@ -21,7 +21,7 @@ config EROFS_FS performance under extremely memory pressure without extra cost. See the documentation at - for more details. + and the web pages at for more details. If unsure, say N. @@ -91,13 +91,10 @@ config EROFS_FS_ZIP_LZMA select XZ_DEC_MICROLZMA help Saying Y here includes support for reading EROFS file systems - containing LZMA compressed data, specifically called microLZMA. it - gives better compression ratios than the LZ4 algorithm, at the + containing LZMA compressed data, specifically called microLZMA. It + gives better compression ratios than the default LZ4 format, at the expense of more CPU overhead. - LZMA support is an experimental feature for now and so most file - systems will be readable without selecting this option. - If unsure, say N. 
config EROFS_FS_ZIP_DEFLATE diff --git a/fs/erofs/compress.h b/fs/erofs/compress.h index 349c3316ae6bbba1806766e8eb2549d9e60b27e3..7cc5841577b240f90f9a623e64adc87c3fb24982 100644 --- a/fs/erofs/compress.h +++ b/fs/erofs/compress.h @@ -11,16 +11,17 @@ struct z_erofs_decompress_req { struct super_block *sb; struct page **in, **out; - unsigned short pageofs_in, pageofs_out; unsigned int inputsize, outputsize; - /* indicate the algorithm will be used for decompression */ - unsigned int alg; + unsigned int alg; /* the algorithm for decompression */ bool inplace_io, partial_decoding, fillgaps; + gfp_t gfp; /* allocation flags for extra temporary buffers */ }; struct z_erofs_decompressor { + int (*config)(struct super_block *sb, struct erofs_super_block *dsb, + void *data, int size); int (*decompress)(struct z_erofs_decompress_req *rq, struct page **pagepool); char *name; @@ -92,6 +93,10 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, extern const struct z_erofs_decompressor erofs_decompressors[]; /* prototypes for specific algorithms */ +int z_erofs_load_lzma_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size); +int z_erofs_load_deflate_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size); int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, struct page **pagepool); int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, diff --git a/fs/erofs/data.c b/fs/erofs/data.c index 0c2c99c58b5e3aa2524e1b17506b117624772c4d..31f5c690dfc2783fdc3d449e12715beb019ecf4e 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -5,9 +5,7 @@ * Copyright (C) 2021, Alibaba Cloud */ #include "internal.h" -#include #include -#include #include void erofs_unmap_metabuf(struct erofs_buf *buf) @@ -448,5 +446,6 @@ const struct file_operations erofs_file_fops = { .llseek = generic_file_llseek, .read_iter = erofs_file_read_iter, .mmap = erofs_file_mmap, + .get_unmapped_area = thp_get_unmapped_area, .splice_read = filemap_splice_read, }; diff --git a/fs/erofs/decompressor.c b/fs/erofs/decompressor.c index 332ec5f74002b8474bef9a88135108827d16d809..fce41d4875bf6bb1dbbb7b693e11140ff357dbdf 100644 --- a/fs/erofs/decompressor.c +++ b/fs/erofs/decompressor.c @@ -4,7 +4,6 @@ * https://www.huawei.com/ */ #include "compress.h" -#include #include #ifndef LZ4_DISTANCE_MAX /* history window size */ @@ -24,11 +23,11 @@ struct z_erofs_lz4_decompress_ctx { unsigned int oend; }; -int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lz4_cfgs *lz4, int size) +static int z_erofs_load_lz4_config(struct super_block *sb, + struct erofs_super_block *dsb, void *data, int size) { struct erofs_sb_info *sbi = EROFS_SB(sb); + struct z_erofs_lz4_cfgs *lz4 = data; u16 distance; if (lz4) { @@ -112,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, victim = availables[--top]; get_page(victim); } else { - victim = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + victim = erofs_allocpage(pagepool, rq->gfp); + if (!victim) + return -ENOMEM; set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE); } rq->out[i] = victim; @@ -122,11 +122,11 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx, } static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, - void *inpage, unsigned int *inputmargin, int *maptype, - bool may_inplace) + void *inpage, void *out, unsigned int *inputmargin, + int *maptype, bool 
may_inplace) { struct z_erofs_decompress_req *rq = ctx->rq; - unsigned int omargin, total, i, j; + unsigned int omargin, total, i; struct page **in; void *src, *tmp; @@ -136,12 +136,13 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) goto docopy; - for (i = 0; i < ctx->inpages; ++i) { - DBG_BUGON(rq->in[i] == NULL); - for (j = 0; j < ctx->outpages - ctx->inpages + i; ++j) - if (rq->out[j] == rq->in[i]) - goto docopy; - } + for (i = 0; i < ctx->inpages; ++i) + if (rq->out[ctx->outpages - ctx->inpages + i] != + rq->in[i]) + goto docopy; + kunmap_local(inpage); + *maptype = 3; + return out + ((ctx->outpages - ctx->inpages) << PAGE_SHIFT); } if (ctx->inpages <= 1) { @@ -149,7 +150,6 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx, return inpage; } kunmap_local(inpage); - might_sleep(); src = erofs_vm_map_ram(rq->in, ctx->inpages); if (!src) return ERR_PTR(-ENOMEM); @@ -205,12 +205,12 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf, } static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, - u8 *out) + u8 *dst) { struct z_erofs_decompress_req *rq = ctx->rq; bool support_0padding = false, may_inplace = false; unsigned int inputmargin; - u8 *headpage, *src; + u8 *out, *headpage, *src; int ret, maptype; DBG_BUGON(*rq->in == NULL); @@ -231,11 +231,12 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, } inputmargin = rq->pageofs_in; - src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin, + src = z_erofs_lz4_handle_overlap(ctx, headpage, dst, &inputmargin, &maptype, may_inplace); if (IS_ERR(src)) return PTR_ERR(src); + out = dst + rq->pageofs_out; /* legacy format could compress extra data in a pcluster. 
*/ if (rq->partial_decoding || !support_0padding) ret = LZ4_decompress_safe_partial(src + inputmargin, out, @@ -247,15 +248,9 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, if (ret != rq->outputsize) { erofs_err(rq->sb, "failed to decompress %d in[%u, %u] out[%u]", ret, rq->inputsize, inputmargin, rq->outputsize); - - print_hex_dump(KERN_DEBUG, "[ in]: ", DUMP_PREFIX_OFFSET, - 16, 1, src + inputmargin, rq->inputsize, true); - print_hex_dump(KERN_DEBUG, "[out]: ", DUMP_PREFIX_OFFSET, - 16, 1, out, rq->outputsize, true); - if (ret >= 0) memset(out + ret, 0, rq->outputsize - ret); - ret = -EIO; + ret = -EFSCORRUPTED; } else { ret = 0; } @@ -266,7 +261,7 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx, vm_unmap_ram(src, ctx->inpages); } else if (maptype == 2) { erofs_put_pcpubuf(src); - } else { + } else if (maptype != 3) { DBG_BUGON(1); return -EFAULT; } @@ -309,7 +304,7 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, } dstmap_out: - ret = z_erofs_lz4_decompress_mem(&ctx, dst + rq->pageofs_out); + ret = z_erofs_lz4_decompress_mem(&ctx, dst); if (!dst_maptype) kunmap_local(dst); else if (dst_maptype == 2) @@ -320,43 +315,59 @@ static int z_erofs_lz4_decompress(struct z_erofs_decompress_req *rq, static int z_erofs_transform_plain(struct z_erofs_decompress_req *rq, struct page **pagepool) { - const unsigned int inpages = PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT; - const unsigned int outpages = + const unsigned int nrpages_in = + PAGE_ALIGN(rq->pageofs_in + rq->inputsize) >> PAGE_SHIFT; + const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; - const unsigned int righthalf = min_t(unsigned int, rq->outputsize, - PAGE_SIZE - rq->pageofs_out); - const unsigned int lefthalf = rq->outputsize - righthalf; - const unsigned int interlaced_offset = - rq->alg == Z_EROFS_COMPRESSION_SHIFTED ? 0 : rq->pageofs_out; - u8 *src; - - if (outpages > 2 && rq->alg == Z_EROFS_COMPRESSION_SHIFTED) { - DBG_BUGON(1); - return -EFSCORRUPTED; - } - - if (rq->out[0] == *rq->in) { - DBG_BUGON(rq->pageofs_out); - return 0; + const unsigned int bs = rq->sb->s_blocksize; + unsigned int cur = 0, ni = 0, no, pi, po, insz, cnt; + u8 *kin; + + if (rq->outputsize > rq->inputsize) + return -EOPNOTSUPP; + if (rq->alg == Z_EROFS_COMPRESSION_INTERLACED) { + cur = bs - (rq->pageofs_out & (bs - 1)); + pi = (rq->pageofs_in + rq->inputsize - cur) & ~PAGE_MASK; + cur = min(cur, rq->outputsize); + if (cur && rq->out[0]) { + kin = kmap_local_page(rq->in[nrpages_in - 1]); + if (rq->out[0] == rq->in[nrpages_in - 1]) { + memmove(kin + rq->pageofs_out, kin + pi, cur); + flush_dcache_page(rq->out[0]); + } else { + memcpy_to_page(rq->out[0], rq->pageofs_out, + kin + pi, cur); + } + kunmap_local(kin); + } + rq->outputsize -= cur; } - src = kmap_local_page(rq->in[inpages - 1]) + rq->pageofs_in; - if (rq->out[0]) - memcpy_to_page(rq->out[0], rq->pageofs_out, - src + interlaced_offset, righthalf); - - if (outpages > inpages) { - DBG_BUGON(!rq->out[outpages - 1]); - if (rq->out[outpages - 1] != rq->in[inpages - 1]) { - memcpy_to_page(rq->out[outpages - 1], 0, src + - (interlaced_offset ? 
0 : righthalf), - lefthalf); - } else if (!interlaced_offset) { - memmove(src, src + righthalf, lefthalf); - flush_dcache_page(rq->in[inpages - 1]); - } + for (; rq->outputsize; rq->pageofs_in = 0, cur += PAGE_SIZE, ni++) { + insz = min_t(unsigned int, PAGE_SIZE - rq->pageofs_in, rq->outputsize); + rq->outputsize -= insz; + if (!rq->in[ni]) + continue; + kin = kmap_local_page(rq->in[ni]); + pi = 0; + do { + no = (rq->pageofs_out + cur + pi) >> PAGE_SHIFT; + po = (rq->pageofs_out + cur + pi) & ~PAGE_MASK; + DBG_BUGON(no >= nrpages_out); + cnt = min_t(unsigned int, insz - pi, PAGE_SIZE - po); + if (rq->out[no] == rq->in[ni]) { + memmove(kin + po, + kin + rq->pageofs_in + pi, cnt); + flush_dcache_page(rq->out[no]); + } else if (rq->out[no]) { + memcpy_to_page(rq->out[no], po, + kin + rq->pageofs_in + pi, cnt); + } + pi += cnt; + } while (pi < insz); + kunmap_local(kin); } - kunmap_local(src); + DBG_BUGON(ni > nrpages_in); return 0; } @@ -370,19 +381,75 @@ const struct z_erofs_decompressor erofs_decompressors[] = { .name = "interlaced" }, [Z_EROFS_COMPRESSION_LZ4] = { + .config = z_erofs_load_lz4_config, .decompress = z_erofs_lz4_decompress, .name = "lz4" }, #ifdef CONFIG_EROFS_FS_ZIP_LZMA [Z_EROFS_COMPRESSION_LZMA] = { + .config = z_erofs_load_lzma_config, .decompress = z_erofs_lzma_decompress, .name = "lzma" }, #endif #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE [Z_EROFS_COMPRESSION_DEFLATE] = { + .config = z_erofs_load_deflate_config, .decompress = z_erofs_deflate_decompress, .name = "deflate" }, #endif }; + +int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb) +{ + struct erofs_sb_info *sbi = EROFS_SB(sb); + struct erofs_buf buf = __EROFS_BUF_INITIALIZER; + unsigned int algs, alg; + erofs_off_t offset; + int size, ret = 0; + + if (!erofs_sb_has_compr_cfgs(sbi)) { + sbi->available_compr_algs = 1 << Z_EROFS_COMPRESSION_LZ4; + return z_erofs_load_lz4_config(sb, dsb, NULL, 0); + } + + sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs); + if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) { + erofs_err(sb, "unidentified algorithms %x, please upgrade kernel", + sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS); + return -EOPNOTSUPP; + } + + erofs_init_metabuf(&buf, sb); + offset = EROFS_SUPER_OFFSET + sbi->sb_size; + alg = 0; + for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { + void *data; + + if (!(algs & 1)) + continue; + + data = erofs_read_metadata(sb, &buf, &offset, &size); + if (IS_ERR(data)) { + ret = PTR_ERR(data); + break; + } + + if (alg >= ARRAY_SIZE(erofs_decompressors) || + !erofs_decompressors[alg].config) { + erofs_err(sb, "algorithm %d isn't enabled on this kernel", + alg); + ret = -EOPNOTSUPP; + } else { + ret = erofs_decompressors[alg].config(sb, + dsb, data, size); + } + + kfree(data); + if (ret) + break; + } + erofs_put_metabuf(&buf); + return ret; +} diff --git a/fs/erofs/decompressor_deflate.c b/fs/erofs/decompressor_deflate.c index 19e5bdeb30b6064bd442642b427dfac13c823267..b98872058abe82d4034b84c1c93c46645b50968b 100644 --- a/fs/erofs/decompressor_deflate.c +++ b/fs/erofs/decompressor_deflate.c @@ -1,5 +1,4 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include #include #include "compress.h" @@ -71,15 +70,16 @@ int __init z_erofs_deflate_init(void) return 0; out_failed: - pr_err("failed to allocate zlib workspace\n"); + erofs_err(NULL, "failed to allocate zlib workspace"); z_erofs_deflate_exit(); return -ENOMEM; } int z_erofs_load_deflate_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct 
z_erofs_deflate_cfgs *dfl, int size) + struct erofs_super_block *dsb, void *data, int size) { + struct z_erofs_deflate_cfgs *dfl = data; + if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) { erofs_err(sb, "invalid deflate cfgs, size=%u", size); return -EINVAL; @@ -95,7 +95,7 @@ int z_erofs_load_deflate_config(struct super_block *sb, } int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -158,8 +158,12 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs); outsz -= strm->z.avail_out; if (!rq->out[no]) { - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + kout = NULL; + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -211,8 +215,11 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); rq->in[j] = tmppage; @@ -230,7 +237,7 @@ int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq, break; } } - +failed: if (zlib_inflateEnd(&strm->z) != Z_OK && !err) err = -EIO; if (kout) diff --git a/fs/erofs/decompressor_lzma.c b/fs/erofs/decompressor_lzma.c index dee10d22ada96e5f6031d752e00ef33dbe984018..6ca357d83cfa458225f20e2d6f6a45307fef2194 100644 --- a/fs/erofs/decompressor_lzma.c +++ b/fs/erofs/decompressor_lzma.c @@ -1,6 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-or-later #include -#include #include "compress.h" struct z_erofs_lzma { @@ -72,10 +71,10 @@ int __init z_erofs_lzma_init(void) } int z_erofs_load_lzma_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lzma_cfgs *lzma, int size) + struct erofs_super_block *dsb, void *data, int size) { static DEFINE_MUTEX(lzma_resize_mutex); + struct z_erofs_lzma_cfgs *lzma = data; unsigned int dict_size, i; struct z_erofs_lzma *strm, *head = NULL; int err; @@ -96,8 +95,6 @@ int z_erofs_load_lzma_config(struct super_block *sb, return -EINVAL; } - erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. 
Use at your own risk!"); - /* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */ mutex_lock(&lzma_resize_mutex); @@ -151,7 +148,7 @@ int z_erofs_load_lzma_config(struct super_block *sb, } int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, - struct page **pagepool) + struct page **pgpl) { const unsigned int nrpages_out = PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT; @@ -218,8 +215,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, PAGE_SIZE - pageofs); outlen -= strm->buf.out_size; if (!rq->out[no] && rq->fillgaps) { /* deduped */ - rq->out[no] = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + rq->out[no] = erofs_allocpage(pgpl, rq->gfp); + if (!rq->out[no]) { + err = -ENOMEM; + break; + } set_page_private(rq->out[no], Z_EROFS_SHORTLIVED_PAGE); } @@ -261,8 +261,11 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb), rq->in[j])); - tmppage = erofs_allocpage(pagepool, - GFP_KERNEL | __GFP_NOFAIL); + tmppage = erofs_allocpage(pgpl, rq->gfp); + if (!tmppage) { + err = -ENOMEM; + goto failed; + } set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE); copy_highpage(tmppage, rq->in[j]); rq->in[j] = tmppage; @@ -280,6 +283,7 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq, break; } } +failed: if (no < nrpages_out && strm->buf.out) kunmap(rq->out[no]); if (ni < nrpages_in) diff --git a/fs/erofs/fscache.c b/fs/erofs/fscache.c index 87ff35bff8d5bb3acb8dbc4d79c07d1d018cba56..89a7c2453aae6f130e679af1459673397d581842 100644 --- a/fs/erofs/fscache.c +++ b/fs/erofs/fscache.c @@ -165,10 +165,10 @@ static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie, static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio) { int ret; - struct erofs_fscache *ctx = folio_mapping(folio)->host->i_private; + struct erofs_fscache *ctx = folio->mapping->host->i_private; struct erofs_fscache_request *req; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); @@ -276,7 +276,7 @@ static int erofs_fscache_read_folio(struct file *file, struct folio *folio) struct erofs_fscache_request *req; int ret; - req = erofs_fscache_req_alloc(folio_mapping(folio), + req = erofs_fscache_req_alloc(folio->mapping, folio_pos(folio), folio_size(folio)); if (IS_ERR(req)) { folio_unlock(folio); @@ -381,11 +381,12 @@ static int erofs_fscache_init_domain(struct super_block *sb) goto out; if (!erofs_pseudo_mnt) { - erofs_pseudo_mnt = kern_mount(&erofs_fs_type); - if (IS_ERR(erofs_pseudo_mnt)) { - err = PTR_ERR(erofs_pseudo_mnt); + struct vfsmount *mnt = kern_mount(&erofs_fs_type); + if (IS_ERR(mnt)) { + err = PTR_ERR(mnt); goto out; } + erofs_pseudo_mnt = mnt; } domain->volume = sbi->volume; @@ -459,7 +460,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &erofs_fscache_meta_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); inode->i_blkbits = EROFS_SB(sb)->blkszbits; inode->i_private = ctx; diff --git a/fs/erofs/inode.c b/fs/erofs/inode.c index edc8ec7581b8f057ac70b66c1f04edfca1792c66..efe439a961c0edb17d2582d2292be766c3cedc00 100644 --- a/fs/erofs/inode.c +++ b/fs/erofs/inode.c @@ -15,11 +15,11 @@ static void *erofs_read_inode(struct erofs_buf *buf, struct erofs_sb_info *sbi = EROFS_SB(sb); struct 
erofs_inode *vi = EROFS_I(inode); const erofs_off_t inode_loc = erofs_iloc(inode); - erofs_blk_t blkaddr, nblks = 0; void *kaddr; struct erofs_inode_compact *dic; struct erofs_inode_extended *die, *copied = NULL; + union erofs_inode_i_u iu; unsigned int ifmt; int err; @@ -35,9 +35,8 @@ static void *erofs_read_inode(struct erofs_buf *buf, dic = kaddr + *ofs; ifmt = le16_to_cpu(dic->i_format); - if (ifmt & ~EROFS_I_ALL) { - erofs_err(inode->i_sb, "unsupported i_format %u of nid %llu", + erofs_err(sb, "unsupported i_format %u of nid %llu", ifmt, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -45,7 +44,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->datalayout = erofs_inode_datalayout(ifmt); if (vi->datalayout >= EROFS_INODE_DATALAYOUT_MAX) { - erofs_err(inode->i_sb, "unsupported datalayout %u of nid %llu", + erofs_err(sb, "unsupported datalayout %u of nid %llu", vi->datalayout, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -61,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf, } else { const unsigned int gotten = sb->s_blocksize - *ofs; - copied = kmalloc(vi->inode_isize, GFP_NOFS); + copied = kmalloc(vi->inode_isize, GFP_KERNEL); if (!copied) { err = -ENOMEM; goto err_out; @@ -82,40 +81,15 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(die->i_xattr_icount); inode->i_mode = le16_to_cpu(die->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(die->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(die->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = die->i_u; i_uid_write(inode, le32_to_cpu(die->i_uid)); i_gid_write(inode, le32_to_cpu(die->i_gid)); set_nlink(inode, le32_to_cpu(die->i_nlink)); - - /* extended inode has its own timestamp */ + /* each extended inode has its own timestamp */ inode_set_ctime(inode, le64_to_cpu(die->i_mtime), le32_to_cpu(die->i_mtime_nsec)); inode->i_size = le64_to_cpu(die->i_size); - - /* total blocks for compressed files */ - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(die->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - /* fill chunked inode summary info */ - vi->chunkformat = le16_to_cpu(die->i_u.c.format); kfree(copied); copied = NULL; break; @@ -125,49 +99,51 @@ static void *erofs_read_inode(struct erofs_buf *buf, vi->xattr_isize = erofs_xattr_ibody_size(dic->i_xattr_icount); inode->i_mode = le16_to_cpu(dic->i_mode); - switch (inode->i_mode & S_IFMT) { - case S_IFREG: - case S_IFDIR: - case S_IFLNK: - vi->raw_blkaddr = le32_to_cpu(dic->i_u.raw_blkaddr); - break; - case S_IFCHR: - case S_IFBLK: - inode->i_rdev = - new_decode_dev(le32_to_cpu(dic->i_u.rdev)); - break; - case S_IFIFO: - case S_IFSOCK: - inode->i_rdev = 0; - break; - default: - goto bogusimode; - } + iu = dic->i_u; i_uid_write(inode, le16_to_cpu(dic->i_uid)); i_gid_write(inode, le16_to_cpu(dic->i_gid)); set_nlink(inode, le16_to_cpu(dic->i_nlink)); - /* use build time for compact inodes */ inode_set_ctime(inode, sbi->build_time, sbi->build_time_nsec); inode->i_size = le32_to_cpu(dic->i_size); - if (erofs_inode_is_data_compressed(vi->datalayout)) - nblks = le32_to_cpu(dic->i_u.compressed_blocks); - else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) - vi->chunkformat = le16_to_cpu(dic->i_u.c.format); break; default: - erofs_err(inode->i_sb, - "unsupported on-disk 
inode version %u of nid %llu", + erofs_err(sb, "unsupported on-disk inode version %u of nid %llu", erofs_inode_version(ifmt), vi->nid); err = -EOPNOTSUPP; goto err_out; } - if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + switch (inode->i_mode & S_IFMT) { + case S_IFREG: + case S_IFDIR: + case S_IFLNK: + vi->raw_blkaddr = le32_to_cpu(iu.raw_blkaddr); + break; + case S_IFCHR: + case S_IFBLK: + inode->i_rdev = new_decode_dev(le32_to_cpu(iu.rdev)); + break; + case S_IFIFO: + case S_IFSOCK: + inode->i_rdev = 0; + break; + default: + erofs_err(sb, "bogus i_mode (%o) @ nid %llu", inode->i_mode, + vi->nid); + err = -EFSCORRUPTED; + goto err_out; + } + + /* total blocks for compressed files */ + if (erofs_inode_is_data_compressed(vi->datalayout)) { + nblks = le32_to_cpu(iu.compressed_blocks); + } else if (vi->datalayout == EROFS_INODE_CHUNK_BASED) { + /* fill chunked inode summary info */ + vi->chunkformat = le16_to_cpu(iu.c.format); if (vi->chunkformat & ~EROFS_CHUNK_FORMAT_ALL) { - erofs_err(inode->i_sb, - "unsupported chunk format %x of nid %llu", + erofs_err(sb, "unsupported chunk format %x of nid %llu", vi->chunkformat, vi->nid); err = -EOPNOTSUPP; goto err_out; @@ -190,10 +166,6 @@ static void *erofs_read_inode(struct erofs_buf *buf, inode->i_blocks = nblks << (sb->s_blocksize_bits - 9); return kaddr; -bogusimode: - erofs_err(inode->i_sb, "bogus i_mode (%o) @ nid %llu", - inode->i_mode, vi->nid); - err = -EFSCORRUPTED; err_out: DBG_BUGON(1); kfree(copied); @@ -286,8 +258,10 @@ static int erofs_fill_inode(struct inode *inode) if (erofs_inode_is_data_compressed(vi->datalayout)) { #ifdef CONFIG_EROFS_FS_ZIP - if (!erofs_is_fscache_mode(inode->i_sb) && - inode->i_sb->s_blocksize_bits == PAGE_SHIFT) { + if (!erofs_is_fscache_mode(inode->i_sb)) { + DO_ONCE_LITE_IF(inode->i_sb->s_blocksize != PAGE_SIZE, + erofs_info, inode->i_sb, + "EXPERIMENTAL EROFS subpage compressed block support in use. 
Use at your own risk!"); inode->i_mapping->a_ops = &z_erofs_aops; err = 0; goto out_unlock; diff --git a/fs/erofs/internal.h b/fs/erofs/internal.h index 4ff88d0dd980fbd5998c79c71c7a89c129050fb8..00a2b392f445913eb98e39efcf7e824d545f5881 100644 --- a/fs/erofs/internal.h +++ b/fs/erofs/internal.h @@ -8,8 +8,10 @@ #define __EROFS_INTERNAL_H #include +#include #include #include +#include #include #include #include @@ -228,8 +230,6 @@ struct erofs_buf { }; #define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL }) -#define ROOT_NID(sb) ((sb)->root_nid) - #define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits) #define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1)) #define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits) @@ -469,9 +469,6 @@ int __init z_erofs_init_zip_subsystem(void); void z_erofs_exit_zip_subsystem(void); int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, struct erofs_workgroup *egrp); -int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lz4_cfgs *lz4, int len); int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map, int flags); void *erofs_get_pcpubuf(unsigned int requiredpages); @@ -480,6 +477,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages); void __init erofs_pcpubuf_init(void); void erofs_pcpubuf_exit(void); int erofs_init_managed_cache(struct super_block *sb); +int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb); #else static inline void erofs_shrinker_register(struct super_block *sb) {} static inline void erofs_shrinker_unregister(struct super_block *sb) {} @@ -487,16 +485,6 @@ static inline int erofs_init_shrinker(void) { return 0; } static inline void erofs_exit_shrinker(void) {} static inline int z_erofs_init_zip_subsystem(void) { return 0; } static inline void z_erofs_exit_zip_subsystem(void) {} -static inline int z_erofs_load_lz4_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lz4_cfgs *lz4, int len) -{ - if (lz4 || dsb->u1.lz4_max_distance) { - erofs_err(sb, "lz4 algorithm isn't enabled"); - return -EINVAL; - } - return 0; -} static inline void erofs_pcpubuf_init(void) {} static inline void erofs_pcpubuf_exit(void) {} static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } @@ -505,41 +493,17 @@ static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; } #ifdef CONFIG_EROFS_FS_ZIP_LZMA int __init z_erofs_lzma_init(void); void z_erofs_lzma_exit(void); -int z_erofs_load_lzma_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lzma_cfgs *lzma, int size); #else static inline int z_erofs_lzma_init(void) { return 0; } static inline int z_erofs_lzma_exit(void) { return 0; } -static inline int z_erofs_load_lzma_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_lzma_cfgs *lzma, int size) { - if (lzma) { - erofs_err(sb, "lzma algorithm isn't enabled"); - return -EINVAL; - } - return 0; -} #endif /* !CONFIG_EROFS_FS_ZIP_LZMA */ #ifdef CONFIG_EROFS_FS_ZIP_DEFLATE int __init z_erofs_deflate_init(void); void z_erofs_deflate_exit(void); -int z_erofs_load_deflate_config(struct super_block *sb, - struct erofs_super_block *dsb, - struct z_erofs_deflate_cfgs *dfl, int size); #else static inline int z_erofs_deflate_init(void) { return 0; } static inline int z_erofs_deflate_exit(void) { return 0; } -static inline int z_erofs_load_deflate_config(struct super_block *sb, - struct 
erofs_super_block *dsb, - struct z_erofs_deflate_cfgs *dfl, int size) { - if (dfl) { - erofs_err(sb, "deflate algorithm isn't enabled"); - return -EINVAL; - } - return 0; -} #endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */ #ifdef CONFIG_EROFS_FS_ONDEMAND diff --git a/fs/erofs/namei.c b/fs/erofs/namei.c index d4f631d39f0fa83141eafcb13951bb3fd36598bd..f0110a78acb2078aa2ce6eae13e39481e46b7ea9 100644 --- a/fs/erofs/namei.c +++ b/fs/erofs/namei.c @@ -130,24 +130,24 @@ static void *erofs_find_target_block(struct erofs_buf *target, /* string comparison without already matched prefix */ diff = erofs_dirnamecmp(name, &dname, &matched); - if (!diff) { - *_ndirents = 0; - goto out; - } else if (diff > 0) { - head = mid + 1; - startprfx = matched; - - if (!IS_ERR(candidate)) - erofs_put_metabuf(target); - *target = buf; - candidate = de; - *_ndirents = ndirents; - } else { + if (diff < 0) { erofs_put_metabuf(&buf); - back = mid - 1; endprfx = matched; + continue; + } + + if (!IS_ERR(candidate)) + erofs_put_metabuf(target); + *target = buf; + if (!diff) { + *_ndirents = 0; + return de; } + head = mid + 1; + startprfx = matched; + candidate = de; + *_ndirents = ndirents; continue; } out: /* free if the candidate is valid */ diff --git a/fs/erofs/super.c b/fs/erofs/super.c index 3700af9ee17332f4f6ea766ba3297cdd1e3746fc..06a433bc7eaed0acadda0ee5de252d7ec4cf392d 100644 --- a/fs/erofs/super.c +++ b/fs/erofs/super.c @@ -4,14 +4,11 @@ * https://www.huawei.com/ * Copyright (C) 2021, Alibaba Cloud */ -#include #include -#include #include #include #include #include -#include #include #include "xattr.h" @@ -30,7 +27,10 @@ void _erofs_err(struct super_block *sb, const char *func, const char *fmt, ...) vaf.fmt = fmt; vaf.va = &args; - pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + if (sb) + pr_err("(device %s): %s: %pV", sb->s_id, func, &vaf); + else + pr_err("%s: %pV", func, &vaf); va_end(args); } @@ -44,7 +44,10 @@ void _erofs_info(struct super_block *sb, const char *func, const char *fmt, ...) 
vaf.fmt = fmt; vaf.va = &args; - pr_info("(device %s): %pV", sb->s_id, &vaf); + if (sb) + pr_info("(device %s): %pV", sb->s_id, &vaf); + else + pr_info("%pV", &vaf); va_end(args); } @@ -156,68 +159,15 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf, return buffer; } -#ifdef CONFIG_EROFS_FS_ZIP -static int erofs_load_compr_cfgs(struct super_block *sb, - struct erofs_super_block *dsb) +#ifndef CONFIG_EROFS_FS_ZIP +static int z_erofs_parse_cfgs(struct super_block *sb, + struct erofs_super_block *dsb) { - struct erofs_sb_info *sbi = EROFS_SB(sb); - struct erofs_buf buf = __EROFS_BUF_INITIALIZER; - unsigned int algs, alg; - erofs_off_t offset; - int size, ret = 0; - - sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs); - if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) { - erofs_err(sb, "try to load compressed fs with unsupported algorithms %x", - sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS); - return -EINVAL; - } - - erofs_init_metabuf(&buf, sb); - offset = EROFS_SUPER_OFFSET + sbi->sb_size; - alg = 0; - for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) { - void *data; - - if (!(algs & 1)) - continue; - - data = erofs_read_metadata(sb, &buf, &offset, &size); - if (IS_ERR(data)) { - ret = PTR_ERR(data); - break; - } + if (!dsb->u1.available_compr_algs) + return 0; - switch (alg) { - case Z_EROFS_COMPRESSION_LZ4: - ret = z_erofs_load_lz4_config(sb, dsb, data, size); - break; - case Z_EROFS_COMPRESSION_LZMA: - ret = z_erofs_load_lzma_config(sb, dsb, data, size); - break; - case Z_EROFS_COMPRESSION_DEFLATE: - ret = z_erofs_load_deflate_config(sb, dsb, data, size); - break; - default: - DBG_BUGON(1); - ret = -EFAULT; - } - kfree(data); - if (ret) - break; - } - erofs_put_metabuf(&buf); - return ret; -} -#else -static int erofs_load_compr_cfgs(struct super_block *sb, - struct erofs_super_block *dsb) -{ - if (dsb->u1.available_compr_algs) { - erofs_err(sb, "try to load compressed fs when compression is disabled"); - return -EINVAL; - } - return 0; + erofs_err(sb, "compression disabled, unable to mount compressed EROFS"); + return -EOPNOTSUPP; } #endif @@ -406,10 +356,7 @@ static int erofs_read_superblock(struct super_block *sb) } /* parse on-disk compression configurations */ - if (erofs_sb_has_compr_cfgs(sbi)) - ret = erofs_load_compr_cfgs(sb, dsb); - else - ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0); + ret = z_erofs_parse_cfgs(sb, dsb); if (ret < 0) goto out; @@ -724,13 +671,13 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) xa_init(&sbi->managed_pslots); #endif - inode = erofs_iget(sb, ROOT_NID(sbi)); + inode = erofs_iget(sb, sbi->root_nid); if (IS_ERR(inode)) return PTR_ERR(inode); if (!S_ISDIR(inode->i_mode)) { erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", - ROOT_NID(sbi), inode->i_mode); + sbi->root_nid, inode->i_mode); iput(inode); return -EINVAL; } @@ -760,7 +707,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc) if (err) return err; - erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi)); + erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid); return 0; } diff --git a/fs/erofs/utils.c b/fs/erofs/utils.c index 4256a85719a1d25fbe3f0aa33820fafe3ad01d45..603ded4db58e112f2a0fa94a4069ed2205560812 100644 --- a/fs/erofs/utils.c +++ b/fs/erofs/utils.c @@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb, repeat: xa_lock(&sbi->managed_pslots); pre = 
__xa_cmpxchg(&sbi->managed_pslots, grp->index, - NULL, grp, GFP_NOFS); + NULL, grp, GFP_KERNEL); if (pre) { if (xa_is_err(pre)) { pre = ERR_PTR(xa_err(pre)); diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index a7e6847f6f8f18f65ede10f6f7bc400e770cea04..ff0aa72b0db342f10ed7c1b565d2cc7bd6a540ff 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -56,6 +56,9 @@ struct z_erofs_pcluster { /* L: total number of bvecs */ unsigned int vcnt; + /* I: pcluster size (compressed size) in bytes */ + unsigned int pclustersize; + /* I: page offset of start position of decompression */ unsigned short pageofs_out; @@ -70,14 +73,6 @@ struct z_erofs_pcluster { struct rcu_head rcu; }; - union { - /* I: physical cluster size in pages */ - unsigned short pclusterpages; - - /* I: tailpacking inline compressed size */ - unsigned short tailpacking_size; - }; - /* I: compression algorithm format */ unsigned char algorithmformat; @@ -87,6 +82,9 @@ struct z_erofs_pcluster { /* L: indicate several pageofs_outs or not */ bool multibases; + /* L: whether extra buffer allocations are best-effort */ + bool besteffort; + /* A: compressed bvecs (can be cached or inplaced pages) */ struct z_erofs_bvec compressed_bvecs[]; }; @@ -115,9 +113,7 @@ static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl) static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl) { - if (z_erofs_is_inline_pcluster(pcl)) - return 1; - return pcl->pclusterpages; + return PAGE_ALIGN(pcl->pclustersize) >> PAGE_SHIFT; } /* @@ -237,7 +233,7 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter, struct page *nextpage = *candidate_bvpage; if (!nextpage) { - nextpage = erofs_allocpage(pagepool, GFP_NOFS); + nextpage = erofs_allocpage(pagepool, GFP_KERNEL); if (!nextpage) return -ENOMEM; set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE); @@ -298,21 +294,21 @@ static int z_erofs_create_pcluster_pool(void) return 0; } -static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int nrpages) +static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size) { - int i; + unsigned int nrpages = PAGE_ALIGN(size) >> PAGE_SHIFT; + struct z_erofs_pcluster_slab *pcs = pcluster_pool; - for (i = 0; i < ARRAY_SIZE(pcluster_pool); ++i) { - struct z_erofs_pcluster_slab *pcs = pcluster_pool + i; + for (; pcs < pcluster_pool + ARRAY_SIZE(pcluster_pool); ++pcs) { struct z_erofs_pcluster *pcl; if (nrpages > pcs->maxpages) continue; - pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS); + pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL); if (!pcl) return ERR_PTR(-ENOMEM); - pcl->pclusterpages = nrpages; + pcl->pclustersize = size; return pcl; } return ERR_PTR(-EINVAL); @@ -559,6 +555,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) { struct address_space *mc = MNGD_MAPPING(EROFS_I_SB(fe->inode)); struct z_erofs_pcluster *pcl = fe->pcl; + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool shouldalloc = z_erofs_should_alloc_cache(fe); bool standalone = true; /* @@ -569,22 +566,22 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; unsigned int i; - if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) + if (i_blocksize(fe->inode) != PAGE_SIZE || + fe->mode < Z_EROFS_PCLUSTER_FOLLOWED) return; - for (i = 0; i < pcl->pclusterpages; ++i) { - struct page *page; + for (i = 0; i < pclusterpages; ++i) { + struct page *page, *newpage; void *t; /* mark pages just found for debugging */ - struct page *newpage = NULL; - /* the 
compressed page was loaded before */ + /* Inaccurate check w/o locking to avoid unneeded lookups */ if (READ_ONCE(pcl->compressed_bvecs[i].page)) continue; page = find_get_page(mc, pcl->obj.index + i); - if (page) { t = (void *)((unsigned long)page | 1); + newpage = NULL; } else { /* I/O is needed, no possible to decompress directly */ standalone = false; @@ -592,9 +589,8 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) continue; /* - * try to use cached I/O if page allocation - * succeeds or fallback to in-place I/O instead - * to avoid any direct reclaim. + * Try cached I/O if allocation succeeds or fallback to + * in-place I/O instead to avoid any direct reclaim. */ newpage = erofs_allocpage(&fe->pagepool, gfp); if (!newpage) @@ -602,9 +598,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe) set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE); t = (void *)((unsigned long)newpage | 1); } - - if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t)) + spin_lock(&pcl->obj.lockref.lock); + if (!pcl->compressed_bvecs[i].page) { + pcl->compressed_bvecs[i].page = t; + spin_unlock(&pcl->obj.lockref.lock); continue; + } + spin_unlock(&pcl->obj.lockref.lock); if (page) put_page(page); @@ -626,6 +626,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, { struct z_erofs_pcluster *const pcl = container_of(grp, struct z_erofs_pcluster, obj); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); int i; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); @@ -633,7 +634,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, * refcount of workgroup is now freezed as 0, * therefore no need to worry about available decompression users. */ - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { struct page *page = pcl->compressed_bvecs[i].page; if (!page) @@ -657,6 +658,7 @@ int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi, static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) { struct z_erofs_pcluster *pcl = folio_get_private(folio); + unsigned int pclusterpages = z_erofs_pclusterpages(pcl); bool ret; int i; @@ -669,7 +671,7 @@ static bool z_erofs_cache_release_folio(struct folio *folio, gfp_t gfp) goto out; DBG_BUGON(z_erofs_is_inline_pcluster(pcl)); - for (i = 0; i < pcl->pclusterpages; ++i) { + for (i = 0; i < pclusterpages; ++i) { if (pcl->compressed_bvecs[i].page == &folio->page) { WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); ret = true; @@ -697,7 +699,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio, DBG_BUGON(stop > folio_size(folio) || stop < length); if (offset == 0 && stop == folio_size(folio)) - while (!z_erofs_cache_release_folio(folio, GFP_NOFS)) + while (!z_erofs_cache_release_folio(folio, 0)) cond_resched(); } @@ -716,36 +718,30 @@ int erofs_init_managed_cache(struct super_block *sb) set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &z_erofs_cache_aops; - mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); + mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL); EROFS_SB(sb)->managed_cache = inode; return 0; } -static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe, - struct z_erofs_bvec *bvec) -{ - struct z_erofs_pcluster *const pcl = fe->pcl; - - while (fe->icur > 0) { - if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page, - NULL, bvec->page)) { - pcl->compressed_bvecs[fe->icur] = *bvec; - return true; - } - } - return false; -} - /* callers must be with pcluster lock held */ 
static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, struct z_erofs_bvec *bvec, bool exclusive) { + struct z_erofs_pcluster *pcl = fe->pcl; int ret; if (exclusive) { /* give priority for inplaceio to use file pages first */ - if (z_erofs_try_inplace_io(fe, bvec)) + spin_lock(&pcl->obj.lockref.lock); + while (fe->icur > 0) { + if (pcl->compressed_bvecs[--fe->icur].page) + continue; + pcl->compressed_bvecs[fe->icur] = *bvec; + spin_unlock(&pcl->obj.lockref.lock); return 0; + } + spin_unlock(&pcl->obj.lockref.lock); + /* otherwise, check if it can be used as a bvpage */ if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED && !fe->candidate_bvpage) @@ -778,20 +774,20 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) { struct erofs_map_blocks *map = &fe->map; + struct super_block *sb = fe->inode->i_sb; bool ztailpacking = map->m_flags & EROFS_MAP_META; struct z_erofs_pcluster *pcl; struct erofs_workgroup *grp; int err; if (!(map->m_flags & EROFS_MAP_ENCODED) || - (!ztailpacking && !(map->m_pa >> PAGE_SHIFT))) { + (!ztailpacking && !erofs_blknr(sb, map->m_pa))) { DBG_BUGON(1); return -EFSCORRUPTED; } /* no available pcluster, let's allocate one */ - pcl = z_erofs_alloc_pcluster(ztailpacking ? 1 : - map->m_plen >> PAGE_SHIFT); + pcl = z_erofs_alloc_pcluster(map->m_plen); if (IS_ERR(pcl)) return PTR_ERR(pcl); @@ -815,10 +811,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) if (ztailpacking) { pcl->obj.index = 0; /* which indicates ztailpacking */ - pcl->pageofs_in = erofs_blkoff(fe->inode->i_sb, map->m_pa); - pcl->tailpacking_size = map->m_plen; } else { - pcl->obj.index = map->m_pa >> PAGE_SHIFT; + pcl->obj.index = erofs_blknr(sb, map->m_pa); grp = erofs_insert_workgroup(fe->inode->i_sb, &pcl->obj); if (IS_ERR(grp)) { @@ -893,6 +887,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe) } get_page(map->buf.page); WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page); + fe->pcl->pageofs_in = map->m_pa & ~PAGE_MASK; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE; } /* file-backed inplace I/O pages are traversed in reverse order */ @@ -968,17 +963,17 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page, } static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, - struct page *page) + struct page *page, bool ra) { struct inode *const inode = fe->inode; struct erofs_map_blocks *const map = &fe->map; const loff_t offset = page_offset(page); + const unsigned int bs = i_blocksize(inode); bool tight = true, exclusive; unsigned int cur, end, len, split; int err = 0; z_erofs_onlinepage_init(page); - split = 0; end = PAGE_SIZE; repeat: @@ -1018,6 +1013,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, err = z_erofs_pcluster_begin(fe); if (err) goto out; + fe->pcl->besteffort |= !ra; } /* @@ -1027,7 +1023,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, * for inplace I/O or bvpage (should be processed in a strict order.) 
*/ tight &= (fe->mode > Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE); - exclusive = (!cur && ((split <= 1) || tight)); + exclusive = (!cur && ((split <= 1) || (tight && bs == PAGE_SIZE))); if (cur) tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED); @@ -1206,34 +1202,27 @@ static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be, struct z_erofs_bvec *bvec = &pcl->compressed_bvecs[i]; struct page *page = bvec->page; - /* compressed pages ought to be present before decompressing */ + /* compressed data ought to be valid before decompressing */ if (!page) { - DBG_BUGON(1); + err = -EIO; continue; } be->compressed_pages[i] = page; - if (z_erofs_is_inline_pcluster(pcl)) { + if (z_erofs_is_inline_pcluster(pcl) || + erofs_page_is_managed(EROFS_SB(be->sb), page)) { if (!PageUptodate(page)) err = -EIO; continue; } DBG_BUGON(z_erofs_page_is_invalidated(page)); - if (!z_erofs_is_shortlived_page(page)) { - if (erofs_page_is_managed(EROFS_SB(be->sb), page)) { - if (!PageUptodate(page)) - err = -EIO; - continue; - } - z_erofs_do_decompressed_bvec(be, bvec); - *overlapped = true; - } + if (z_erofs_is_shortlived_page(page)) + continue; + z_erofs_do_decompressed_bvec(be, bvec); + *overlapped = true; } - - if (err) - return err; - return 0; + return err; } static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, @@ -1242,10 +1231,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, struct erofs_sb_info *const sbi = EROFS_SB(be->sb); struct z_erofs_pcluster *pcl = be->pcl; unsigned int pclusterpages = z_erofs_pclusterpages(pcl); - const struct z_erofs_decompressor *decompressor = + const struct z_erofs_decompressor *decomp = &erofs_decompressors[pcl->algorithmformat]; - unsigned int i, inputsize; - int err2; + int i, err2; struct page *page; bool overlapped; @@ -1279,29 +1267,24 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, err2 = z_erofs_parse_in_bvecs(be, &overlapped); if (err2) err = err2; - if (err) - goto out; - - if (z_erofs_is_inline_pcluster(pcl)) - inputsize = pcl->tailpacking_size; - else - inputsize = pclusterpages * PAGE_SIZE; - - err = decompressor->decompress(&(struct z_erofs_decompress_req) { + if (!err) + err = decomp->decompress(&(struct z_erofs_decompress_req) { .sb = be->sb, .in = be->compressed_pages, .out = be->decompressed_pages, .pageofs_in = pcl->pageofs_in, .pageofs_out = pcl->pageofs_out, - .inputsize = inputsize, + .inputsize = pcl->pclustersize, .outputsize = pcl->length, .alg = pcl->algorithmformat, .inplace_io = overlapped, .partial_decoding = pcl->partial, .fillgaps = pcl->multibases, + .gfp = pcl->besteffort ? 
+ GFP_KERNEL | __GFP_NOFAIL : + GFP_NOWAIT | __GFP_NORETRY }, be->pagepool); -out: /* must handle all compressed pages before actual file pages */ if (z_erofs_is_inline_pcluster(pcl)) { page = pcl->compressed_bvecs[0].page; @@ -1309,12 +1292,11 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, put_page(page); } else { for (i = 0; i < pclusterpages; ++i) { - page = pcl->compressed_bvecs[i].page; + /* consider shortlived pages added when decompressing */ + page = be->compressed_pages[i]; - if (erofs_page_is_managed(sbi, page)) + if (!page || erofs_page_is_managed(sbi, page)) continue; - - /* recycle all individual short-lived pages */ (void)z_erofs_put_shortlivedpage(be->pagepool, page); WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL); } @@ -1343,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be, pcl->length = 0; pcl->partial = true; pcl->multibases = false; + pcl->besteffort = false; pcl->bvset.nextpage = NULL; pcl->vcnt = 0; @@ -1436,86 +1419,86 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, z_erofs_decompressqueue_work(&io->u.work); } -static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, - unsigned int nr, - struct page **pagepool, - struct address_space *mc) +static void z_erofs_fill_bio_vec(struct bio_vec *bvec, + struct z_erofs_decompress_frontend *f, + struct z_erofs_pcluster *pcl, + unsigned int nr, + struct address_space *mc) { - const pgoff_t index = pcl->obj.index; gfp_t gfp = mapping_gfp_mask(mc); bool tocache = false; - + struct z_erofs_bvec zbv; struct address_space *mapping; - struct page *oldpage, *page; - int justfound; + struct page *page; + int justfound, bs = i_blocksize(f->inode); + /* Except for inplace pages, the entire page can be used for I/Os */ + bvec->bv_offset = 0; + bvec->bv_len = PAGE_SIZE; repeat: - page = READ_ONCE(pcl->compressed_bvecs[nr].page); - oldpage = page; - - if (!page) - goto out_allocpage; - + spin_lock(&pcl->obj.lockref.lock); + zbv = pcl->compressed_bvecs[nr]; + page = zbv.page; justfound = (unsigned long)page & 1UL; page = (struct page *)((unsigned long)page & ~1UL); + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); + if (!page) + goto out_allocpage; + bvec->bv_page = page; + DBG_BUGON(z_erofs_is_shortlived_page(page)); /* - * preallocated cached pages, which is used to avoid direct reclaim - * otherwise, it will go inplace I/O path instead. + * Handle preallocated cached pages. We tried to allocate such pages + * without triggering direct reclaim. If allocation failed, inplace + * file-backed pages will be used instead. */ if (page->private == Z_EROFS_PREALLOCATED_PAGE) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); set_page_private(page, 0); tocache = true; goto out_tocache; } - mapping = READ_ONCE(page->mapping); + mapping = READ_ONCE(page->mapping); /* - * file-backed online pages in plcuster are all locked steady, - * therefore it is impossible for `mapping' to be NULL. + * File-backed pages for inplace I/Os are all locked steady, + * therefore it is impossible for `mapping` to be NULL. 
*/ - if (mapping && mapping != mc) - /* ought to be unmanaged pages */ - goto out; - - /* directly return for shortlived page as well */ - if (z_erofs_is_shortlived_page(page)) - goto out; + if (mapping && mapping != mc) { + if (zbv.offset < 0) + bvec->bv_offset = round_up(-zbv.offset, bs); + bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; + return; + } lock_page(page); - /* only true if page reclaim goes wrong, should never happen */ DBG_BUGON(justfound && PagePrivate(page)); - /* the page is still in manage cache */ + /* the cached page is still in managed cache */ if (page->mapping == mc) { - WRITE_ONCE(pcl->compressed_bvecs[nr].page, page); - + /* + * The cached page is still available but without a valid + * `->private` pcluster hint. Let's reconnect them. + */ if (!PagePrivate(page)) { - /* - * impossible to be !PagePrivate(page) for - * the current restriction as well if - * the page is already in compressed_bvecs[]. - */ DBG_BUGON(!justfound); - - justfound = 0; - set_page_private(page, (unsigned long)pcl); - SetPagePrivate(page); + /* compressed_bvecs[] already takes a ref */ + attach_page_private(page, pcl); + put_page(page); } - /* no need to submit io if it is already up-to-date */ + /* no need to submit if it is already up-to-date */ if (PageUptodate(page)) { unlock_page(page); - page = NULL; + bvec->bv_page = NULL; } - goto out; + return; } /* - * the managed page has been truncated, it's unsafe to - * reuse this one, let's allocate a new cache-managed page. + * It has been truncated, so it's unsafe to reuse this one. Let's + * allocate a new page for compressed data. */ DBG_BUGON(page->mapping); DBG_BUGON(!justfound); @@ -1524,25 +1507,27 @@ static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, unlock_page(page); put_page(page); out_allocpage: - page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL); - if (oldpage != cmpxchg(&pcl->compressed_bvecs[nr].page, - oldpage, page)) { - erofs_pagepool_add(pagepool, page); + page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL); + spin_lock(&pcl->obj.lockref.lock); + if (pcl->compressed_bvecs[nr].page) { + erofs_pagepool_add(&f->pagepool, page); + spin_unlock(&pcl->obj.lockref.lock); cond_resched(); goto repeat; } + pcl->compressed_bvecs[nr].page = page; + spin_unlock(&pcl->obj.lockref.lock); + bvec->bv_page = page; out_tocache: - if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) { - /* turn into temporary page if fails (1 ref) */ + if (!tocache || bs != PAGE_SIZE || + add_to_page_cache_lru(page, mc, pcl->obj.index + nr, gfp)) { + /* turn into a temporary shortlived page (1 ref) */ set_page_private(page, Z_EROFS_SHORTLIVED_PAGE); - goto out; + return; } attach_page_private(page, pcl); - /* drop a refcount added by allocpage (then we have 2 refs here) */ + /* drop a refcount added by allocpage (then 2 refs in total here) */ put_page(page); - -out: /* the only exit (for tracing and debugging) */ - return page; } static struct z_erofs_decompressqueue *jobqueue_init(struct super_block *sb, @@ -1597,7 +1582,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } -static void z_erofs_decompressqueue_endio(struct bio *bio) +static void z_erofs_submissionqueue_endio(struct bio *bio) { struct z_erofs_decompressqueue *q = bio->bi_private; blk_status_t err = bio->bi_status; @@ -1609,7 +1594,6 @@ static void z_erofs_decompressqueue_endio(struct bio *bio) DBG_BUGON(PageUptodate(page)); DBG_BUGON(z_erofs_page_is_invalidated(page)); - if 
(erofs_page_is_managed(EROFS_SB(q->sb), page)) { if (!err) SetPageUptodate(page); @@ -1632,17 +1616,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, struct z_erofs_decompressqueue *q[NR_JOBQUEUES]; z_erofs_next_pcluster_t owned_head = f->owned_head; /* bio is NULL initially, so no need to initialize last_{index,bdev} */ - pgoff_t last_index; + erofs_off_t last_pa; struct block_device *last_bdev; unsigned int nr_bios = 0; struct bio *bio = NULL; unsigned long pflags; int memstall = 0; - /* - * if managed cache is enabled, bypass jobqueue is needed, - * no need to read from device for all pclusters in this queue. - */ + /* No need to read from device for pclusters in the bypass queue. */ q[JQ_BYPASS] = jobqueue_init(sb, fgq + JQ_BYPASS, NULL); q[JQ_SUBMIT] = jobqueue_init(sb, fgq + JQ_SUBMIT, force_fg); @@ -1655,7 +1636,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, do { struct erofs_map_dev mdev; struct z_erofs_pcluster *pcl; - pgoff_t cur, end; + erofs_off_t cur, end; + struct bio_vec bvec; unsigned int i = 0; bool bypass = true; @@ -1674,18 +1656,14 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, }; (void)erofs_map_dev(sb, &mdev); - cur = erofs_blknr(sb, mdev.m_pa); - end = cur + pcl->pclusterpages; - + cur = mdev.m_pa; + end = cur + pcl->pclustersize; do { - struct page *page; - - page = pickup_page_for_submission(pcl, i++, - &f->pagepool, mc); - if (!page) + z_erofs_fill_bio_vec(&bvec, f, pcl, i++, mc); + if (!bvec.bv_page) continue; - if (bio && (cur != last_index + 1 || + if (bio && (cur != last_pa || last_bdev != mdev.m_bdev)) { submit_bio_retry: submit_bio(bio); @@ -1696,7 +1674,8 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, bio = NULL; } - if (unlikely(PageWorkingset(page)) && !memstall) { + if (unlikely(PageWorkingset(bvec.bv_page)) && + !memstall) { psi_memstall_enter(&pflags); memstall = 1; } @@ -1704,23 +1683,25 @@ static void z_erofs_submit_queue(struct z_erofs_decompress_frontend *f, if (!bio) { bio = bio_alloc(mdev.m_bdev, BIO_MAX_VECS, REQ_OP_READ, GFP_NOIO); - bio->bi_end_io = z_erofs_decompressqueue_endio; - - last_bdev = mdev.m_bdev; - bio->bi_iter.bi_sector = (sector_t)cur << - (sb->s_blocksize_bits - 9); + bio->bi_end_io = z_erofs_submissionqueue_endio; + bio->bi_iter.bi_sector = cur >> 9; bio->bi_private = q[JQ_SUBMIT]; if (readahead) bio->bi_opf |= REQ_RAHEAD; ++nr_bios; + last_bdev = mdev.m_bdev; } - if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) + if (cur + bvec.bv_len > end) + bvec.bv_len = end - cur; + DBG_BUGON(bvec.bv_len < sb->s_blocksize); + if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, + bvec.bv_offset)) goto submit_bio_retry; - last_index = cur; + last_pa = cur + bvec.bv_len; bypass = false; - } while (++cur < end); + } while ((cur += bvec.bv_len) < end); if (!bypass) qtail[JQ_SUBMIT] = &pcl->next; @@ -1814,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f, if (PageUptodate(page)) unlock_page(page); else - (void)z_erofs_do_read_page(f, page); + (void)z_erofs_do_read_page(f, page, !!rac); put_page(page); } @@ -1835,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio) f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT; z_erofs_pcluster_readmore(&f, NULL, true); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_do_read_page(&f, &folio->page, false); z_erofs_pcluster_readmore(&f, NULL, false); z_erofs_pcluster_end(&f); @@ -1876,7 +1857,7 @@ static 
void z_erofs_readahead(struct readahead_control *rac) folio = head; head = folio_get_private(folio); - err = z_erofs_do_read_page(&f, &folio->page); + err = z_erofs_do_read_page(&f, &folio->page, true); if (err && err != -EINTR) erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu", folio->index, EROFS_I(inode)->nid); diff --git a/fs/erofs/zmap.c b/fs/erofs/zmap.c index 7b55111fd5337773555f6a78760b7e80cb95896a..e313c936351d51fb39685702437d22bbef16719a 100644 --- a/fs/erofs/zmap.c +++ b/fs/erofs/zmap.c @@ -82,29 +82,26 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, } static unsigned int decode_compactedbits(unsigned int lobits, - unsigned int lomask, u8 *in, unsigned int pos, u8 *type) { const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); - const unsigned int lo = v & lomask; + const unsigned int lo = v & ((1 << lobits) - 1); *type = (v >> lobits) & 3; return lo; } -static int get_compacted_la_distance(unsigned int lclusterbits, +static int get_compacted_la_distance(unsigned int lobits, unsigned int encodebits, unsigned int vcnt, u8 *in, int i) { - const unsigned int lomask = (1 << lclusterbits) - 1; unsigned int lo, d1 = 0; u8 type; DBG_BUGON(i >= vcnt); do { - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); + lo = decode_compactedbits(lobits, in, encodebits * i, &type); if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) return d1; @@ -123,15 +120,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, { struct erofs_inode *const vi = EROFS_I(m->inode); const unsigned int lclusterbits = vi->z_logical_clusterbits; - const unsigned int lomask = (1 << lclusterbits) - 1; - unsigned int vcnt, base, lo, encodebits, nblk, eofs; + unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs; int i; u8 *in, type; bool big_pcluster; if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; - else if (1 << amortizedshift == 2 && lclusterbits == 12) + else if (1 << amortizedshift == 2 && lclusterbits <= 12) vcnt = 16; else return -EOPNOTSUPP; @@ -140,6 +136,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, m->nextpackoff = round_down(pos, vcnt << amortizedshift) + (vcnt << amortizedshift); big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; + lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; eofs = erofs_blkoff(m->inode->i_sb, pos); base = round_down(eofs, vcnt << amortizedshift); @@ -147,15 +144,14 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, i = (eofs - base) >> amortizedshift; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); + lo = decode_compactedbits(lobits, in, encodebits * i, &type); m->type = type; if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << lclusterbits; /* figure out lookahead_distance: delta[1] if needed */ if (lookahead) - m->delta[1] = get_compacted_la_distance(lclusterbits, + m->delta[1] = get_compacted_la_distance(lobits, encodebits, vcnt, in, i); if (lo & Z_EROFS_LI_D0_CBLKCNT) { if (!big_pcluster) { @@ -174,8 +170,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, * of which lo saves delta[1] rather than delta[0]. * Hence, get delta[0] by the previous lcluster indirectly. 
*/ - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * (i - 1), &type); + lo = decode_compactedbits(lobits, in, + encodebits * (i - 1), &type); if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) lo = 0; else if (lo & Z_EROFS_LI_D0_CBLKCNT) @@ -190,8 +186,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, nblk = 1; while (i > 0) { --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); + lo = decode_compactedbits(lobits, in, + encodebits * i, &type); if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) i -= lo; @@ -202,8 +198,8 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m, nblk = 0; while (i > 0) { --i; - lo = decode_compactedbits(lclusterbits, lomask, - in, encodebits * i, &type); + lo = decode_compactedbits(lobits, in, + encodebits * i, &type); if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { if (lo & Z_EROFS_LI_D0_CBLKCNT) { --i; @@ -458,7 +454,7 @@ static int z_erofs_do_map_blocks(struct inode *inode, .map = map, }; int err = 0; - unsigned int lclusterbits, endoff; + unsigned int lclusterbits, endoff, afmt; unsigned long initial_lcn; unsigned long long ofs, end; @@ -547,17 +543,20 @@ static int z_erofs_do_map_blocks(struct inode *inode, err = -EFSCORRUPTED; goto unmap_out; } - if (vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER) - map->m_algorithmformat = - Z_EROFS_COMPRESSION_INTERLACED; - else - map->m_algorithmformat = - Z_EROFS_COMPRESSION_SHIFTED; - } else if (m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) { - map->m_algorithmformat = vi->z_algorithmtype[1]; + afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ? + Z_EROFS_COMPRESSION_INTERLACED : + Z_EROFS_COMPRESSION_SHIFTED; } else { - map->m_algorithmformat = vi->z_algorithmtype[0]; + afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ? 
+			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
+		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
+			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
+				  afmt, vi->nid);
+			err = -EFSCORRUPTED;
+			goto unmap_out;
+		}
 	}
+	map->m_algorithmformat = afmt;
 
 	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
 	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index bbda587f76b85ab0a6840c4cf19b45151ea3f26b..f444fddd892e99babcc5c1fd672093c32ff724dd 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -50,6 +50,8 @@
 #define NAMEI_RA_BLOCKS 4
 #define NAMEI_RA_SIZE (NAMEI_RA_CHUNKS * NAMEI_RA_BLOCKS)
 
+extern int sysctl_hardlink_cross_projid __read_mostly;
+
 static struct buffer_head *ext4_append(handle_t *handle,
 				       struct inode *inode,
 				       ext4_lblk_t *block)
@@ -3507,7 +3509,8 @@ static int ext4_link(struct dentry *old_dentry,
 	if (err)
 		return err;
 
-	if ((ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
+	if (!sysctl_hardlink_cross_projid &&
+	    (ext4_test_inode_flag(dir, EXT4_INODE_PROJINHERIT)) &&
 	    (!projid_eq(EXT4_I(dir)->i_projid,
 			EXT4_I(old_dentry->d_inode)->i_projid)))
 		return -EXDEV;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index d062383ea50ef4f8a4962bc139d0337d76d9c4b3..dc65a26faed1d3af6fedd2ed49ce1be97fda1dce 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -4968,6 +4968,7 @@ static int ext4_load_and_init_journal(struct super_block *sb,
 	}
 
 	set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
+	set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio);
 
 	sbi->s_journal->j_submit_inode_data_buffers =
 		ext4_journal_submit_inode_data_buffers;
@@ -6541,6 +6542,7 @@ static int __ext4_remount(struct fs_context *fc, struct super_block *sb)
 	if (sbi->s_journal) {
 		ext4_init_journal_params(sb, sbi->s_journal);
 		set_task_ioprio(sbi->s_journal->j_task, ctx->journal_ioprio);
+		set_task_ioprio(sbi->s_journal->j_checkpoint_task, ctx->journal_ioprio);
 	}
 
 	/* Flush outstanding errors before changing fs state */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 1767493dffda73b77fe3c394967c003a426fb13d..42383a5c8e951e9cbac7f3521edd546d263dd3bf 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -858,6 +858,16 @@ void wbc_detach_inode(struct writeback_control *wbc)
 	inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
 	inode->i_wb_frn_history = history;
 
+	/*
+	 * Without the wb list lock, i_wb can switch at any point, so this
+	 * check may be made against the wrong wb anyway.
+	 *
+	 * The wb is switched to the root memcg unconditionally; we expect
+	 * the correct wb (the best candidate) to be picked up on the next round.
+	 */
+	if (wb == inode->i_wb && wb_dying(wb) && !(inode->i_state & I_DIRTY_ALL))
+		inode_switch_wbs(inode, root_mem_cgroup->css.id);
+
 	wb_put(wbc->wb);
 	wbc->wb = NULL;
 }
diff --git a/fs/fs_context.c b/fs/fs_context.c
index 98589aae52085c82d4e8951ca63be928cb33cec8..8cc839a46f45a66192e6fb724a57250b91c14d05 100644
--- a/fs/fs_context.c
+++ b/fs/fs_context.c
@@ -378,6 +378,7 @@ void fc_drop_locked(struct fs_context *fc)
 	fc->root = NULL;
 	deactivate_locked_super(sb);
 }
+EXPORT_SYMBOL_GPL(fc_drop_locked);
 
 static void legacy_fs_context_free(struct fs_context *fc);
 
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
index 038ed0b9aaa5d619cf498c55ffd0069dce2abf14..ad36e8915364f252c81c6f12b687dc8d22a370c8 100644
--- a/fs/fuse/Kconfig
+++ b/fs/fuse/Kconfig
@@ -52,3 +52,14 @@ config FUSE_DAX
 	  If you want to allow mounting a Virtio Filesystem with the
 	  "dax" option, answer Y.
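The sysctl_hardlink_cross_projid knob consumed by ext4_link() above is registered in fs/namei.c later in this series. A minimal sketch of flipping it at run time, assuming the table is registered under "fs" like the existing protected_* knobs::

  #include <stdio.h>

  int main(void)
  {
  	/* path assumed from the namei_sysctls registration */
  	FILE *f = fopen("/proc/sys/fs/hardlink_cross_projid", "w");

  	if (!f)
  		return 1;
  	/* 1: allow hard links across project IDs; 0 (default): keep the
  	 * -EXDEV check for PROJINHERIT directories */
  	fprintf(f, "1\n");
  	return fclose(f) ? 1 : 0;
  }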
+
+config VIRT_FUSE
+	tristate "FUSE device virtualization extension"
+	depends on FUSE_FS
+	help
+	  This FUSE extension provides virtualized FUSE devices for container
+	  workloads. Each virtualized FUSE device only supports one instance
+	  of FUSE filesystem with special treatments for user namespace.
+
+	  If you want to support FUSE device virtualization for containers,
+	  answer Y or M.
diff --git a/fs/fuse/Makefile b/fs/fuse/Makefile
index 0c48b35c058d78d20966443de6a56928020729b4..d2bad0c85d8cf7b499e3c48fe381dba9ee5ccc16 100644
--- a/fs/fuse/Makefile
+++ b/fs/fuse/Makefile
@@ -6,8 +6,10 @@
 obj-$(CONFIG_FUSE_FS) += fuse.o
 obj-$(CONFIG_CUSE) += cuse.o
 obj-$(CONFIG_VIRTIO_FS) += virtiofs.o
+obj-$(CONFIG_VIRT_FUSE) += virtfuse.o
 
 fuse-y := dev.o dir.o file.o inode.o control.o xattr.o acl.o readdir.o ioctl.o
+fuse-y += iomode.o
 fuse-$(CONFIG_FUSE_DAX) += dax.o
 
 virtiofs-y := virtio_fs.o
diff --git a/fs/fuse/dax.c b/fs/fuse/dax.c
index 23904a6a9a96f74a45eb8c2f9bf72d795e05ac60..12ef91d170bb3091ac35a33d2b9dc38330b00948 100644
--- a/fs/fuse/dax.c
+++ b/fs/fuse/dax.c
@@ -1222,6 +1222,7 @@ void fuse_dax_conn_free(struct fuse_conn *fc)
 	if (fc->dax) {
 		fuse_free_dax_mem_ranges(&fc->dax->free_ranges);
 		kfree(fc->dax);
+		fc->dax = NULL;
 	}
 }
 
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 1a8f82f478cb7aa4a01f7286ff2f1e939ee5caca..2a2ba223f6edd9d63d15005efc2c606c59717941 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -1775,6 +1775,61 @@ static int fuse_notify_retrieve(struct fuse_conn *fc, unsigned int size,
 	return err;
 }
 
+/*
+ * Resend all processing queue requests.
+ *
+ * When a FUSE daemon panics and fails over, it is possible for some inflight
+ * requests to be lost and never returned. As a result, applications awaiting
+ * replies would become stuck forever. To address this, we can use notification
+ * to trigger resending of these pending requests to the FUSE daemon, ensuring
+ * they are properly processed again.
+ *
+ * Please note that this strategy is applicable only to idempotent requests or
+ * if the FUSE daemon takes careful measures to avoid processing duplicated
+ * non-idempotent requests.
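The daemon side of this mechanism is a single write to the device: notifications travel in a fuse_out_header with unique == 0 and the notify code in the error field, and requeued requests come back with FUSE_UNIQUE_RESEND set in their unique id so the daemon can spot duplicates. A hedged sketch; FUSE_NOTIFY_RESEND itself comes from the uapi update in this series, not shown in this hunk::

  #include <string.h>
  #include <unistd.h>
  #include <linux/fuse.h>

  /* Ask the kernel to requeue all in-flight requests after a failover. */
  static int request_resend(int devfd)
  {
  	struct fuse_out_header oh;

  	memset(&oh, 0, sizeof(oh));
  	oh.len = sizeof(oh);		/* no payload follows the header */
  	oh.error = FUSE_NOTIFY_RESEND;	/* notify code goes in ->error */
  	oh.unique = 0;			/* unique == 0 marks a notification */

  	return write(devfd, &oh, sizeof(oh)) == sizeof(oh) ? 0 : -1;
  }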
+ */ +static void fuse_resend(struct fuse_conn *fc) +{ + struct fuse_dev *fud; + struct fuse_req *req, *next; + struct fuse_iqueue *fiq = &fc->iq; + LIST_HEAD(to_queue); + unsigned int i; + + spin_lock(&fc->lock); + if (!fc->connected) { + spin_unlock(&fc->lock); + return; + } + + list_for_each_entry(fud, &fc->devices, entry) { + struct fuse_pqueue *fpq = &fud->pq; + + spin_lock(&fpq->lock); + for (i = 0; i < FUSE_PQ_HASH_SIZE; i++) + list_splice_tail_init(&fpq->processing[i], &to_queue); + spin_unlock(&fpq->lock); + } + spin_unlock(&fc->lock); + + list_for_each_entry_safe(req, next, &to_queue, list) { + __set_bit(FR_PENDING, &req->flags); + /* mark the request as resend request */ + req->in.h.unique |= FUSE_UNIQUE_RESEND; + } + + spin_lock(&fiq->lock); + /* iq and pq requests are both oldest to newest */ + list_splice(&to_queue, &fiq->pending); + fiq->ops->wake_pending_and_unlock(fiq); +} + +static int fuse_notify_resend(struct fuse_conn *fc) +{ + fuse_resend(fc); + return 0; +} + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { @@ -1800,6 +1855,9 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, case FUSE_NOTIFY_DELETE: return fuse_notify_delete(fc, size, cs); + case FUSE_NOTIFY_RESEND: + return fuse_notify_resend(fc); + default: fuse_copy_finish(cs); return -EINVAL; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index d707e6987da91eaaf06eda157f8487d9488dd12b..58042ad7dc492ba302f72747c6b14ec18a988c2b 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -630,7 +630,7 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, goto out_err; err = -ENOMEM; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, true); if (!ff) goto out_put_forget_req; @@ -692,13 +692,15 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, d_instantiate(entry, inode); fuse_change_entry_timeout(entry, &outentry); fuse_dir_changed(dir); - err = finish_open(file, entry, generic_file_open); + err = generic_file_open(inode, file); + if (!err) { + file->private_data = ff; + err = finish_open(file, entry, fuse_finish_open); + } if (err) { fi = get_fuse_inode(inode); fuse_sync_release(fi, ff, flags); } else { - file->private_data = ff; - fuse_finish_open(inode, file); if (fm->fc->atomic_o_trunc && trunc) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) @@ -1630,7 +1632,30 @@ static const char *fuse_get_link(struct dentry *dentry, struct inode *inode, static int fuse_dir_open(struct inode *inode, struct file *file) { - return fuse_open_common(inode, file, true); + struct fuse_mount *fm = get_fuse_mount(inode); + int err; + + if (fuse_is_bad(inode)) + return -EIO; + + err = generic_file_open(inode, file); + if (err) + return err; + + err = fuse_do_open(fm, get_node_id(inode), file, true); + if (!err) { + struct fuse_file *ff = file->private_data; + + /* + * Keep handling FOPEN_STREAM and FOPEN_NONSEEKABLE for + * directories for backward compatibility, though it's unlikely + * to be useful. 
+ */ + if (ff->open_flags & (FOPEN_STREAM | FOPEN_NONSEEKABLE)) + nonseekable_open(inode, file); + } + + return err; } static int fuse_dir_release(struct inode *inode, struct file *file) diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 1cdb6327511ef843db8936302fe7f141c4016186..68acf361566efeab37a65fc3f602ce89240e0171 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -55,7 +55,7 @@ struct fuse_release_args { struct inode *inode; }; -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release) { struct fuse_file *ff; @@ -64,11 +64,13 @@ struct fuse_file *fuse_file_alloc(struct fuse_mount *fm) return NULL; ff->fm = fm; - ff->release_args = kzalloc(sizeof(*ff->release_args), - GFP_KERNEL_ACCOUNT); - if (!ff->release_args) { - kfree(ff); - return NULL; + if (release) { + ff->release_args = kzalloc(sizeof(*ff->release_args), + GFP_KERNEL_ACCOUNT); + if (!ff->release_args) { + kfree(ff); + return NULL; + } } INIT_LIST_HEAD(&ff->write_entry); @@ -104,14 +106,17 @@ static void fuse_release_end(struct fuse_mount *fm, struct fuse_args *args, kfree(ra); } -static void fuse_file_put(struct fuse_file *ff, bool sync, bool isdir) +static void fuse_file_put(struct fuse_file *ff, bool sync) { if (refcount_dec_and_test(&ff->count)) { - struct fuse_args *args = &ff->release_args->args; + struct fuse_release_args *ra = ff->release_args; + struct fuse_args *args = (ra ? &ra->args : NULL); - if (isdir ? ff->fm->fc->no_opendir : ff->fm->fc->no_open) { - /* Do nothing when client does not implement 'open' */ - fuse_release_end(ff->fm, args, 0); + if (ra && ra->inode) + fuse_file_io_release(ff, ra->inode); + + if (!args) { + /* Do nothing when server does not implement 'open' */ } else if (sync) { fuse_simple_request(ff->fm, args); fuse_release_end(ff->fm, args, 0); @@ -131,15 +136,16 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, struct fuse_conn *fc = fm->fc; struct fuse_file *ff; int opcode = isdir ? FUSE_OPENDIR : FUSE_OPEN; + bool open = isdir ? !fc->no_opendir : !fc->no_open; - ff = fuse_file_alloc(fm); + ff = fuse_file_alloc(fm, open); if (!ff) return ERR_PTR(-ENOMEM); ff->fh = 0; /* Default for no-open */ ff->open_flags = FOPEN_KEEP_CACHE | (isdir ? FOPEN_CACHE_DIR : 0); - if (isdir ? 
!fc->no_opendir : !fc->no_open) { + if (open) { struct fuse_open_out outarg; int err; @@ -147,11 +153,13 @@ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, if (!err) { ff->fh = outarg.fh; ff->open_flags = outarg.open_flags; - } else if (err != -ENOSYS) { fuse_file_free(ff); return ERR_PTR(err); } else { + /* No release needed */ + kfree(ff->release_args); + ff->release_args = NULL; if (isdir) fc->no_opendir = 1; else @@ -194,40 +202,50 @@ static void fuse_link_write_file(struct file *file) spin_unlock(&fi->lock); } -void fuse_finish_open(struct inode *inode, struct file *file) +int fuse_finish_open(struct inode *inode, struct file *file) { struct fuse_file *ff = file->private_data; struct fuse_conn *fc = get_fuse_conn(inode); + int err; + + err = fuse_file_io_open(file, inode); + if (err) + return err; if (ff->open_flags & FOPEN_STREAM) stream_open(inode, file); else if (ff->open_flags & FOPEN_NONSEEKABLE) nonseekable_open(inode, file); - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) { - struct fuse_inode *fi = get_fuse_inode(inode); - - spin_lock(&fi->lock); - fi->attr_version = atomic64_inc_return(&fc->attr_version); - i_size_write(inode, 0); - spin_unlock(&fi->lock); - file_update_time(file); - fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); - } if ((file->f_mode & FMODE_WRITE) && fc->writeback_cache) fuse_link_write_file(file); + + return 0; +} + +static void fuse_truncate_update_attr(struct inode *inode, struct file *file) +{ + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_inode *fi = get_fuse_inode(inode); + + spin_lock(&fi->lock); + fi->attr_version = atomic64_inc_return(&fc->attr_version); + i_size_write(inode, 0); + spin_unlock(&fi->lock); + file_update_time(file); + fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE); } -int fuse_open_common(struct inode *inode, struct file *file, bool isdir) +static int fuse_open(struct inode *inode, struct file *file) { struct fuse_mount *fm = get_fuse_mount(inode); + struct fuse_inode *fi = get_fuse_inode(inode); struct fuse_conn *fc = fm->fc; + struct fuse_file *ff; int err; - bool is_wb_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && - fc->writeback_cache; - bool dax_truncate = (file->f_flags & O_TRUNC) && - fc->atomic_o_trunc && FUSE_IS_DAX(inode); + bool is_truncate = (file->f_flags & O_TRUNC) && fc->atomic_o_trunc; + bool is_wb_truncate = is_truncate && fc->writeback_cache; + bool dax_truncate = is_truncate && FUSE_IS_DAX(inode); if (fuse_is_bad(inode)) return -EIO; @@ -249,16 +267,20 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) if (is_wb_truncate || dax_truncate) fuse_set_nowrite(inode); - err = fuse_do_open(fm, get_node_id(inode), file, isdir); - if (!err) - fuse_finish_open(inode, file); + err = fuse_do_open(fm, get_node_id(inode), file, false); + if (!err) { + ff = file->private_data; + err = fuse_finish_open(inode, file); + if (err) + fuse_sync_release(fi, ff, file->f_flags); + else if (is_truncate) + fuse_truncate_update_attr(inode, file); + } if (is_wb_truncate || dax_truncate) fuse_release_nowrite(inode); if (!err) { - struct fuse_file *ff = file->private_data; - - if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) + if (is_truncate) truncate_pagecache(inode, 0); else if (!(ff->open_flags & FOPEN_KEEP_CACHE)) invalidate_inode_pages2(inode->i_mapping); @@ -273,7 +295,7 @@ int fuse_open_common(struct inode *inode, struct file *file, bool isdir) } static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, - unsigned int 
flags, int opcode) + unsigned int flags, int opcode, bool sync) { struct fuse_conn *fc = ff->fm->fc; struct fuse_release_args *ra = ff->release_args; @@ -291,6 +313,9 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, wake_up_interruptible_all(&ff->poll_wait); + if (!ra) + return; + ra->inarg.fh = ff->fh; ra->inarg.flags = flags; ra->args.in_numargs = 1; @@ -300,6 +325,13 @@ static void fuse_prepare_release(struct fuse_inode *fi, struct fuse_file *ff, ra->args.nodeid = ff->nodeid; ra->args.force = true; ra->args.nocreds = true; + + /* + * Hold inode until release is finished. + * From fuse_sync_release() the refcount is 1 and everything's + * synchronous, so we are fine with not doing igrab() here. + */ + ra->inode = sync ? NULL : igrab(&fi->inode); } void fuse_file_release(struct inode *inode, struct fuse_file *ff, @@ -309,14 +341,12 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, struct fuse_release_args *ra = ff->release_args; int opcode = isdir ? FUSE_RELEASEDIR : FUSE_RELEASE; - fuse_prepare_release(fi, ff, open_flags, opcode); + fuse_prepare_release(fi, ff, open_flags, opcode, false); - if (ff->flock) { + if (ra && ff->flock) { ra->inarg.release_flags |= FUSE_RELEASE_FLOCK_UNLOCK; ra->inarg.lock_owner = fuse_lock_owner_id(ff->fm->fc, id); } - /* Hold inode until release is finished */ - ra->inode = igrab(inode); /* * Normally this will send the RELEASE request, however if @@ -327,7 +357,7 @@ void fuse_file_release(struct inode *inode, struct fuse_file *ff, * synchronous RELEASE is allowed (and desirable) in this case * because the server can be trusted not to screw up. */ - fuse_file_put(ff, ff->fm->fc->destroy, isdir); + fuse_file_put(ff, ff->fm->fc->destroy); } void fuse_release_common(struct file *file, bool isdir) @@ -336,11 +366,6 @@ void fuse_release_common(struct file *file, bool isdir) (fl_owner_t) file, isdir); } -static int fuse_open(struct inode *inode, struct file *file) -{ - return fuse_open_common(inode, file, false); -} - static int fuse_release(struct inode *inode, struct file *file) { struct fuse_conn *fc = get_fuse_conn(inode); @@ -362,12 +387,8 @@ void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags) { WARN_ON(refcount_read(&ff->count) > 1); - fuse_prepare_release(fi, ff, flags, FUSE_RELEASE); - /* - * iput(NULL) is a no-op and since the refcount is 1 and everything's - * synchronous, we are fine with not doing igrab() here" - */ - fuse_file_put(ff, true, false); + fuse_prepare_release(fi, ff, flags, FUSE_RELEASE, true); + fuse_file_put(ff, true); } EXPORT_SYMBOL_GPL(fuse_sync_release); @@ -924,7 +945,7 @@ static void fuse_readpages_end(struct fuse_mount *fm, struct fuse_args *args, put_page(page); } if (ia->ff) - fuse_file_put(ia->ff, false, false); + fuse_file_put(ia->ff, false); fuse_io_free(ia); } @@ -1298,6 +1319,86 @@ static ssize_t fuse_perform_write(struct kiocb *iocb, struct iov_iter *ii) return res; } +static bool fuse_io_past_eof(struct kiocb *iocb, struct iov_iter *iter) +{ + struct inode *inode = file_inode(iocb->ki_filp); + + return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode); +} + +/* + * @return true if an exclusive lock for direct IO writes is needed + */ +static bool fuse_dio_wr_exclusive_lock(struct kiocb *iocb, struct iov_iter *from) +{ + struct file *file = iocb->ki_filp; + struct fuse_file *ff = file->private_data; + struct inode *inode = file_inode(iocb->ki_filp); + struct fuse_inode *fi = get_fuse_inode(inode); + + /* Server side has to advise 
that it supports parallel dio writes. */
+	if (!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES))
+		return true;
+
+	/*
+	 * Append will need to know the eventual EOF - always needs an
+	 * exclusive lock.
+	 */
+	if (iocb->ki_flags & IOCB_APPEND)
+		return true;
+
+	/* shared locks are not allowed with parallel page cache IO */
+	if (test_bit(FUSE_I_CACHE_IO_MODE, &fi->state))
+		return true;
+
+	/* Parallel dio beyond EOF is not supported, at least for now. */
+	if (fuse_io_past_eof(iocb, from))
+		return true;
+
+	return false;
+}
+
+static void fuse_dio_lock(struct kiocb *iocb, struct iov_iter *from,
+			  bool *exclusive)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct fuse_file *ff = iocb->ki_filp->private_data;
+
+	*exclusive = fuse_dio_wr_exclusive_lock(iocb, from);
+	if (*exclusive) {
+		inode_lock(inode);
+	} else {
+		inode_lock_shared(inode);
+		/*
+		 * New parallel dio allowed only if inode is not in caching
+		 * mode and denies new opens in caching mode. This check
+		 * should be performed only after taking shared inode lock.
+		 * Previous past eof check was without inode lock and might
+		 * have raced, so check it again.
+		 */
+		if (fuse_io_past_eof(iocb, from) ||
+		    fuse_file_uncached_io_start(inode, ff) != 0) {
+			inode_unlock_shared(inode);
+			inode_lock(inode);
+			*exclusive = true;
+		}
+	}
+}
+
+static void fuse_dio_unlock(struct kiocb *iocb, bool exclusive)
+{
+	struct inode *inode = file_inode(iocb->ki_filp);
+	struct fuse_file *ff = iocb->ki_filp->private_data;
+
+	if (exclusive) {
+		inode_unlock(inode);
+	} else {
+		/* Allow opens in caching mode after last parallel dio end */
+		fuse_file_uncached_io_end(inode, ff);
+		inode_unlock_shared(inode);
+	}
+}
+
 static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
@@ -1448,7 +1549,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
 	if (!ia)
 		return -ENOMEM;
 
-	if (fopen_direct_io && fc->direct_io_relax) {
+	if (fopen_direct_io && fc->direct_io_allow_mmap) {
 		res = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
 		if (res) {
 			fuse_io_free(ia);
@@ -1557,47 +1658,14 @@ static ssize_t fuse_direct_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	return res;
 }
 
-static bool fuse_direct_write_extending_i_size(struct kiocb *iocb,
-					       struct iov_iter *iter)
-{
-	struct inode *inode = file_inode(iocb->ki_filp);
-
-	return iocb->ki_pos + iov_iter_count(iter) > i_size_read(inode);
-}
-
 static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
-	struct file *file = iocb->ki_filp;
-	struct fuse_file *ff = file->private_data;
 	struct fuse_io_priv io = FUSE_IO_PRIV_SYNC(iocb);
 	ssize_t res;
-	bool exclusive_lock =
-		!(ff->open_flags & FOPEN_PARALLEL_DIRECT_WRITES) ||
-		iocb->ki_flags & IOCB_APPEND ||
-		fuse_direct_write_extending_i_size(iocb, from);
-
-	/*
-	 * Take exclusive lock if
-	 * - Parallel direct writes are disabled - a user space decision
-	 * - Parallel direct writes are enabled and i_size is being extended.
-	 *   This might not be needed at all, but needs further investigation.
-	 */
-	if (exclusive_lock)
-		inode_lock(inode);
-	else {
-		inode_lock_shared(inode);
-
-		/* A race with truncate might have come up as the decision for
-		 * the lock type was done without holding the lock, check again.
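Seen from user space, the rules above mean two non-extending, non-appending O_DIRECT writes to the same file may run under the shared inode lock once the server has negotiated FOPEN_PARALLEL_DIRECT_WRITES. A rough illustration; the mount point and sizes are arbitrary::

  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <pthread.h>
  #include <stdlib.h>
  #include <unistd.h>

  /* Each writer stays below i_size, so fuse_dio_wr_exclusive_lock()
   * returns false and both writes proceed under inode_lock_shared(). */
  static void *writer(void *arg)
  {
  	void *buf = NULL;
  	int fd = open("/mnt/fuse/file", O_WRONLY | O_DIRECT);

  	if (fd < 0)
  		return NULL;
  	if (posix_memalign(&buf, 4096, 4096) == 0) {
  		pwrite(fd, buf, 4096, (long)arg);  /* offset below i_size */
  		free(buf);
  	}
  	close(fd);
  	return NULL;
  }

  int main(void)
  {
  	pthread_t t[2];

  	pthread_create(&t[0], NULL, writer, (void *)0L);
  	pthread_create(&t[1], NULL, writer, (void *)4096L);
  	pthread_join(t[0], NULL);
  	pthread_join(t[1], NULL);
  	return 0;
  }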
-		 */
-		if (fuse_direct_write_extending_i_size(iocb, from)) {
-			inode_unlock_shared(inode);
-			inode_lock(inode);
-			exclusive_lock = true;
-		}
-	}
+	bool exclusive;
 
+	fuse_dio_lock(iocb, from, &exclusive);
 	res = generic_write_checks(iocb, from);
 	if (res > 0) {
 		if (!is_sync_kiocb(iocb) && iocb->ki_flags & IOCB_DIRECT) {
@@ -1608,10 +1676,7 @@ static ssize_t fuse_direct_write_iter(struct kiocb *iocb, struct iov_iter *from)
 			fuse_write_update_attr(inode, iocb->ki_pos, res);
 		}
 	}
-	if (exclusive_lock)
-		inode_unlock(inode);
-	else
-		inode_unlock_shared(inode);
+	fuse_dio_unlock(iocb, exclusive);
 
 	return res;
 }
@@ -1664,7 +1729,7 @@ static void fuse_writepage_free(struct fuse_writepage_args *wpa)
 		__free_page(ap->pages[i]);
 
 	if (wpa->ia.ff)
-		fuse_file_put(wpa->ia.ff, false, false);
+		fuse_file_put(wpa->ia.ff, false);
 
 	kfree(ap->pages);
 	kfree(wpa);
@@ -1906,7 +1971,7 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc)
 	ff = __fuse_write_file_get(fi);
 	err = fuse_flush_times(inode, ff);
 	if (ff)
-		fuse_file_put(ff, false, false);
+		fuse_file_put(ff, false);
 
 	return err;
 }
@@ -2304,7 +2369,7 @@ static int fuse_writepages(struct address_space *mapping,
 		fuse_writepages_send(&data);
 	}
 	if (data.ff)
-		fuse_file_put(data.ff, false, false);
+		fuse_file_put(data.ff, false);
 
 	kfree(data.orig_pages);
 out:
@@ -2459,21 +2524,39 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
 	struct fuse_file *ff = file->private_data;
 	struct fuse_conn *fc = ff->fm->fc;
+	int rc;
 
 	/* DAX mmap is superior to direct_io mmap */
 	if (FUSE_IS_DAX(file_inode(file)))
 		return fuse_dax_mmap(file, vma);
 
+	/*
+	 * FOPEN_DIRECT_IO handling is special compared to O_DIRECT, as it
+	 * does not allow MAP_SHARED mmap without FUSE_DIRECT_IO_ALLOW_MMAP.
+	 */
 	if (ff->open_flags & FOPEN_DIRECT_IO) {
-		/* Can't provide the coherency needed for MAP_SHARED
-		 * if FUSE_DIRECT_IO_RELAX isn't set.
+		/*
+		 * Can't provide the coherency needed for MAP_SHARED
+		 * if FUSE_DIRECT_IO_ALLOW_MMAP isn't set.
 		 */
-		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_relax)
+		if ((vma->vm_flags & VM_MAYSHARE) && !fc->direct_io_allow_mmap)
 			return -ENODEV;
 
 		invalidate_inode_pages2(file->f_mapping);
 
-		return generic_file_mmap(file, vma);
+		if (!(vma->vm_flags & VM_MAYSHARE)) {
+			/* MAP_PRIVATE */
+			return generic_file_mmap(file, vma);
+		}
+
+		/*
+		 * First mmap of direct_io file enters caching inode io mode.
+		 * Also waits for parallel dio writers to go into serial mode
+		 * (exclusive instead of shared lock).
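The corresponding user-visible contract: MAP_PRIVATE keeps working on FOPEN_DIRECT_IO files, while MAP_SHARED is refused with ENODEV unless the server negotiated FUSE_DIRECT_IO_ALLOW_MMAP. A small sketch, with the mount path assumed::

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/mman.h>
  #include <unistd.h>

  int main(void)
  {
  	int fd = open("/mnt/fuse/file", O_RDONLY);
  	void *p;

  	if (fd < 0)
  		return 1;
  	p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
  	if (p == MAP_FAILED && errno == ENODEV)
  		fprintf(stderr, "shared mmap refused in direct-io mode\n");
  	else if (p != MAP_FAILED)
  		munmap(p, 4096);  /* FUSE_DIRECT_IO_ALLOW_MMAP was negotiated */
  	close(fd);
  	return 0;
  }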
+ */ + rc = fuse_file_cached_io_start(file_inode(file), ff); + if (rc) + return rc; } if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) @@ -3242,7 +3325,9 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags) INIT_LIST_HEAD(&fi->write_files); INIT_LIST_HEAD(&fi->queued_writes); fi->writectr = 0; + fi->iocachectr = 0; init_waitqueue_head(&fi->page_waitq); + init_waitqueue_head(&fi->direct_io_waitq); fi->writepages = RB_ROOT; if (IS_ENABLED(CONFIG_FUSE_DAX)) diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index bf0b85d0b95c7d21bebe30f88f0ac92b1088aeb3..3e5b3d4a4a1d32c0c194e1c7aec42ba715e8e5e4 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -31,12 +31,13 @@ #include #include #include +#include /** Default max number of pages that can be used in a single read request */ #define FUSE_DEFAULT_MAX_PAGES_PER_REQ 32 /** Maximum of max_pages received in init_out */ -#define FUSE_MAX_MAX_PAGES 256 +#define FUSE_MAX_MAX_PAGES 1024 /** Bias for fi->writectr, meaning new writepages must not be sent */ #define FUSE_NOWRITE INT_MIN @@ -98,7 +99,7 @@ struct fuse_inode { u64 attr_version; union { - /* Write related fields (regular file only) */ + /* read/write io cache (regular file only) */ struct { /* Files usable in writepage. Protected by fi->lock */ struct list_head write_files; @@ -110,9 +111,15 @@ struct fuse_inode { * (FUSE_NOWRITE) means more writes are blocked */ int writectr; + /** Number of files/maps using page cache */ + int iocachectr; + /* Waitq for writepage completion */ wait_queue_head_t page_waitq; + /* waitq for direct-io completion */ + wait_queue_head_t direct_io_waitq; + /* List of writepage requestst (pending or sent) */ struct rb_root writepages; }; @@ -172,6 +179,8 @@ enum { FUSE_I_BAD, /* Has btime */ FUSE_I_BTIME, + /* Wants or already has page cache IO */ + FUSE_I_CACHE_IO_MODE, }; struct fuse_conn; @@ -229,6 +238,9 @@ struct fuse_file { /** Wait queue head for poll */ wait_queue_head_t poll_wait; + /** Does file hold a fi->iocachectr refcount? */ + enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode; + /** Has flock been performed on this file? */ bool flock:1; }; @@ -797,8 +809,8 @@ struct fuse_conn { /* Is tmpfile not implemented by fs? */ unsigned int no_tmpfile:1; - /* relax restrictions in FOPEN_DIRECT_IO mode */ - unsigned int direct_io_relax:1; + /* Relax restrictions to allow shared mmap in FOPEN_DIRECT_IO mode */ + unsigned int direct_io_allow_mmap:1; /* Is statx not implemented by fs? 
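For scale, the FUSE_MAX_MAX_PAGES bump above raises the ceiling on what a server may negotiate via max_pages in the INIT reply: with 4 KiB pages the largest single read/write payload grows from 256 * 4 KiB = 1 MiB to 1024 * 4 KiB = 4 MiB. The arithmetic, spelled out (actual values still depend on the server's request and the page size)::

  #include <stdio.h>

  #define FUSE_MAX_MAX_PAGES	1024	/* was 256 before this change */

  int main(void)
  {
  	unsigned long page_size = 4096;	/* assuming 4 KiB pages */

  	printf("max payload: %lu MiB\n",
  	       (FUSE_MAX_MAX_PAGES * page_size) >> 20);	/* prints 4 */
  	return 0;
  }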
*/ unsigned int no_statx:1; @@ -1016,14 +1028,9 @@ void fuse_read_args_fill(struct fuse_io_args *ia, struct file *file, loff_t pos, size_t count, int opcode); -/** - * Send OPEN or OPENDIR request - */ -int fuse_open_common(struct inode *inode, struct file *file, bool isdir); - -struct fuse_file *fuse_file_alloc(struct fuse_mount *fm); +struct fuse_file *fuse_file_alloc(struct fuse_mount *fm, bool release); void fuse_file_free(struct fuse_file *ff); -void fuse_finish_open(struct inode *inode, struct file *file); +int fuse_finish_open(struct inode *inode, struct file *file); void fuse_sync_release(struct fuse_inode *fi, struct fuse_file *ff, unsigned int flags); @@ -1333,11 +1340,30 @@ int fuse_fileattr_get(struct dentry *dentry, struct fileattr *fa); int fuse_fileattr_set(struct mnt_idmap *idmap, struct dentry *dentry, struct fileattr *fa); -/* file.c */ +/* iomode.c */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff); +int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff); +void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff); +int fuse_file_io_open(struct file *file, struct inode *inode); +void fuse_file_io_release(struct fuse_file *ff, struct inode *inode); + +/* file.c */ struct fuse_file *fuse_file_open(struct fuse_mount *fm, u64 nodeid, unsigned int open_flags, bool isdir); void fuse_file_release(struct inode *inode, struct fuse_file *ff, unsigned int open_flags, fl_owner_t id, bool isdir); +typedef int (*fuse_mount_cb_t)(struct file *file); +extern fuse_mount_cb_t fuse_mount_callback; + +#if IS_ENABLED(CONFIG_VIRT_FUSE) +static inline bool is_virtfuse_device(struct file *file) +{ + return iminor(file_inode(file)) != FUSE_MINOR; +} +#else +static inline bool is_virtfuse_device(struct file *file) { return false; } +#endif + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 2e4eb7cf26fb33363698c990a296b50a35aa792b..ce5a36e2ee6c3311cc6bafdcd2143d986f5e1741 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -63,6 +63,9 @@ MODULE_PARM_DESC(max_user_congthresh, static struct file_system_type fuseblk_fs_type; #endif +fuse_mount_cb_t fuse_mount_callback; +EXPORT_SYMBOL_GPL(fuse_mount_callback); + struct fuse_forget_link *fuse_alloc_forget(void) { return kzalloc(sizeof(struct fuse_forget_link), GFP_KERNEL_ACCOUNT); @@ -1060,6 +1063,11 @@ static struct dentry *fuse_get_parent(struct dentry *child) return parent; } +/* only for fid encoding; no support for file handle */ +static const struct export_operations fuse_export_fid_operations = { + .encode_fh = fuse_encode_fh, +}; + static const struct export_operations fuse_export_operations = { .fh_to_dentry = fuse_fh_to_dentry, .fh_to_parent = fuse_fh_to_parent, @@ -1232,8 +1240,12 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args, fc->init_security = 1; if (flags & FUSE_CREATE_SUPP_GROUP) fc->create_supp_group = 1; - if (flags & FUSE_DIRECT_IO_RELAX) - fc->direct_io_relax = 1; + if (flags & FUSE_DIRECT_IO_ALLOW_MMAP) + fc->direct_io_allow_mmap = 1; + if (flags & FUSE_NO_EXPORT_SUPPORT) + fm->sb->s_export_op = &fuse_export_fid_operations; + if (flags & FUSE_DELETE_STALE) + fc->delete_stale = 1; } else { ra_pages = fc->max_read / PAGE_SIZE; fc->no_lock = 1; @@ -1280,7 +1292,9 @@ void fuse_send_init(struct fuse_mount *fm) FUSE_NO_OPENDIR_SUPPORT | FUSE_EXPLICIT_INVAL_DATA | FUSE_HANDLE_KILLPRIV_V2 | FUSE_SETXATTR_EXT | FUSE_INIT_EXT | FUSE_SECURITY_CTX | FUSE_CREATE_SUPP_GROUP | - FUSE_HAS_EXPIRE_ONLY | 
FUSE_DIRECT_IO_RELAX; + FUSE_HAS_EXPIRE_ONLY | FUSE_DIRECT_IO_ALLOW_MMAP | + FUSE_NO_EXPORT_SUPPORT | FUSE_HAS_RESEND | + FUSE_DELETE_STALE; #ifdef CONFIG_FUSE_DAX if (fm->fc->dax) flags |= FUSE_MAP_ALIGNMENT; @@ -1473,6 +1487,7 @@ static int fuse_fill_super_submount(struct super_block *sb, sb->s_bdi = bdi_get(parent_sb->s_bdi); sb->s_xattr = parent_sb->s_xattr; + sb->s_export_op = parent_sb->s_export_op; sb->s_time_gran = parent_sb->s_time_gran; sb->s_blocksize = parent_sb->s_blocksize; sb->s_blocksize_bits = parent_sb->s_blocksize_bits; @@ -1658,10 +1673,12 @@ static int fuse_fill_super(struct super_block *sb, struct fs_context *fsc) /* * Require mount to happen from the same user namespace which - * opened /dev/fuse to prevent potential attacks. + * opened /dev/fuse to prevent potential attacks. While for + * virtual fuse, the mount is always bound to init_user_ns. */ - if ((ctx->file->f_op != &fuse_dev_operations) || - (ctx->file->f_cred->user_ns != sb->s_user_ns)) + if (!is_virtfuse_device(ctx->file) && + ((ctx->file->f_op != &fuse_dev_operations) || + (ctx->file->f_cred->user_ns != sb->s_user_ns))) return -EINVAL; ctx->fudptr = &ctx->file->private_data; @@ -1696,6 +1713,7 @@ static int fuse_get_tree(struct fs_context *fsc) struct fuse_conn *fc; struct fuse_mount *fm; struct super_block *sb; + bool is_virtfuse; int err; fc = kmalloc(sizeof(*fc), GFP_KERNEL); @@ -1732,16 +1750,29 @@ static int fuse_get_tree(struct fs_context *fsc) * Allow creating a fuse mount with an already initialized fuse * connection */ + is_virtfuse = is_virtfuse_device(ctx->file); fud = READ_ONCE(ctx->file->private_data); - if (ctx->file->f_op == &fuse_dev_operations && fud) { + if ((ctx->file->f_op == &fuse_dev_operations || is_virtfuse) && fud) { fsc->sget_key = fud->fc; sb = sget_fc(fsc, fuse_test_super, fuse_set_no_super); err = PTR_ERR_OR_ZERO(sb); if (!IS_ERR(sb)) fsc->root = dget(sb->s_root); } else { + /* bind sb to init_user_ns for virtfuse */ + if (is_virtfuse) + fsc->global = true; err = get_tree_nodev(fsc, fuse_fill_super); } + + if (is_virtfuse && !err) { + if (WARN_ON(!fuse_mount_callback)) + err = -EINVAL; + else + err = fuse_mount_callback(ctx->file); + if (err) + fc_drop_locked(fsc); + } out: if (fsc->s_fs_info) fuse_mount_destroy(fm); diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c new file mode 100644 index 0000000000000000000000000000000000000000..ea47c76b9df114fb0c83ee1650722f2ada2c27d7 --- /dev/null +++ b/fs/fuse/iomode.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * FUSE inode io modes. + * + * Copyright (c) 2024 CTERA Networks. + */ + +#include "fuse_i.h" + +#include +#include +#include +#include + +/* + * Return true if need to wait for new opens in caching mode. + */ +static inline bool fuse_is_io_cache_wait(struct fuse_inode *fi) +{ + return READ_ONCE(fi->iocachectr) < 0; +} + +/* + * Start cached io mode. + * + * Blocks new parallel dio writes and waits for the in-progress parallel dio + * writes to complete. + */ +int fuse_file_cached_io_start(struct inode *inode, struct fuse_file *ff) +{ + struct fuse_inode *fi = get_fuse_inode(inode); + + /* There are no io modes if server does not implement open */ + if (!ff->release_args) + return 0; + + spin_lock(&fi->lock); + /* + * Setting the bit advises new direct-io writes to use an exclusive + * lock - without it the wait below might be forever. 
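The sign convention behind fi->iocachectr is the crux of the helpers that follow: positive counts page-cache users, negative counts parallel-dio users, zero means no mode is claimed. An illustrative model of the transitions; this is not kernel code, and the real helpers hold fi->lock while the cached side waits instead of failing::

  /* ctr > 0: ctr cached users (open/mmap through the page cache)
   * ctr < 0: -ctr uncached users (parallel dio writers)
   * ctr = 0: unclaimed; either side may establish its mode
   */
  static int ctr;

  static int enter_cached(void)	/* kernel: waits until ctr >= 0 instead */
  {
  	if (ctr < 0)
  		return -1;
  	ctr++;
  	return 0;
  }

  static int enter_uncached(void)	/* kernel: fails with -ETXTBSY */
  {
  	if (ctr > 0)
  		return -1;
  	ctr--;
  	return 0;
  }

  static void leave_cached(void)   { ctr--; }
  static void leave_uncached(void) { ctr++; }	/* kernel: wakes waiters at 0 */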
+	 */
+	while (fuse_is_io_cache_wait(fi)) {
+		set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+		spin_unlock(&fi->lock);
+		wait_event(fi->direct_io_waitq, !fuse_is_io_cache_wait(fi));
+		spin_lock(&fi->lock);
+	}
+	WARN_ON(ff->iomode == IOM_UNCACHED);
+	if (ff->iomode == IOM_NONE) {
+		ff->iomode = IOM_CACHED;
+		if (fi->iocachectr == 0)
+			set_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+		fi->iocachectr++;
+	}
+	spin_unlock(&fi->lock);
+	return 0;
+}
+
+static void fuse_file_cached_io_end(struct inode *inode, struct fuse_file *ff)
+{
+	struct fuse_inode *fi = get_fuse_inode(inode);
+
+	spin_lock(&fi->lock);
+	WARN_ON(fi->iocachectr <= 0);
+	WARN_ON(ff->iomode != IOM_CACHED);
+	ff->iomode = IOM_NONE;
+	fi->iocachectr--;
+	if (fi->iocachectr == 0)
+		clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
+	spin_unlock(&fi->lock);
+}
+
+/* Start strictly uncached io mode where cache access is not allowed */
+int fuse_file_uncached_io_start(struct inode *inode, struct fuse_file *ff)
+{
+	struct fuse_inode *fi = get_fuse_inode(inode);
+	int err = 0;
+
+	spin_lock(&fi->lock);
+	if (fi->iocachectr > 0) {
+		err = -ETXTBSY;
+		goto unlock;
+	}
+	WARN_ON(ff->iomode != IOM_NONE);
+	fi->iocachectr--;
+	ff->iomode = IOM_UNCACHED;
+unlock:
+	spin_unlock(&fi->lock);
+	return err;
+}
+
+void fuse_file_uncached_io_end(struct inode *inode, struct fuse_file *ff)
+{
+	struct fuse_inode *fi = get_fuse_inode(inode);
+
+	spin_lock(&fi->lock);
+	WARN_ON(fi->iocachectr >= 0);
+	WARN_ON(ff->iomode != IOM_UNCACHED);
+	ff->iomode = IOM_NONE;
+	fi->iocachectr++;
+	if (!fi->iocachectr)
+		wake_up(&fi->direct_io_waitq);
+	spin_unlock(&fi->lock);
+}
+
+/* Request access to submit new io to inode via open file */
+int fuse_file_io_open(struct file *file, struct inode *inode)
+{
+	struct fuse_file *ff = file->private_data;
+	int err;
+
+	/*
+	 * io modes are not relevant with DAX and with a server that does
+	 * not implement open.
+	 */
+	if (FUSE_IS_DAX(inode) || !ff->release_args)
+		return 0;
+
+	/*
+	 * FOPEN_PARALLEL_DIRECT_WRITES requires FOPEN_DIRECT_IO.
+	 */
+	if (!(ff->open_flags & FOPEN_DIRECT_IO))
+		ff->open_flags &= ~FOPEN_PARALLEL_DIRECT_WRITES;
+
+	/*
+	 * First caching file open enters caching inode io mode.
+	 *
+	 * Note that if user opens a file with O_DIRECT, but server did
+	 * not specify FOPEN_DIRECT_IO, a later fcntl() could remove O_DIRECT,
+	 * so we put the inode in caching mode to prevent parallel dio.
+	 */
+	if (ff->open_flags & FOPEN_DIRECT_IO)
+		return 0;
+
+	err = fuse_file_cached_io_start(inode, ff);
+	if (err)
+		goto fail;
+
+	return 0;
+
+fail:
+	pr_debug("failed to open file in requested io mode (open_flags=0x%x, err=%i).\n",
+		 ff->open_flags, err);
+	/*
+	 * The file open mode determines the inode io mode.
+	 * Using incorrect open mode is a server mistake, which results in
+	 * user visible failure of open() with EIO error.
+	 */
+	return -EIO;
+}
+
+/* No more pending io and no new io possible to inode via open/mmapped file */
+void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
+{
+	/*
+	 * Last parallel dio close allows caching inode io mode.
+	 * Last caching file close exits caching inode io mode.
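Because fuse_file_io_open() derives the inode io mode from the open reply, the policy is effectively chosen by the server. A sketch of the relevant reply fields in raw uapi terms; a libfuse-based daemon would set the equivalent fuse_file_info bits instead::

  #include <linux/fuse.h>

  /* Opt a file into uncached, parallel direct IO. As enforced above,
   * FOPEN_PARALLEL_DIRECT_WRITES only takes effect together with
   * FOPEN_DIRECT_IO. */
  static void fill_open_reply(struct fuse_open_out *out, uint64_t fh)
  {
  	out->fh = fh;
  	out->open_flags = FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES;
  }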
+ */ + switch (ff->iomode) { + case IOM_NONE: + /* Nothing to do */ + break; + case IOM_UNCACHED: + fuse_file_uncached_io_end(inode, ff); + break; + case IOM_CACHED: + fuse_file_cached_io_end(inode, ff); + break; + } +} diff --git a/fs/fuse/virtfuse.c b/fs/fuse/virtfuse.c new file mode 100644 index 0000000000000000000000000000000000000000..b6ef6fbf449005e7827cb9e3dbebf5be1ac18bc7 --- /dev/null +++ b/fs/fuse/virtfuse.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2022, Alibaba Cloud + * + * Virtual FUSE Device + */ + +#include +#include +#include +#include +#include +#include +#include +#include "fuse_i.h" +#include "../mount.h" + +static uint virtfuse_dev_count = 64; +module_param_named(max_devices, virtfuse_dev_count, uint, 0644); +MODULE_PARM_DESC(max_devices, "Maximum number of devices supported"); + +struct virtfuse_dev { + char name[16]; /* adequate space for "virtfuse%d" */ + struct miscdevice dev; + atomic_t refcount; + spinlock_t lock; + struct fuse_conn *fc; +}; + +static struct virtfuse_dev *virtfuse_devices; +static struct file_operations virtfuse_fops; + +static inline struct virtfuse_dev *virtfuse_dev_get(struct file *file) +{ + dev_t devt = file_inode(file)->i_rdev; + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device->devt == devt) + return vfud; + } + + pr_err("virtfuse: failed to find virtfuse for minor %d\n", MINOR(devt)); + return NULL; +} + +static int virtfuse_dev_release(struct inode *inode, struct file *file) +{ + struct fuse_dev *fud = READ_ONCE(file->private_data); + struct virtfuse_dev *vfud; + + if (!fud) + return 0; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + /* + * 1. For the initial fuse mount after RESET, the mount may fail + * halfway and thus virtfuse_dev_alloc() is not called yet. + * + * 2. When the old fuse daemon has exited and RESET has not been + * done yet, refcount is zero while vfud->fc is still there. In + * this case, if a new fuse daemon tries to mount, the mount + * will fail and virtfuse_dev_release() will be called then. + */ + spin_lock(&vfud->lock); + if (vfud->fc && vfud->fc == fud->fc) + WARN_ON(atomic_dec_if_positive(&vfud->refcount) < 0); + spin_unlock(&vfud->lock); + + return fuse_dev_release(inode, file); +} + +static int virtfuse_dev_alloc(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_dev *fud = READ_ONCE(file->private_data); + int ret = 0; + + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + /* the initial fuse mount after RESET */ + WARN_ON(atomic_read(&vfud->refcount) != 0); + atomic_set(&vfud->refcount, 1); + vfud->fc = fuse_conn_get(fud->fc); + } else if (atomic_read(&vfud->refcount) == 0) { + pr_err_ratelimited("%s: please reset before mount\n", vfud->dev.name); + ret = -EBUSY; + } else if (fud->fc != vfud->fc) { + pr_err_ratelimited("%s: can't be mounted multiple times\n", vfud->dev.name); + ret = -EBUSY; + } + spin_unlock(&vfud->lock); + return ret; +} + +static int virtfuse_dev_clone(struct file *file, unsigned long arg) +{ + int fd, ret; + struct file *old; + + if (get_user(fd, (__u32 __user *)arg)) + return -EFAULT; + + old = fget(fd); + if (!old) + return -EINVAL; + /* + * Don't clone fuse_conn between normal fuse device and virtfuse, + * or different virtfuse. 
+ */ + if (file_inode(old)->i_rdev != file_inode(file)->i_rdev) { + fput(old); + return -EINVAL; + } + + ret = fuse_dev_operations.unlocked_ioctl(file, FUSE_DEV_IOC_CLONE, arg); + if (!ret) + atomic_inc(&virtfuse_dev_get(file)->refcount); + fput(old); + return ret; +} + +static int virtfuse_clone(struct file *file) +{ + struct virtfuse_dev *vfud; + struct fuse_conn *fc; + struct fuse_dev *fud; + int err; + + if (file->private_data) + return -EEXIST; + + vfud = virtfuse_dev_get(file); + if (!vfud) + return -EUCLEAN; + + spin_lock(&vfud->lock); + if (!vfud->fc) { + spin_unlock(&vfud->lock); + return -ENODATA; + } + + /* acquire temporary refcount */ + fc = fuse_conn_get(vfud->fc); + atomic_inc(&vfud->refcount); + spin_unlock(&vfud->lock); + + /* follow fuse_device_clone() to clone the connection */ + fud = fuse_dev_alloc_install(fc); + if (fud) { + atomic_inc(&vfud->refcount); + file->private_data = fud; + atomic_inc(&fc->dev_count); + err = 0; + } else { + err = -ENOMEM; + } + + /* drop temporary refcount */ + atomic_dec(&vfud->refcount); + fuse_conn_put(fc); + return err; +} + +static int virtfuse_reset(struct file *file) +{ + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + + if (!vfud) + return -EUCLEAN; + + if (atomic_read(&vfud->refcount)) + return -EBUSY; + + spin_lock(&vfud->lock); + if (vfud->fc) { + fc = vfud->fc; + vfud->fc = NULL; + } + spin_unlock(&vfud->lock); + + if (fc) + fuse_conn_put(fc); + return 0; +} + +static int fillbuf(char *buf, unsigned int len, unsigned int *pcount, + const char *fmt, ...) +{ + va_list args; + unsigned int count = *pcount; + int step; + + va_start(args, fmt); + step = vsnprintf(buf + count, len - count, fmt, args); + va_end(args); + if (step >= len - count) + return -EMSGSIZE; + + *pcount += step; + return 0; +} + +static int virtfuse_get_mounts(struct file *file, unsigned long arg) +{ + struct virtfuse_mounts_buf vbuf, __user *u_vbuf; + struct virtfuse_dev *vfud = virtfuse_dev_get(file); + struct fuse_conn *fc = NULL; + struct fuse_mount *fm; + struct super_block *sb; + struct mount *mnt; + unsigned int count = 0, len; + int order, step, ret = 0; + char *buf, *name, *p; + void __user *u_buf; + + if (!vfud) + return -EUCLEAN; + + u_vbuf = (struct virtfuse_mounts_buf __user *)arg; + u_buf = (void __user *)u_vbuf->buf; + if (copy_from_user(&vbuf, u_vbuf, sizeof(vbuf)) != 0) + return -EFAULT; + + len = vbuf.len; + if (len <= 1) + return -EMSGSIZE; + + /* init the user buffer as an empty string */ + if (clear_user(u_buf, 1) != 0) + return -EFAULT; + + spin_lock(&vfud->lock); + if (vfud->fc) + fc = fuse_conn_get(vfud->fc); + spin_unlock(&vfud->lock); + if (!fc) + return 0; + + down_read(&fc->killsb); + fm = list_first_entry_or_null(&fc->mounts, struct fuse_mount, fc_entry); + if (!fm || !fm->sb) + goto out_up_killsb; + sb = fm->sb; + + name = __getname(); + if (!name) { + ret = -ENOMEM; + goto out_up_killsb; + } + + order = get_order(len); + buf = (void *)__get_free_pages(GFP_KERNEL, order); + if (!buf) { + ret = -ENOMEM; + goto out_putname; + } + + /* connection state */ + ret = fillbuf(buf, len, &count, "%s\n", + fc->connected ? "Connected" : "Aborted"); + if (ret) + goto out_free_pages; + + /* open coded lock_mount_hash() */ + write_seqlock(&mount_lock); + + list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { + struct path path = { + .dentry = mnt->mnt.mnt_root, + .mnt = &mnt->mnt + }; + + /* skip slave mounts */ + if (mnt->mnt_master) + continue; + + /* skip private mounts, e.g. 
from clone_private_mount() */ + if (!mnt->mnt_ns) + continue; + + /* mountpoint */ + p = d_absolute_path_locked(&path, name, PATH_MAX); + if (IS_ERR(p)) { + ret = PTR_ERR(p); + break; + } + ret = fillbuf(buf, len, &count, "%s %s", + mnt->mnt_devname ? : "none", p); + if (ret) + break; + + /* fstype */ + if (sb->s_subtype && sb->s_subtype[0]) + sprintf(name, "%s.%s", sb->s_type->name, sb->s_subtype); + else + sprintf(name, "%s", sb->s_type->name); + ret = fillbuf(buf, len, &count, " %s", name); + if (ret) + break; + + /* mount options */ + step = sprintf(name, "%s,user_id=%u,group_id=%u", + __mnt_is_readonly(&mnt->mnt) ? "ro" : "rw", + from_kuid_munged(fc->user_ns, fc->user_id), + from_kgid_munged(fc->user_ns, fc->group_id)); + if (fc->default_permissions) + step += sprintf(name + step, ",default_permissions"); + if (fc->allow_other) + step += sprintf(name + step, ",allow_other"); + ret = fillbuf(buf, len, &count, " %s\n", name); + if (ret) + break; + } + + /* open coded unlock_mount_hash() */ + write_sequnlock(&mount_lock); + + /* also copy the trailing null (ensured by vsnprintf) */ + if (!ret && (copy_to_user(u_buf, buf, count + 1) != 0)) + ret = -EFAULT; + +out_free_pages: + free_pages((unsigned long)buf, order); +out_putname: + __putname(name); +out_up_killsb: + up_read(&fc->killsb); + fuse_conn_put(fc); + return ret; +} + +static long virtfuse_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case FUSE_DEV_IOC_CLONE: + return virtfuse_dev_clone(file, arg); + case VIRTFUSE_IOC_CLONE: + return virtfuse_clone(file); + case VIRTFUSE_IOC_RESET: + return virtfuse_reset(file); + case VIRTFUSE_IOC_GET_MOUNTS: + return virtfuse_get_mounts(file, arg); + default: + return fuse_dev_operations.unlocked_ioctl(file, cmd, arg); + } +} + +static void virtfuse_free_devices(void) +{ + struct virtfuse_dev *vfud; + int i; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + if (vfud->dev.this_device) + misc_deregister(&vfud->dev); + WARN_ON(atomic_read(&vfud->refcount) != 0); + } + kfree(virtfuse_devices); + virtfuse_devices = NULL; +} + +static int __init virtfuse_init(void) +{ + struct virtfuse_dev *vfud; + int i, ret; + + if (virtfuse_dev_count == 0) { + pr_err("virtfuse: max_devices is zero\n"); + return -EINVAL; + } else if (virtfuse_dev_count > VIRT_FUSE_MAX_DEVICES) { + pr_err("virtfuse: max_devices is too big, max %d\n", + VIRT_FUSE_MAX_DEVICES); + return -EINVAL; + } + + virtfuse_fops = fuse_dev_operations; + virtfuse_fops.owner = THIS_MODULE; + virtfuse_fops.compat_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.unlocked_ioctl = virtfuse_dev_ioctl; + virtfuse_fops.release = virtfuse_dev_release; + + virtfuse_devices = kcalloc(virtfuse_dev_count, + sizeof(struct virtfuse_dev), GFP_KERNEL); + if (virtfuse_devices == NULL) + return -ENOMEM; + + for (i = 0; i < virtfuse_dev_count; i++) { + vfud = &virtfuse_devices[i]; + spin_lock_init(&vfud->lock); + snprintf(vfud->name, sizeof(vfud->name), "virtfuse%d", i); + + vfud->dev.name = vfud->name; + vfud->dev.minor = MISC_DYNAMIC_MINOR; + vfud->dev.fops = &virtfuse_fops; + + ret = misc_register(&vfud->dev); + if (ret) { + pr_err("virtfuse: failed to create virtfuse%d\n", i); + vfud->dev.this_device = NULL; + virtfuse_free_devices(); + return ret; + } + } + + fuse_mount_callback = virtfuse_dev_alloc; + return 0; +} + +static void __exit virtfuse_exit(void) +{ + fuse_mount_callback = NULL; + virtfuse_free_devices(); +} + +module_init(virtfuse_init); +module_exit(virtfuse_exit); + 
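Putting the ioctls together, a hedged user-space sketch of daemon recovery on a virtfuse device; the VIRTFUSE_IOC_* numbers and the /dev node naming come from this series' uapi header and the misc device name above, neither shown in this hunk::

  #include <fcntl.h>
  #include <stdio.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/virtfuse.h>	/* assumed uapi header from this series */

  int main(void)
  {
  	int fd = open("/dev/virtfuse0", O_RDWR);

  	if (fd < 0)
  		return 1;

  	/* A restarted daemon first tries to re-attach to the surviving
  	 * fuse_conn of the previous instance... */
  	if (ioctl(fd, VIRTFUSE_IOC_CLONE) == 0) {
  		/* ...on success this fd behaves like a cloned /dev/fuse fd
  		 * and the daemon can resume reading requests. */
  	} else if (ioctl(fd, VIRTFUSE_IOC_RESET) == 0) {
  		/* ...otherwise, once the old daemon's references are gone,
  		 * RESET drops the stale connection so a fresh mount can
  		 * establish a new one. */
  	} else {
  		perror("virtfuse recovery");
  	}

  	close(fd);
  	return 0;
  }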
+MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Virtual FUSE Device"); +MODULE_AUTHOR("Jingbo Xu "); +MODULE_AUTHOR("Jiang Liu "); diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 118699fff2f900ccd9a1857dcbe175b81efac988..4f00fe701e8f6cbe0050cf598a6e7e4af638b957 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -85,7 +85,16 @@ __releases(&journal->j_state_lock) spin_unlock(&journal->j_list_lock); write_unlock(&journal->j_state_lock); if (chkpt) { - jbd2_log_do_checkpoint(journal); + DEFINE_WAIT(wait); + + prepare_to_wait(&journal->j_wait_done_checkpoint, &wait, + TASK_UNINTERRUPTIBLE); + mutex_unlock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + finish_wait(&journal->j_wait_done_checkpoint, &wait); + jbd2_debug(1, "wake up checkpoint thread.\n"); } else if (jbd2_cleanup_journal_tail(journal) == 0) { /* We were able to recover space; yay! */ ; diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 8d6f934c3d9543d3c87a77f5ec3566df7a2b2a37..3a3dbbc79d8d52d0d553a8aac400768e7da31a8a 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c @@ -438,6 +438,7 @@ void jbd2_journal_commit_transaction(journal_t *journal) journal->j_fc_off = 0; J_ASSERT(commit_transaction->t_state == T_RUNNING); commit_transaction->t_state = T_LOCKED; + WRITE_ONCE(commit_transaction->t_locked_time, jiffies); trace_jbd2_commit_locking(journal, commit_transaction); stats.run.rs_wait = commit_transaction->t_max_wait; diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 30dec2bd2ecc26abb8d67a66b0d04f84e08dc718..cf478fa813882854af259a186cf127831f7ce532 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c @@ -191,6 +191,9 @@ static int kjournald2(void *arg) if (journal->j_flags & JBD2_UNMOUNT) goto end_loop; + if (kthread_should_stop()) + goto end_loop; + jbd2_debug(1, "commit_sequence=%u, commit_request=%u\n", journal->j_commit_sequence, journal->j_commit_request); @@ -261,9 +264,40 @@ static int kjournald2(void *arg) return 0; } +static int jbd2_checkpoint_thread(void *arg) +{ + journal_t *journal = arg; + DEFINE_WAIT(wait); + + jbd2_debug(1, "%s\n", __func__); + journal->j_checkpoint_task = current; + +loop: + prepare_to_wait(&journal->j_wait_checkpoint, &wait, + TASK_INTERRUPTIBLE); + wake_up_all(&journal->j_wait_done_checkpoint); + schedule(); + finish_wait(&journal->j_wait_checkpoint, &wait); + + if (journal->j_flags & JBD2_UNMOUNT) + goto end_loop; + + mutex_lock(&journal->j_checkpoint_mutex); + jbd2_log_do_checkpoint(journal); + mutex_unlock(&journal->j_checkpoint_mutex); + + goto loop; + +end_loop: + journal->j_checkpoint_task = NULL; + wake_up_all(&journal->j_wait_done_checkpoint); + jbd2_debug(1, "%s exiting.\n", __func__); + return 0; +} + static int jbd2_journal_start_thread(journal_t *journal) { - struct task_struct *t; + struct task_struct *t, *t_ckpt; t = kthread_run(kjournald2, journal, "jbd2/%s", journal->j_devname); @@ -271,6 +305,17 @@ static int jbd2_journal_start_thread(journal_t *journal) return PTR_ERR(t); wait_event(journal->j_wait_done_commit, journal->j_task != NULL); + + t_ckpt = kthread_run(jbd2_checkpoint_thread, journal, "jbd2-ckpt/%s", + journal->j_devname); + if (IS_ERR(t_ckpt)) { + kthread_stop(t); + return PTR_ERR(t_ckpt); + } + + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task != NULL); + return 0; } @@ -286,6 +331,14 @@ static void journal_kill_thread(journal_t *journal) write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); + + 
while (journal->j_checkpoint_task) { + mutex_lock(&journal->j_checkpoint_mutex); + wake_up(&journal->j_wait_checkpoint); + wait_event(journal->j_wait_done_checkpoint, + journal->j_checkpoint_task == NULL); + mutex_unlock(&journal->j_checkpoint_mutex); + } } /* @@ -389,6 +442,9 @@ int jbd2_journal_write_metadata_buffer(transaction_t *transaction, } kunmap_local(mapped_data); + /* force copy-out */ + if (need_copy_out == 0 && journal->j_force_copy) + need_copy_out = 1; /* * Do we need to do a data copy? */ @@ -1204,25 +1260,78 @@ static const struct seq_operations jbd2_seq_info_ops = { .show = jbd2_seq_info_show, }; -static int jbd2_seq_info_open(struct inode *inode, struct file *file) +static void *jbd2_seq_stats_start(struct seq_file *seq, loff_t *pos) +{ + return *pos ? NULL : SEQ_START_TOKEN; +} + +static void *jbd2_seq_stats_next(struct seq_file *seq, void *v, loff_t *pos) +{ + (*pos)++; + return NULL; +} + +static int jbd2_seq_stats_show(struct seq_file *seq, void *v) +{ + struct jbd2_stats_proc_session *s = seq->private; + + if (v != SEQ_START_TOKEN) + return 0; + + seq_printf(seq, "%lu %lu %d %lu %lu %lu %lu %lu %lu %llu %u %u %u %d %d\n", + s->stats->ts_tid, s->stats->ts_requested, + s->journal->j_max_transaction_buffers, s->stats->run.rs_wait, + s->stats->run.rs_request_delay, s->stats->run.rs_running, + s->stats->run.rs_locked, s->stats->run.rs_flushing, + s->stats->run.rs_logging, + div_u64(s->journal->j_average_commit_time, NSEC_PER_MSEC), + s->stats->run.rs_handle_count, s->stats->run.rs_blocks, + s->stats->run.rs_blocks_logged, HZ, jiffies_to_msecs(HZ)); + return 0; +} + +static void jbd2_seq_stats_stop(struct seq_file *seq, void *v) +{ +} + +static const struct seq_operations jbd2_seq_stats_ops = { + .start = jbd2_seq_stats_start, + .next = jbd2_seq_stats_next, + .stop = jbd2_seq_stats_stop, + .show = jbd2_seq_stats_show, +}; + +static struct jbd2_stats_proc_session *__jbd2_seq_open(struct inode *inode, + struct file *file) { journal_t *journal = pde_data(inode); struct jbd2_stats_proc_session *s; - int rc, size; + int size; s = kmalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) - return -ENOMEM; + return ERR_PTR(-ENOMEM); size = sizeof(struct transaction_stats_s); s->stats = kmalloc(size, GFP_KERNEL); if (s->stats == NULL) { kfree(s); - return -ENOMEM; + return ERR_PTR(-ENOMEM); } spin_lock(&journal->j_history_lock); memcpy(s->stats, &journal->j_stats, size); s->journal = journal; spin_unlock(&journal->j_history_lock); + return s; +} + +static int jbd2_seq_info_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); rc = seq_open(file, &jbd2_seq_info_ops); if (rc == 0) { @@ -1233,7 +1342,6 @@ static int jbd2_seq_info_open(struct inode *inode, struct file *file) kfree(s); } return rc; - } static int jbd2_seq_info_release(struct inode *inode, struct file *file) @@ -1252,6 +1360,143 @@ static const struct proc_ops jbd2_info_proc_ops = { .proc_release = jbd2_seq_info_release, }; +static int jbd2_seq_stats_open(struct inode *inode, struct file *file) +{ + struct jbd2_stats_proc_session *s; + int rc; + + s = __jbd2_seq_open(inode, file); + if (IS_ERR(s)) + return PTR_ERR(s); + + rc = seq_open(file, &jbd2_seq_stats_ops); + if (rc == 0) { + struct seq_file *m = file->private_data; + + m->private = s; + } else { + kfree(s->stats); + kfree(s); + } + return rc; +} + +static int jbd2_seq_stats_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = 
file->private_data; + struct jbd2_stats_proc_session *s = seq->private; + + kfree(s->stats); + kfree(s); + return seq_release(inode, file); +} + +static const struct proc_ops jbd2_stats_proc_ops = { + .proc_open = jbd2_seq_stats_open, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_release = jbd2_seq_stats_release, +}; + +static int jbd2_seq_force_copy_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%u\n", journal->j_force_copy); + return 0; +} + +static int jbd2_seq_force_copy_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_force_copy_show, journal); +} + +/* Worst case buffer size needed for holding an integer. */ +#define PROC_NUMBUF 13 + +static ssize_t jbd2_seq_force_copy_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned int force_copy; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtouint(strstrip(buffer), 0, &force_copy); + if (err) + goto out; + journal->j_force_copy = force_copy; +out: + return err < 0 ? err : count; +} + +static const struct proc_ops jbd2_force_copy_proc_ops = { + .proc_open = jbd2_seq_force_copy_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_force_copy_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int jbd2_seq_stall_thresh_show(struct seq_file *m, void *v) +{ + journal_t *journal = m->private; + + seq_printf(m, "%lu\n", journal->j_stall_thresh); + return 0; +} + +static int jbd2_seq_stall_thresh_open(struct inode *inode, struct file *filp) +{ + journal_t *journal = pde_data(inode); + + return single_open(filp, jbd2_seq_stall_thresh_show, journal); +} + +static ssize_t jbd2_seq_stall_thresh_write(struct file *file, + const char __user *buf, size_t count, loff_t *offset) +{ + struct inode *inode = file_inode(file); + journal_t *journal = pde_data(inode); + char buffer[PROC_NUMBUF]; + unsigned long long stall_thresh; + int err; + + memset(buffer, 0, sizeof(buffer)); + if (count > sizeof(buffer) - 1) + count = sizeof(buffer) - 1; + if (copy_from_user(buffer, buf, count)) { + err = -EFAULT; + goto out; + } + + err = kstrtoull(strstrip(buffer), 0, &stall_thresh); + if (err) + goto out; + WRITE_ONCE(journal->j_stall_thresh, stall_thresh); +out: + return err < 0 ? 
err : count; +} + +static const struct proc_ops jbd2_stall_thresh_proc_ops = { + .proc_open = jbd2_seq_stall_thresh_open, + .proc_read = seq_read, + .proc_write = jbd2_seq_stall_thresh_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + static struct proc_dir_entry *proc_jbd2_stats; static void jbd2_stats_proc_init(journal_t *journal) @@ -1260,12 +1505,21 @@ static void jbd2_stats_proc_init(journal_t *journal) if (journal->j_proc_entry) { proc_create_data("info", S_IRUGO, journal->j_proc_entry, &jbd2_info_proc_ops, journal); + proc_create_data("force_copy", 0644, journal->j_proc_entry, + &jbd2_force_copy_proc_ops, journal); + proc_create_data("stats", 0444, journal->j_proc_entry, + &jbd2_stats_proc_ops, journal); + proc_create_data("stall_thresh", 0644, journal->j_proc_entry, + &jbd2_stall_thresh_proc_ops, journal); } } static void jbd2_stats_proc_exit(journal_t *journal) { remove_proc_entry("info", journal->j_proc_entry); + remove_proc_entry("force_copy", journal->j_proc_entry); + remove_proc_entry("stats", journal->j_proc_entry); + remove_proc_entry("stall_thresh", journal->j_proc_entry); remove_proc_entry(journal->j_devname, proc_jbd2_stats); } @@ -1542,6 +1796,8 @@ static journal_t *journal_init_common(struct block_device *bdev, init_waitqueue_head(&journal->j_wait_transaction_locked); init_waitqueue_head(&journal->j_wait_done_commit); + init_waitqueue_head(&journal->j_wait_checkpoint); + init_waitqueue_head(&journal->j_wait_done_checkpoint); init_waitqueue_head(&journal->j_wait_commit); init_waitqueue_head(&journal->j_wait_updates); init_waitqueue_head(&journal->j_wait_reserved); @@ -1557,6 +1813,7 @@ static journal_t *journal_init_common(struct block_device *bdev, journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE); journal->j_min_batch_time = 0; journal->j_max_batch_time = 15000; /* 15ms */ + journal->j_stall_thresh = JBD2_DEFAULT_TRANS_STALL_THRESH; atomic_set(&journal->j_reserved_credits, 0); lockdep_init_map(&journal->j_trans_commit_map, "jbd2_handle", &jbd2_trans_commit_key, 0); diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 5f08b5fd105a311584ffd1694667eb187f0e3975..471825347a2fc829982b2459b9a431db8b6af647 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -478,6 +478,11 @@ static handle_t *new_handle(int nblocks) return NULL; handle->h_total_credits = nblocks; handle->h_ref = 1; + handle->h_pre_start_jiffies = jiffies; +#ifdef CONFIG_SCHEDSTATS + handle->h_sched_wait_sum = current->stats.wait_sum; + handle->h_io_wait_sum = current->stats.iowait_sum; +#endif return handle; } @@ -1108,7 +1113,9 @@ do_get_write_access(handle_t *handle, struct journal_head *jh, if (buffer_shadow(bh)) { JBUFFER_TRACE(jh, "on shadow: sleep"); spin_unlock(&jh->b_state_lock); + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); wait_on_bit_io(&bh->b_state, BH_Shadow, TASK_UNINTERRUPTIBLE); + task_clear_wait_res(); goto repeat; } @@ -1942,6 +1949,40 @@ int jbd2_journal_stop(handle_t *handle) wait_for_commit = 1; } + do { + unsigned long transaction_locked_time, delta; + unsigned long journal_space_wait; + u64 sched_wait, io_wait; + + transaction_locked_time = READ_ONCE(transaction->t_locked_time); + if (!transaction_locked_time) + break; + + delta = jiffies_to_msecs(jiffies - transaction_locked_time); + if (delta < READ_ONCE(journal->j_stall_thresh)) + break; + + journal_space_wait = handle->h_start_jiffies - + handle->h_pre_start_jiffies; +#ifdef CONFIG_SCHEDSTATS + sched_wait = current->stats.wait_sum - + handle->h_sched_wait_sum; + 
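+		/*
+		 * These schedstats deltas cover the whole lifetime of the
+		 * handle: time this task spent runnable-but-waiting and in
+		 * iowait since new_handle() sampled the counters. The
+		 * tracepoint below reports both in milliseconds.
+		 */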
io_wait = current->stats.iowait_sum - + handle->h_io_wait_sum; +#else + sched_wait = 0; + io_wait = 0; +#endif + trace_jbd2_slow_handle_stats(journal->j_fs_dev->bd_dev, + transaction->t_tid, handle->h_type, handle->h_line_no, + jiffies - handle->h_start_jiffies, handle->h_sync, + handle->h_requested_credits, + handle->h_requested_credits - handle->h_total_credits, + delta, jiffies_to_msecs(journal_space_wait), + div_u64(sched_wait, NSEC_PER_MSEC), + div_u64(io_wait, NSEC_PER_MSEC)); + } while (0); + /* * Once stop_this_handle() drops t_updates, the transaction could start * committing on us and eventually disappear. So we must not diff --git a/fs/namei.c b/fs/namei.c index 94565bd7e73f6f41c176513ff15a68961fa351d8..18a2eafff04dee94e565d283bb1e04a49ddedb9e 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1033,6 +1033,9 @@ static int sysctl_protected_hardlinks __read_mostly; static int sysctl_protected_fifos __read_mostly; static int sysctl_protected_regular __read_mostly; +int sysctl_hardlink_cross_projid __read_mostly; +EXPORT_SYMBOL_GPL(sysctl_hardlink_cross_projid); + #ifdef CONFIG_SYSCTL static struct ctl_table namei_sysctls[] = { { @@ -1071,6 +1074,16 @@ static struct ctl_table namei_sysctls[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_TWO, }, + { + .procname = "hardlink_cross_projid", + .data = &sysctl_hardlink_cross_projid, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + + }, { } }; diff --git a/fs/namespace.c b/fs/namespace.c index e157efc54023a051faa7dcf0b7d91813dbec7134..cb43082d1a84bac1c621013fb6bb3df28f13c3e0 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -98,6 +98,7 @@ EXPORT_SYMBOL_GPL(fs_kobj); * tree or hash is modified or when a vfsmount structure is modified. 
 */
 __cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
+EXPORT_SYMBOL_GPL(mount_lock);
 
 static inline void lock_mount_hash(void)
 {
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index c71d185980c08f1f36112be8c26c83b82b406d47..d2bcd72e6827b92131428a8c480a397d7625ffc9 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -403,7 +403,8 @@ static int ovl_lower_dir(const char *name, struct path *path,
 	    (ofs->config.index && ofs->config.upperdir)) && !fh_type) {
 		ofs->config.index = false;
 		ofs->config.nfs_export = false;
-		pr_warn("fs on '%s' does not support file handles, falling back to index=off,nfs_export=off.\n",
+		pr_warn_ratelimited("fs on '%s' does not support file handles, "
+			"falling back to index=off,nfs_export=off.\n",
 			name);
 	}
 	ofs->nofh |= !fh_type;
@@ -526,11 +527,15 @@ static int ovl_setup_trap(struct super_block *sb, struct dentry *dir,
 static int ovl_report_in_use(struct ovl_fs *ofs, const char *name)
 {
 	if (ofs->config.index) {
-		pr_err("%s is in-use as upperdir/workdir of another mount, mount with '-o index=off' to override exclusive upperdir protection.\n",
+		pr_err("%s is in-use as upperdir/workdir of another mount, "
+			"mount with '-o index=off' to override exclusive "
+			"upperdir protection.\n",
 			name);
 		return -EBUSY;
 	} else {
-		pr_warn("%s is in-use as upperdir/workdir of another mount, accessing files from both mounts will result in undefined behavior.\n",
+		pr_warn_ratelimited("%s is in-use as upperdir/workdir of "
+			"another mount, accessing files from both mounts will "
+			"result in undefined behavior.\n",
 			name);
 		return 0;
 	}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 2c2efbe685d872201175a02377b818b1dbf31892..342a2f1657ed975fd744289ca65ebc31fca04368 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -484,6 +484,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 	unsigned long rsslim = 0;
 	unsigned long flags;
 	int exit_code = task->exit_code;
+	struct task_struct *init_tsk;
 
 	state = *get_task_state(task);
 	vsize = eip = esp = 0;
@@ -577,6 +578,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
 
 	start_time = nsec_to_clock_t(timens_add_boottime_ns(task->start_boottime));
 
+	/*
+	 * While uptime in a container is rebased to the container start
+	 * time, task start times need the same fix, otherwise "ps" will
+	 * show wrong start times.
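+	 * Subtracting the container init task's start_boottime below
+	 * rebases the value, e.g. a task forked five seconds after the
+	 * container started reports a start time of ~5s.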
+ */ + rcu_read_lock(); + if (in_rich_container(current)) { + init_tsk = task_active_pid_ns(current)->child_reaper; + start_time -= nsec_to_clock_t(init_tsk->start_boottime); + } + rcu_read_unlock(); + seq_put_decimal_ull(m, "", pid_nr_ns(pid, ns)); seq_puts(m, " ("); proc_task_name(m, task, false); diff --git a/fs/proc/base.c b/fs/proc/base.c index ffd54617c35478e92a9f6bef67013e16e6cd3183..f6f5d4a210be568c88c69bdcd428f35968dfbdf7 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -568,6 +568,15 @@ static int proc_oom_score(struct seq_file *m, struct pid_namespace *ns, return 0; } +static int proc_wait_res(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) +{ + seq_printf(m, "%d %px %lu %lu\n", task->wait_res_type, task->wait_folio, + task->wait_moment, jiffies); + + return 0; +} + struct limit_names { const char *name; const char *unit; @@ -3352,6 +3361,7 @@ static const struct pid_entry tgid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx) @@ -3691,6 +3701,7 @@ static const struct pid_entry tid_base_stuff[] = { ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages), ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat), #endif + ONE("wait_res", 0444, proc_wait_res), }; static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx) diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index 817981e57223ef62a362f9c57319c125e1e9b6b8..7a7e443a58c8fd31456290b272d767c76d3b05c5 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -9,19 +9,43 @@ #include #include #include +#include #include "internal.h" static int loadavg_proc_show(struct seq_file *m, void *v) { unsigned long avnrun[3]; + unsigned int nr_R = 0; + struct cpumask cpuset_allowed; + int i; - get_avenrun(avnrun, FIXED_1/200, 0); + rcu_read_lock(); + if (in_rich_container(current)) { + struct task_struct *init_tsk; + enum rich_container_source from; + + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + + rich_container_source(&from); + rich_container_get_avenrun(from, init_tsk, avnrun, FIXED_1/200, 0, false); + rich_container_get_cpuset_cpus(&cpuset_allowed); + for_each_cpu(i, &cpuset_allowed) + nr_R += rich_container_get_running(from, init_tsk, i); + put_task_struct(init_tsk); + } else { + get_avenrun(avnrun, FIXED_1/200, 0); + nr_R = nr_running(); + } + rcu_read_unlock(); seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %u/%d %d\n", LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]), LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), - nr_running(), nr_threads, + nr_R, nr_threads, idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c index 45af9a989d4040135a4fe18acc9b5ef055a2affc..fc14e63d0c15f9602beaf3e816fe255efc5d92e2 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -20,6 +20,7 @@ #include #include #include "internal.h" +#include void __attribute__((weak)) arch_report_meminfo(struct seq_file *m) { @@ -35,43 +36,72 @@ static int meminfo_proc_show(struct seq_file *m, void *v) { struct sysinfo i; unsigned long committed; - long cached; - long available; - unsigned long pages[NR_LRU_LISTS]; unsigned long sreclaimable, sunreclaim; int lru; - si_meminfo(&i); - si_swapinfo(&i); - committed = 
vm_memory_committed(); + struct mem_cgroup *memcg = NULL; + struct sysinfo_ext ext; - cached = global_node_page_state(NR_FILE_PAGES) - - total_swapcache_pages() - i.bufferram; - if (cached < 0) - cached = 0; +#ifdef CONFIG_MEMCG + rcu_read_lock(); + if (in_rich_container(current)) { + memcg = rich_container_get_memcg(); + } + rcu_read_unlock(); +#endif - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + if (!memcg) { + si_meminfo(&i); + si_swapinfo(&i); - available = si_mem_available(); + ext.cached = global_node_page_state(NR_FILE_PAGES) - + total_swapcache_pages() - i.bufferram; + if (ext.cached < 0) + ext.cached = 0; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) { + ext.lrupages[lru] = + global_node_page_state(NR_LRU_BASE + lru); + } + ext.available = si_mem_available(); + ext.file_dirty = global_node_page_state(NR_FILE_DIRTY); + ext.writeback = global_node_page_state(NR_WRITEBACK); + ext.anon_mapped = global_node_page_state(NR_ANON_MAPPED); + ext.file_mapped = global_node_page_state(NR_FILE_MAPPED); + ext.slab_reclaimable = + global_node_page_state(NR_SLAB_RECLAIMABLE_B); + ext.slab_unreclaimable = + global_node_page_state(NR_SLAB_UNRECLAIMABLE_B); + ext.kernel_stack_kb = + global_node_page_state(NR_KERNEL_STACK_KB); + ext.writeback_temp = global_node_page_state(NR_WRITEBACK_TEMP); + ext.anon_thps = global_node_page_state(NR_ANON_THPS); + ext.shmem_thps = global_node_page_state(NR_SHMEM_THPS); + ext.shmem_pmd_mapped = + global_node_page_state(NR_SHMEM_PMDMAPPED); + } else { + memcg_meminfo(memcg, &i, &ext); + } + + committed = percpu_counter_read_positive(&vm_committed_as); sreclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B); sunreclaim = global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B); show_val_kb(m, "MemTotal: ", i.totalram); show_val_kb(m, "MemFree: ", i.freeram); - show_val_kb(m, "MemAvailable: ", available); + show_val_kb(m, "MemAvailable: ", ext.available); show_val_kb(m, "Buffers: ", i.bufferram); - show_val_kb(m, "Cached: ", cached); + show_val_kb(m, "Cached: ", ext.cached); show_val_kb(m, "SwapCached: ", total_swapcache_pages()); - show_val_kb(m, "Active: ", pages[LRU_ACTIVE_ANON] + - pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive: ", pages[LRU_INACTIVE_ANON] + - pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Active(anon): ", pages[LRU_ACTIVE_ANON]); - show_val_kb(m, "Inactive(anon): ", pages[LRU_INACTIVE_ANON]); - show_val_kb(m, "Active(file): ", pages[LRU_ACTIVE_FILE]); - show_val_kb(m, "Inactive(file): ", pages[LRU_INACTIVE_FILE]); - show_val_kb(m, "Unevictable: ", pages[LRU_UNEVICTABLE]); + show_val_kb(m, "Active: ", ext.lrupages[LRU_ACTIVE_ANON] + + ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive: ", ext.lrupages[LRU_INACTIVE_ANON] + + ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Active(anon): ", ext.lrupages[LRU_ACTIVE_ANON]); + show_val_kb(m, "Inactive(anon): ", ext.lrupages[LRU_INACTIVE_ANON]); + show_val_kb(m, "Active(file): ", ext.lrupages[LRU_ACTIVE_FILE]); + show_val_kb(m, "Inactive(file): ", ext.lrupages[LRU_INACTIVE_FILE]); + show_val_kb(m, "Unevictable: ", ext.lrupages[LRU_UNEVICTABLE]); show_val_kb(m, "Mlocked: ", global_zone_page_state(NR_MLOCK)); #ifdef CONFIG_HIGHMEM @@ -95,22 +125,19 @@ static int meminfo_proc_show(struct seq_file *m, void *v) (unsigned long)atomic_read(&zswap_stored_pages) << (PAGE_SHIFT - 10)); #endif - show_val_kb(m, "Dirty: ", - global_node_page_state(NR_FILE_DIRTY)); - show_val_kb(m, "Writeback: ", - 
global_node_page_state(NR_WRITEBACK)); - show_val_kb(m, "AnonPages: ", - global_node_page_state(NR_ANON_MAPPED)); - show_val_kb(m, "Mapped: ", - global_node_page_state(NR_FILE_MAPPED)); + show_val_kb(m, "Dirty: ", ext.file_dirty); + show_val_kb(m, "Writeback: ", ext.writeback); + show_val_kb(m, "AnonPages: ", ext.anon_mapped); + show_val_kb(m, "Mapped: ", ext.file_mapped); show_val_kb(m, "Shmem: ", i.sharedram); show_val_kb(m, "KReclaimable: ", sreclaimable + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE)); - show_val_kb(m, "Slab: ", sreclaimable + sunreclaim); - show_val_kb(m, "SReclaimable: ", sreclaimable); - show_val_kb(m, "SUnreclaim: ", sunreclaim); - seq_printf(m, "KernelStack: %8lu kB\n", - global_node_page_state(NR_KERNEL_STACK_KB)); + show_val_kb(m, "Slab: ", + ext.slab_reclaimable + ext.slab_unreclaimable); + + show_val_kb(m, "SReclaimable: ", ext.slab_reclaimable); + show_val_kb(m, "SUnreclaim: ", ext.slab_unreclaimable); + seq_printf(m, "KernelStack: %8lu kB\n", ext.kernel_stack_kb); #ifdef CONFIG_SHADOW_CALL_STACK seq_printf(m, "ShadowCallStack:%8lu kB\n", global_node_page_state(NR_KERNEL_SCS_KB)); @@ -123,8 +150,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v) show_val_kb(m, "NFS_Unstable: ", 0); show_val_kb(m, "Bounce: ", global_zone_page_state(NR_BOUNCE)); - show_val_kb(m, "WritebackTmp: ", - global_node_page_state(NR_WRITEBACK_TEMP)); + show_val_kb(m, "WritebackTmp: ", ext.writeback_temp); show_val_kb(m, "CommitLimit: ", vm_commit_limit()); show_val_kb(m, "Committed_AS: ", committed); seq_printf(m, "VmallocTotal: %8lu kB\n", @@ -141,12 +167,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) #endif #ifdef CONFIG_TRANSPARENT_HUGEPAGE - show_val_kb(m, "AnonHugePages: ", - global_node_page_state(NR_ANON_THPS)); - show_val_kb(m, "ShmemHugePages: ", - global_node_page_state(NR_SHMEM_THPS)); - show_val_kb(m, "ShmemPmdMapped: ", - global_node_page_state(NR_SHMEM_PMDMAPPED)); + show_val_kb(m, "AnonHugePages: ", ext.anon_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemHugePages: ", ext.shmem_thps * HPAGE_PMD_NR); + show_val_kb(m, "ShmemPmdMapped: ", ext.shmem_pmd_mapped * HPAGE_PMD_NR); show_val_kb(m, "FileHugePages: ", global_node_page_state(NR_FILE_THPS)); show_val_kb(m, "FilePmdMapped: ", @@ -168,6 +191,11 @@ static int meminfo_proc_show(struct seq_file *m, void *v) arch_report_meminfo(m); +#ifdef CONFIG_MEMCG + if (memcg) + css_put(&memcg->css); +#endif + return 0; } diff --git a/fs/proc/stat.c b/fs/proc/stat.c index da60956b29156451740530f9e37ba76b39849e56..9c1d734f006991ac2e8bc81f28797ef1ba6b65a3 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #ifndef arch_irq_stat_cpu #define arch_irq_stat_cpu(cpu) 0 @@ -38,7 +40,7 @@ u64 get_idle_time(struct kernel_cpustat *kcs, int cpu) return idle; } -static u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) +u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu) { u64 iowait, iowait_usecs = -1ULL; @@ -81,13 +83,19 @@ static void show_all_irqs(struct seq_file *p) static int show_stat(struct seq_file *p, void *v) { - int i, j; + int i, j, seq = 0; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; u64 sum = 0; u64 sum_softirq = 0; unsigned int per_softirq_sums[NR_SOFTIRQS] = {0}; struct timespec64 boottime; + struct cpumask cpuset_allowed; + unsigned int nr_runnable = 0; + struct task_struct *init_tsk = NULL; + struct cpuacct_usage_result res; + enum rich_container_source from; + bool rich_container; user = 
nice = system = idle = iowait = irq = softirq = steal = 0; @@ -96,24 +104,55 @@ static int show_stat(struct seq_file *p, void *v) /* shift boot timestamp according to the timens offset */ timens_sub_boottime(&boottime); + rcu_read_lock(); + rich_container = in_rich_container(current); + if (rich_container) { + /* fix btime in containers */ + read_lock(&tasklist_lock); + init_tsk = task_active_pid_ns(current)->child_reaper; + get_task_struct(init_tsk); + read_unlock(&tasklist_lock); + boottime.tv_sec += init_tsk->start_time / NSEC_PER_SEC; + + rich_container_get_cpuset_cpus(&cpuset_allowed); + rich_container_source(&from); + for_each_cpu(i, &cpuset_allowed) { + rich_container_get_usage(from, init_tsk, i, &res); + user += res.user; + nice += res.nice; + system += res.system; + idle += res.idle; + iowait += res.iowait; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest_nice += res.guest_nice; + } + } else { + for_each_possible_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + user += cpustat[CPUTIME_USER]; + nice += cpustat[CPUTIME_NICE]; + system += cpustat[CPUTIME_SYSTEM]; + idle += get_idle_time(&kcpustat, i); + iowait += get_iowait_time(&kcpustat, i); + irq += cpustat[CPUTIME_IRQ]; + softirq += cpustat[CPUTIME_SOFTIRQ]; + steal += cpustat[CPUTIME_STEAL]; + guest += cpustat[CPUTIME_GUEST]; + guest_nice += cpustat[CPUTIME_GUEST_NICE]; + } + } + rcu_read_unlock(); + for_each_possible_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - user += cpustat[CPUTIME_USER]; - nice += cpustat[CPUTIME_NICE]; - system += cpustat[CPUTIME_SYSTEM]; - idle += get_idle_time(&kcpustat, i); - iowait += get_iowait_time(&kcpustat, i); - irq += cpustat[CPUTIME_IRQ]; - softirq += cpustat[CPUTIME_SOFTIRQ]; - steal += cpustat[CPUTIME_STEAL]; - guest += cpustat[CPUTIME_GUEST]; - guest_nice += cpustat[CPUTIME_GUEST_NICE]; - sum += kstat_cpu_irqs_sum(i); - sum += arch_irq_stat_cpu(i); + sum += kstat_cpu_irqs_sum(i); + sum += arch_irq_stat_cpu(i); for (j = 0; j < NR_SOFTIRQS; j++) { unsigned int softirq_stat = kstat_softirqs_cpu(j, i); @@ -136,40 +175,85 @@ static int show_stat(struct seq_file *p, void *v) seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); seq_putc(p, '\n'); - for_each_online_cpu(i) { - struct kernel_cpustat kcpustat; - u64 *cpustat = kcpustat.cpustat; - - kcpustat_cpu_fetch(&kcpustat, i); - - /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ - user = cpustat[CPUTIME_USER]; - nice = cpustat[CPUTIME_NICE]; - system = cpustat[CPUTIME_SYSTEM]; - idle = get_idle_time(&kcpustat, i); - iowait = get_iowait_time(&kcpustat, i); - irq = cpustat[CPUTIME_IRQ]; - softirq = cpustat[CPUTIME_SOFTIRQ]; - steal = cpustat[CPUTIME_STEAL]; - guest = cpustat[CPUTIME_GUEST]; - guest_nice = cpustat[CPUTIME_GUEST_NICE]; - seq_printf(p, "cpu%d", i); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); - seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice)); - seq_putc(p, '\n'); + rcu_read_lock(); + 
if (rich_container) { + for_each_cpu(i, &cpuset_allowed) { + rich_container_get_usage(from, init_tsk, i, &res); + + seq_printf(p, "cpu%d", seq++); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.user)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.nice)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.system)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.idle)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.iowait)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.irq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.softirq)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.steal)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(res.guest_nice)); + seq_putc(p, '\n'); + } + } else { + for_each_online_cpu(i) { + struct kernel_cpustat kcpustat; + u64 *cpustat = kcpustat.cpustat; + + kcpustat_cpu_fetch(&kcpustat, i); + + /* Copy values here to work around gcc-2.95.3, gcc-2.96 */ + user = cpustat[CPUTIME_USER]; + nice = cpustat[CPUTIME_NICE]; + system = cpustat[CPUTIME_SYSTEM]; + idle = get_idle_time(&kcpustat, i); + iowait = get_iowait_time(&kcpustat, i); + irq = cpustat[CPUTIME_IRQ]; + softirq = cpustat[CPUTIME_SOFTIRQ]; + steal = cpustat[CPUTIME_STEAL]; + guest = cpustat[CPUTIME_GUEST]; + guest_nice = cpustat[CPUTIME_GUEST_NICE]; + + seq_printf(p, "cpu%d", i); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(user)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(system)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal)); + seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest)); + seq_put_decimal_ull(p, " ", + nsec_to_clock_t(guest_nice)); + seq_putc(p, '\n'); + } } + rcu_read_unlock(); + seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); show_all_irqs(p); + rcu_read_lock(); + if (rich_container) { + for_each_cpu(i, &cpuset_allowed) + nr_runnable += rich_container_get_running(from, init_tsk, i); + } else + nr_runnable = nr_running(); + rcu_read_unlock(); + + if (rich_container) + put_task_struct(init_tsk); + seq_printf(p, "\nctxt %llu\n" "btime %llu\n" @@ -179,7 +263,7 @@ static int show_stat(struct seq_file *p, void *v) nr_context_switches(), (unsigned long long)boottime.tv_sec, total_forks, - nr_running(), + nr_runnable, nr_iowait()); seq_put_decimal_ull(p, "softirq ", (unsigned long long)sum_softirq); diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c index b5343d209381aae892d2423602607f1ce05e479e..591909b4d1113aa01a9f9bf18dceae833d749bd4 100644 --- a/fs/proc/uptime.c +++ b/fs/proc/uptime.c @@ -7,6 +7,8 @@ #include #include #include +#include +#include #include "internal.h" static int uptime_proc_show(struct seq_file *m, void *v) @@ -17,16 +19,39 @@ static int uptime_proc_show(struct seq_file *m, void *v) u32 rem; int i; + ktime_get_boottime_ts64(&uptime); + timens_add_boottime(&uptime); + idle_nsec = 0; - for_each_possible_cpu(i) { - struct kernel_cpustat kcs; - kcpustat_cpu_fetch(&kcs, i); - idle_nsec += get_idle_time(&kcs, i); - } + rcu_read_lock(); + if (in_rich_container(current)) { + enum rich_container_source from; + struct task_struct *init_tsk; + struct cpuacct_usage_result res; - ktime_get_boottime_ts64(&uptime); - timens_add_boottime(&uptime); + read_lock(&tasklist_lock); + init_tsk = 
task_active_pid_ns(current)->child_reaper;
+		get_task_struct(init_tsk);
+		read_unlock(&tasklist_lock);
+
+		rich_container_source(&from);
+		for_each_possible_cpu(i) {
+			rich_container_get_usage(from, init_tsk, i, &res);
+			idle_nsec += res.idle;
+		}
+		uptime = timespec64_sub(uptime,
+				ns_to_timespec64(init_tsk->start_time));
+		put_task_struct(init_tsk);
+	} else {
+		for_each_possible_cpu(i) {
+			struct kernel_cpustat kcs;
+
+			kcpustat_cpu_fetch(&kcs, i);
+			idle_nsec += get_idle_time(&kcs, i);
+		}
+	}
+	rcu_read_unlock();
 
 	idle.tv_sec = div_u64_rem(idle_nsec, NSEC_PER_SEC, &rem);
 	idle.tv_nsec = rem;
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 023b91b4e1f0ab2e25ad9291e09e65edbfb6ba76..0d3a9d3b82e4c860e1d7c282469865abca86b46c 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -83,6 +83,7 @@
 #include "../internal.h" /* ugh */
 
 #include
+#include
 
 /*
  * There are five quota SMP locks:
@@ -1288,6 +1289,13 @@ static void flush_warnings(struct dquot_warn *warn)
 static int ignore_hardlimit(struct dquot *dquot)
 {
 	struct mem_dqinfo *info = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
+	bool rich_container;
+
+	rcu_read_lock();
+	rich_container = in_rich_container(current);
+	rcu_read_unlock();
+	if (rich_container)
+		return 0;
 
 	return capable(CAP_SYS_RESOURCE) &&
 	       (info->dqi_format->qf_fmt_id != QFMT_VFS_OLD ||
diff --git a/fs/resctrl/Kconfig b/fs/resctrl/Kconfig
new file mode 100644
index 0000000000000000000000000000000000000000..36a1ddbe6c216faf4ca69eeff8273b2400481491
--- /dev/null
+++ b/fs/resctrl/Kconfig
@@ -0,0 +1,23 @@
+config RESCTRL_FS
+	bool "CPU Resource Control Filesystem (resctrl)"
+	depends on ARCH_HAS_CPU_RESCTRL
+	select KERNFS
+	select PROC_CPU_RESCTRL if PROC_FS
+	help
+	  Resctrl is a filesystem interface
+	  to control allocation and
+	  monitoring of system resources
+	  used by the CPUs.
+
+config RESCTRL_FS_PSEUDO_LOCK
+	bool
+	help
+	  Software mechanism to pin data in a cache portion using
+	  micro-architecture specific knowledge.
+
+config RESCTRL_RMID_DEPENDS_ON_CLOSID
+	bool
+	help
+	  Enabled by the architecture when the RMID values depend on the CLOSID.
+	  This causes the CLOSID allocator to search for a CLOSID with clean
+	  RMIDs.
diff --git a/fs/resctrl/Makefile b/fs/resctrl/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..10fcfb0fdb102a4329e2cbbfdc119e3b288d7db2
--- /dev/null
+++ b/fs/resctrl/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_RESCTRL_FS)		+= rdtgroup.o ctrlmondata.o monitor.o
+obj-$(CONFIG_RESCTRL_FS_PSEUDO_LOCK)	+= pseudo_lock.o
diff --git a/fs/resctrl/ctrlmondata.c b/fs/resctrl/ctrlmondata.c
new file mode 100644
index 0000000000000000000000000000000000000000..a8f2dd66ede3a1c901d92e5b56f2751a721528b2
--- /dev/null
+++ b/fs/resctrl/ctrlmondata.c
@@ -0,0 +1,527 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resource Director Technology (RDT)
+ * - Cache Allocation code.
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Authors:
+ *    Fenghua Yu
+ *    Tony Luck
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include
+#include
+#include
+#include
+#include "internal.h"
+
+struct rdt_parse_data {
+	struct rdtgroup		*rdtgrp;
+	char			*buf;
+};
+
+typedef int (ctrlval_parser_t)(struct rdt_parse_data *data,
+			       struct resctrl_schema *s,
+			       struct rdt_domain *d);
+
+/*
+ * Check whether the MBA bandwidth percentage value is correct.
The value is + * checked against the minimum and max bandwidth values specified by the + * hardware. The allocated bandwidth percentage is rounded to the next + * control step available on the hardware. + */ +static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r) +{ + unsigned long bw; + int ret; + + /* + * Only linear delay values is supported for current Intel SKUs. + */ + if (!r->membw.delay_linear && r->membw.arch_needs_linear) { + rdt_last_cmd_puts("No support for non-linear MB domains\n"); + return false; + } + + ret = kstrtoul(buf, 10, &bw); + if (ret) { + rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf); + return false; + } + + if ((bw < r->membw.min_bw || bw > r->default_ctrl) && + !is_mba_sc(r)) { + rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw, + r->membw.min_bw, r->default_ctrl); + return false; + } + + *data = roundup(bw, (unsigned long)r->membw.bw_gran); + return true; +} + +static int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct resctrl_staged_config *cfg; + u32 closid = data->rdtgrp->closid; + struct rdt_resource *r = s->res; + unsigned long bw_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + if (!bw_validate(data->buf, &bw_val, r)) + return -EINVAL; + + if (is_mba_sc(r)) { + d->mbps_val[closid] = bw_val; + return 0; + } + + cfg->new_ctrl = bw_val; + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Check whether a cache bit mask is valid. + * On Intel CPUs, non-contiguous 1s value support is indicated by CPUID: + * - CPUID.0x10.1:ECX[3]: L3 non-contiguous 1s value supported if 1 + * - CPUID.0x10.2:ECX[3]: L2 non-contiguous 1s value supported if 1 + * + * Haswell does not support a non-contiguous 1s value and additionally + * requires at least two bits set. + * AMD allows non-contiguous bitmasks. + */ +static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r) +{ + unsigned long first_bit, zero_bit, val; + unsigned int cbm_len = r->cache.cbm_len; + int ret; + + ret = kstrtoul(buf, 16, &val); + if (ret) { + rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf); + return false; + } + + if ((r->cache.min_cbm_bits > 0 && val == 0) || val > r->default_ctrl) { + rdt_last_cmd_puts("Mask out of range\n"); + return false; + } + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Are non-contiguous bitmasks allowed? */ + if (!r->cache.arch_has_sparse_bitmasks && + (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) { + rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val); + return false; + } + + if ((zero_bit - first_bit) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("Need at least %d bits in the mask\n", + r->cache.min_cbm_bits); + return false; + } + + *data = val; + return true; +} + +/* + * Read one cache bit mask (hex). Check that it is valid for the current + * resource type. + */ +static int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s, + struct rdt_domain *d) +{ + struct rdtgroup *rdtgrp = data->rdtgrp; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 cbm_val; + + cfg = &d->staged_config[s->conf_type]; + if (cfg->have_new_ctrl) { + rdt_last_cmd_printf("Duplicate domain %d\n", d->id); + return -EINVAL; + } + + /* + * Cannot set up more than one pseudo-locked region in a cache + * hierarchy. 
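+	 * rdtgroup_pseudo_locked_in_hierarchy() below checks the cache
+	 * hierarchy of this domain for an existing pseudo-locked region.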
+ */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + rdtgroup_pseudo_locked_in_hierarchy(d)) { + rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n"); + return -EINVAL; + } + + if (!cbm_validate(data->buf, &cbm_val, r)) + return -EINVAL; + + if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_SHAREABLE) && + rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) { + rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n"); + return -EINVAL; + } + + /* + * The CBM may not overlap with the CBM of another closid if + * either is exclusive. + */ + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) { + rdt_last_cmd_puts("Overlaps with exclusive group\n"); + return -EINVAL; + } + + if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) { + if (rdtgrp->mode == RDT_MODE_EXCLUSIVE || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + rdt_last_cmd_puts("Overlaps with other group\n"); + return -EINVAL; + } + } + + cfg->new_ctrl = cbm_val; + cfg->have_new_ctrl = true; + + return 0; +} + +static ctrlval_parser_t *get_parser(struct rdt_resource *res) +{ + if (res->fflags & RFTYPE_RES_CACHE) + return &parse_cbm; + else + return &parse_bw; +} + +/* + * For each domain in this resource we expect to find a series of: + * id=mask + * separated by ";". The "id" is in decimal, and must match one of + * the "id"s for this resource. + */ +static int parse_line(char *line, struct resctrl_schema *s, + struct rdtgroup *rdtgrp) +{ + ctrlval_parser_t *parse_ctrlval = get_parser(s->res); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + struct rdt_parse_data data; + char *dom = NULL, *id; + struct rdt_domain *d; + unsigned long dom_id; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP && + (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA)) { + rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n"); + return -EINVAL; + } + +next: + if (!line || line[0] == '\0') + return 0; + dom = strsep(&line, ";"); + id = strsep(&dom, "="); + if (!dom || kstrtoul(id, 10, &dom_id)) { + rdt_last_cmd_puts("Missing '=' or non-numeric domain\n"); + return -EINVAL; + } + dom = strim(dom); + list_for_each_entry(d, &r->domains, list) { + if (d->id == dom_id) { + data.buf = dom; + data.rdtgrp = rdtgrp; + if (parse_ctrlval(&data, s, d)) + return -EINVAL; + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + cfg = &d->staged_config[t]; + /* + * In pseudo-locking setup mode and just + * parsed a valid CBM that should be + * pseudo-locked. Only one locked region per + * resource group and domain so just do + * the required initialization for single + * region and return. 
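+				 * The schema, domain and CBM recorded in
+				 * rdtgrp->plr here are consumed later by
+				 * rdtgroup_pseudo_lock_create() when the
+				 * schemata write completes.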
+ */ + rdtgrp->plr->s = s; + rdtgrp->plr->d = d; + rdtgrp->plr->cbm = cfg->new_ctrl; + d->plr = rdtgrp->plr; + return 0; + } + goto next; + } + } + return -EINVAL; +} + +static int rdtgroup_parse_resource(char *resname, char *tok, + struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + + list_for_each_entry(s, &resctrl_schema_all, list) { + if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid) + return parse_line(tok, s, rdtgrp); + } + rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname); + return -EINVAL; +} + +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct resctrl_schema *s; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + char *tok, *resname; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + /* + * No changes to pseudo-locked region allowed. It has to be removed + * and re-created instead. + */ + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = -EINVAL; + rdt_last_cmd_puts("Resource group is pseudo-locked\n"); + goto out; + } + + rdt_staged_configs_clear(); + + while ((tok = strsep(&buf, "\n")) != NULL) { + resname = strim(strsep(&tok, ":")); + if (!tok) { + rdt_last_cmd_puts("Missing ':'\n"); + ret = -EINVAL; + goto out; + } + if (tok[0] == '\0') { + rdt_last_cmd_printf("Missing '%s' value\n", resname); + ret = -EINVAL; + goto out; + } + ret = rdtgroup_parse_resource(resname, tok, rdtgrp); + if (ret) + goto out; + } + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + + /* + * Writes to mba_sc resources update the software controller, + * not the control MSR. + */ + if (is_mba_sc(r)) + continue; + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret) + goto out; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * If pseudo-locking fails we keep the resource group in + * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service + * active and updated for just the domain the pseudo-locked + * region was requested for. 
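+		 * The error is propagated through the return value of the
+		 * write(), so user space can retry or remove the group.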
+ */ + ret = rdtgroup_pseudo_lock_create(rdtgrp); + } + +out: + rdt_staged_configs_clear(); + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid) +{ + struct rdt_resource *r = schema->res; + struct rdt_domain *dom; + bool sep = false; + u32 ctrl_val; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + if (is_mba_sc(r)) + ctrl_val = dom->mbps_val[closid]; + else + ctrl_val = resctrl_arch_get_config(r, dom, closid, + schema->conf_type); + + seq_printf(s, r->format_str, dom->id, max_data_width, + ctrl_val); + sep = true; + } + seq_puts(s, "\n"); +} + +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + struct rdtgroup *rdtgrp; + int ret = 0; + u32 closid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + list_for_each_entry(schema, &resctrl_schema_all, list) { + seq_printf(s, "%s:uninitialized\n", schema->name); + } + } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%s:%d=%x\n", + rdtgrp->plr->s->res->name, + rdtgrp->plr->d->id, + rdtgrp->plr->cbm); + } + } else { + closid = rdtgrp->closid; + list_for_each_entry(schema, &resctrl_schema_all, list) { + if (closid < schema->num_closid) + show_doms(s, schema, closid); + } + } + } else { + ret = -ENOENT; + } + rdtgroup_kn_unlock(of->kn); + return ret; +} + +static int smp_mon_event_count(void *arg) +{ + mon_event_count(arg); + + return 0; +} + +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first) +{ + int cpu; + + /* When picking a CPU from cpu_mask, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + /* + * Setup the parameters to pass to mon_event_count() to read the data. + */ + rr->rgrp = rdtgrp; + rr->evtid = evtid; + rr->r = r; + rr->d = d; + rr->val = 0; + rr->first = first; + rr->arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, evtid); + if (IS_ERR(rr->arch_mon_ctx)) { + rr->err = -EINVAL; + return; + } + + cpu = cpumask_any_housekeeping(&d->cpu_mask, RESCTRL_PICK_ANY_CPU); + + /* + * cpumask_any_housekeeping() prefers housekeeping CPUs, but + * are all the CPUs nohz_full? If yes, pick a CPU to IPI. + * MPAM's resctrl_arch_rmid_read() is unable to read the + * counters on some platforms if its called in IRQ context. 
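+	 * smp_call_on_cpu() runs the read from a kworker on the chosen
+	 * CPU, keeping it in process context; the IPI path is only taken
+	 * when every CPU in the domain is nohz_full.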
+ */ + if (tick_nohz_full_cpu(cpu)) + smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1); + else + smp_call_on_cpu(cpu, smp_mon_event_count, rr, false); + + resctrl_arch_mon_ctx_free(r, evtid, rr->arch_mon_ctx); +} + +int rdtgroup_mondata_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + u32 resid, evtid, domid; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + union mon_data_bits md; + struct rdt_domain *d; + struct rmid_read rr; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto out; + } + + md.priv = of->kn->priv; + resid = md.u.rid; + domid = md.u.domid; + evtid = md.u.evtid; + + r = resctrl_arch_get_resource(resid); + d = resctrl_arch_find_domain(r, domid); + if (IS_ERR_OR_NULL(d)) { + ret = -ENOENT; + goto out; + } + + mon_event_read(&rr, r, d, rdtgrp, evtid, false); + + if (rr.err == -EIO) + seq_puts(m, "Error\n"); + else if (rr.err == -EINVAL) + seq_puts(m, "Unavailable\n"); + else + seq_printf(m, "%llu\n", rr.val); + +out: + rdtgroup_kn_unlock(of->kn); + return ret; +} diff --git a/fs/resctrl/internal.h b/fs/resctrl/internal.h new file mode 100644 index 0000000000000000000000000000000000000000..f73267762a87a1ef8a3b465a3bfac71b9504c6c9 --- /dev/null +++ b/fs/resctrl/internal.h @@ -0,0 +1,340 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _FS_RESCTRL_INTERNAL_H +#define _FS_RESCTRL_INTERNAL_H + +#include +#include +#include +#include +#include +#include + +#include + +/** + * cpumask_any_housekeeping() - Choose any CPU in @mask, preferring those that + * aren't marked nohz_full + * @mask: The mask to pick a CPU from. + * @exclude_cpu:The CPU to avoid picking. + * + * Returns a CPU from @mask, but not @exclude_cpu. If there are housekeeping + * CPUs that don't use nohz_full, these are preferred. Pass + * RESCTRL_PICK_ANY_CPU to avoid excluding any CPUs. + * + * When a CPU is excluded, returns >= nr_cpu_ids if no CPUs are available. + */ +static inline unsigned int +cpumask_any_housekeeping(const struct cpumask *mask, int exclude_cpu) +{ + unsigned int cpu, hk_cpu; + + if (exclude_cpu == RESCTRL_PICK_ANY_CPU) + cpu = cpumask_any(mask); + else + cpu = cpumask_any_but(mask, exclude_cpu); + + if (!IS_ENABLED(CONFIG_NO_HZ_FULL)) + return cpu; + + /* If the CPU picked isn't marked nohz_full nothing more needs doing. 
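+	 * Otherwise fall through and prefer a housekeeping CPU from @mask.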
*/ + if (cpu < nr_cpu_ids && !tick_nohz_full_cpu(cpu)) + return cpu; + + /* Try to find a CPU that isn't nohz_full to use in preference */ + hk_cpu = cpumask_nth_andnot(0, mask, tick_nohz_full_mask); + if (hk_cpu == exclude_cpu) + hk_cpu = cpumask_nth_andnot(1, mask, tick_nohz_full_mask); + + if (hk_cpu < nr_cpu_ids) + cpu = hk_cpu; + + return cpu; +} + +struct rdt_fs_context { + struct kernfs_fs_context kfc; + bool enable_cdpl2; + bool enable_cdpl3; + bool enable_mba_mbps; + bool enable_debug; +}; + +static inline struct rdt_fs_context *rdt_fc2context(struct fs_context *fc) +{ + struct kernfs_fs_context *kfc = fc->fs_private; + + return container_of(kfc, struct rdt_fs_context, kfc); +} + +/** + * struct mon_evt - Entry in the event list of a resource + * @evtid: event id + * @name: name of the event + * @configurable: true if the event is configurable + * @list: entry in &rdt_resource->evt_list + */ +struct mon_evt { + enum resctrl_event_id evtid; + char *name; + bool configurable; + struct list_head list; +}; + +/** + * union mon_data_bits - Monitoring details for each event file + * @priv: Used to store monitoring event data in @u + * as kernfs private data + * @rid: Resource id associated with the event file + * @evtid: Event id associated with the event file + * @domid: The domain to which the event file belongs + * @u: Name of the bit fields struct + */ +union mon_data_bits { + void *priv; + struct { + unsigned int rid : 10; + enum resctrl_event_id evtid : 8; + unsigned int domid : 14; + } u; +}; + +struct rmid_read { + struct rdtgroup *rgrp; + struct rdt_resource *r; + struct rdt_domain *d; + enum resctrl_event_id evtid; + bool first; + int err; + u64 val; + void *arch_mon_ctx; +}; + +extern struct list_head resctrl_schema_all; +extern bool resctrl_mounted; + +enum rdt_group_type { + RDTCTRL_GROUP = 0, + RDTMON_GROUP, + RDT_NUM_GROUP, +}; + +/** + * enum rdtgrp_mode - Mode of a RDT resource group + * @RDT_MODE_SHAREABLE: This resource group allows sharing of its allocations + * @RDT_MODE_EXCLUSIVE: No sharing of this resource group's allocations allowed + * @RDT_MODE_PSEUDO_LOCKSETUP: Resource group will be used for Pseudo-Locking + * @RDT_MODE_PSEUDO_LOCKED: No sharing of this resource group's allocations + * allowed AND the allocations are Cache Pseudo-Locked + * @RDT_NUM_MODES: Total number of modes + * + * The mode of a resource group enables control over the allowed overlap + * between allocations associated with different resource groups (classes + * of service). User is able to modify the mode of a resource group by + * writing to the "mode" resctrl file associated with the resource group. + * + * The "shareable", "exclusive", and "pseudo-locksetup" modes are set by + * writing the appropriate text to the "mode" file. A resource group enters + * "pseudo-locked" mode after the schemata is written while the resource + * group is in "pseudo-locksetup" mode. + */ +enum rdtgrp_mode { + RDT_MODE_SHAREABLE = 0, + RDT_MODE_EXCLUSIVE, + RDT_MODE_PSEUDO_LOCKSETUP, + RDT_MODE_PSEUDO_LOCKED, + + /* Must be last */ + RDT_NUM_MODES, +}; + +/** + * struct mongroup - store mon group's data in resctrl fs. + * @mon_data_kn: kernfs node for the mon_data directory + * @parent: parent rdtgrp + * @crdtgrp_list: child rdtgroup node list + * @rmid: rmid for this rdtgroup + */ +struct mongroup { + struct kernfs_node *mon_data_kn; + struct rdtgroup *parent; + struct list_head crdtgrp_list; + u32 rmid; +}; + +/** + * struct rdtgroup - store rdtgroup's data in resctrl file system. 
+ * @kn: kernfs node + * @rdtgroup_list: linked list for all rdtgroups + * @closid: closid for this rdtgroup + * @cpu_mask: CPUs assigned to this rdtgroup + * @flags: status bits + * @waitcount: how many cpus expect to find this + * group when they acquire rdtgroup_mutex + * @type: indicates type of this rdtgroup - either + * monitor only or ctrl_mon group + * @mon: mongroup related data + * @mode: mode of resource group + * @plr: pseudo-locked region + */ +struct rdtgroup { + struct kernfs_node *kn; + struct list_head rdtgroup_list; + u32 closid; + struct cpumask cpu_mask; + int flags; + atomic_t waitcount; + enum rdt_group_type type; + struct mongroup mon; + enum rdtgrp_mode mode; + struct pseudo_lock_region *plr; +}; + +/* List of all resource groups */ +extern struct list_head rdt_all_groups; + +extern int max_name_width, max_data_width; + +/** + * struct rftype - describe each file in the resctrl file system + * @name: File name + * @mode: Access mode + * @kf_ops: File operations + * @flags: File specific RFTYPE_FLAGS_* flags + * @fflags: File specific RFTYPE_* flags + * @seq_show: Show content of the file + * @write: Write to the file + */ +struct rftype { + char *name; + umode_t mode; + const struct kernfs_ops *kf_ops; + unsigned long flags; + unsigned long fflags; + + int (*seq_show)(struct kernfs_open_file *of, + struct seq_file *sf, void *v); + /* + * write() is the generic write callback which maps directly to + * kernfs write operation and overrides all other operations. + * Maximum write size is determined by ->max_write_len. + */ + ssize_t (*write)(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +}; + +/** + * struct mbm_state - status for each MBM counter in each domain + * @prev_bw_bytes: Previous bytes value read for bandwidth calculation + * @prev_bw: The most recent bandwidth in MBps + */ +struct mbm_state { + u64 prev_bw_bytes; + u32 prev_bw; +}; + +static inline bool is_mba_sc(struct rdt_resource *r) +{ + if (!r) + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + /* + * The software controller support is only applicable to MBA resource. + * Make sure to check for resource type. 
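+	 * Callers may pass a NULL resource: the MBA resource is looked up
+	 * here in that case.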
+ */ + if (r->rid != RDT_RESOURCE_MBA) + return false; + + return r->membw.mba_sc; +} + +extern struct mutex rdtgroup_mutex; +extern struct rdtgroup rdtgroup_default; +extern struct dentry *debugfs_resctrl; + +void rdt_last_cmd_clear(void); +void rdt_last_cmd_puts(const char *s); +__printf(1, 2) +void rdt_last_cmd_printf(const char *fmt, ...); + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn); +void rdtgroup_kn_unlock(struct kernfs_node *kn); +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name); +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask); +ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off); +int rdtgroup_schemata_show(struct kernfs_open_file *of, + struct seq_file *s, void *v); +bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d, + unsigned long cbm, int closid, bool exclusive); +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, struct rdt_domain *d, + unsigned long cbm); +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid); +int rdtgroup_tasks_assigned(struct rdtgroup *r); +int closids_supported(void); +void closid_free(int closid); +int alloc_rmid(u32 closid); +void free_rmid(u32 closid, u32 rmid); +void resctrl_mon_resource_exit(void); +void mon_event_count(void *info); +int rdtgroup_mondata_show(struct seq_file *m, void *arg); +void mon_event_read(struct rmid_read *rr, struct rdt_resource *r, + struct rdt_domain *d, struct rdtgroup *rdtgrp, + int evtid, int first); +int resctrl_mon_resource_init(void); +void mbm_setup_overflow_handler(struct rdt_domain *dom, + unsigned long delay_ms, + int exclude_cpu); +void mbm_handle_overflow(struct work_struct *work); +bool is_mba_sc(struct rdt_resource *r); +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu); +void cqm_handle_limbo(struct work_struct *work); +bool has_busy_rmid(struct rdt_domain *d); +void __check_limbo(struct rdt_domain *d, bool force_free); +void mbm_config_rftype_init(const char *config); +void rdt_staged_configs_clear(void); +bool closid_allocated(unsigned int closid); +int resctrl_find_cleanest_closid(void); + +#ifdef CONFIG_RESCTRL_FS_PSEUDO_LOCK +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp); +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp); +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm); +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d); +int rdt_pseudo_lock_init(void); +void rdt_pseudo_lock_release(void); +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp); +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp); +#else +static inline int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + return false; +} + +static inline bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + return false; +} + +static inline int rdt_pseudo_lock_init(void) { return 0; } +static inline void rdt_pseudo_lock_release(void) { } +static inline int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + return -EOPNOTSUPP; +} + +static inline void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) { } +#endif /* CONFIG_RESCTRL_FS_PSEUDO_LOCK */ + +#endif /* _FS_RESCTRL_INTERNAL_H */ diff --git a/fs/resctrl/monitor.c 
b/fs/resctrl/monitor.c
new file mode 100644
index 0000000000000000000000000000000000000000..51baa0f71b651206d941208d68ee1957d82ad648
--- /dev/null
+++ b/fs/resctrl/monitor.c
@@ -0,0 +1,859 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Resource Director Technology (RDT)
+ * - Monitoring code
+ *
+ * Copyright (C) 2017 Intel Corporation
+ *
+ * Author:
+ *    Vikas Shivappa
+ *
+ * This replaces the perf-based cqm.c, but reuses a lot of code and
+ * data structures originally from Peter Zijlstra and Matt Fleming.
+ *
+ * More information about RDT can be found in the Intel (R) x86 Architecture
+ * Software Developer Manual June 2016, volume 3, section 17.17.
+ */
+
+#include
+#include
+#include
+#include
+#include "internal.h"
+
+/*
+ * struct rmid_entry - dirty tracking for all RMIDs.
+ * @closid:	The CLOSID for this entry.
+ * @rmid:	The RMID for this entry.
+ * @busy:	The number of domains with cached data using this RMID.
+ * @list:	Member of the rmid_free_lru list when busy == 0.
+ *
+ * Depending on the architecture the correct monitor is accessed using
+ * both @closid and @rmid, or @rmid only.
+ *
+ * Take the rdtgroup_mutex when accessing.
+ */
+struct rmid_entry {
+	u32			closid;
+	u32			rmid;
+	int			busy;
+	struct list_head	list;
+};
+
+/*
+ * @rmid_free_lru - A least recently used list of free RMIDs.
+ * These RMIDs are guaranteed to have an occupancy less than the
+ * threshold occupancy.
+ */
+static LIST_HEAD(rmid_free_lru);
+
+/*
+ * @closid_num_dirty_rmid - The number of dirty RMIDs each CLOSID has.
+ * Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
+ * Indexed by CLOSID. Protected by rdtgroup_mutex.
+ */
+static u32 *closid_num_dirty_rmid;
+
+/*
+ * @rmid_limbo_count - count of currently unused but (potentially)
+ * dirty RMIDs.
+ * This counts RMIDs that no one is currently using but that
+ * may have an occupancy value > resctrl_rmid_realloc_threshold. The user
+ * can change the threshold occupancy value.
+ */
+static unsigned int rmid_limbo_count;
+
+/*
+ * @rmid_ptrs - The entries in the limbo and free lists.
+ */
+static struct rmid_entry *rmid_ptrs;
+
+/*
+ * This is the threshold cache occupancy in bytes at which we will consider an
+ * RMID available for re-allocation.
+ */
+unsigned int resctrl_rmid_realloc_threshold;
+
+/*
+ * This is the maximum value for the reallocation threshold, in bytes.
+ */
+unsigned int resctrl_rmid_realloc_limit;
+
+/*
+ * x86 and arm64 differ in their handling of monitoring.
+ * x86's RMID are independent numbers, there is only one source of traffic
+ * with an RMID value of '1'.
+ * arm64's PMG extends the PARTID/CLOSID space, there are multiple sources of
+ * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
+ * value is no longer unique.
+ * To account for this, resctrl uses an index. On x86 this is just the RMID,
+ * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
+ *
+ * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
+ * must accept an attempt to read every index.
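+ * For example, resctrl_arch_rmid_idx_encode(closid, rmid) on x86 simply
+ * returns the RMID, while an MPAM implementation also folds in the CLOSID
+ * so that each (CLOSID, RMID) pair maps to a distinct index.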
+ */ +static inline struct rmid_entry *__rmid_entry(u32 idx) +{ + struct rmid_entry *entry; + u32 closid, rmid; + + entry = &rmid_ptrs[idx]; + resctrl_arch_rmid_idx_decode(idx, &closid, &rmid); + + WARN_ON_ONCE(entry->closid != closid); + WARN_ON_ONCE(entry->rmid != rmid); + + return entry; +} + +static void limbo_release_entry(struct rmid_entry *entry) +{ + lockdep_assert_held(&rdtgroup_mutex); + + rmid_limbo_count--; + list_add_tail(&entry->list, &rmid_free_lru); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]--; +} + +/* + * Check the RMIDs that are marked as busy for this domain. If the + * reported LLC occupancy is below the threshold clear the busy bit and + * decrement the count. If the busy count gets to zero on an RMID, we + * free the RMID + */ +void __check_limbo(struct rdt_domain *d, bool force_free) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + struct rmid_entry *entry; + u32 idx, cur_idx = 1; + void *arch_mon_ctx; + bool rmid_dirty; + u64 val = 0; + + arch_mon_ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID); + if (IS_ERR(arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(arch_mon_ctx)); + return; + } + + /* + * Skip RMID 0 and start from RMID 1 and check all the RMIDs that + * are marked as busy for occupancy < threshold. If the occupancy + * is less than the threshold decrement the busy counter of the + * RMID and move it to the free list when the counter reaches 0. + */ + for (;;) { + idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx); + if (idx >= idx_limit) + break; + + entry = __rmid_entry(idx); + if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid, + QOS_L3_OCCUP_EVENT_ID, &val, + arch_mon_ctx)) { + rmid_dirty = true; + } else { + rmid_dirty = (val >= resctrl_rmid_realloc_threshold); + } + + if (force_free || !rmid_dirty) { + clear_bit(idx, d->rmid_busy_llc); + if (!--entry->busy) + limbo_release_entry(entry); + } + cur_idx = idx + 1; + } + + resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, arch_mon_ctx); +} + +bool has_busy_rmid(struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + + return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit; +} + +static struct rmid_entry *resctrl_find_free_rmid(u32 closid) +{ + struct rmid_entry *itr; + u32 itr_idx, cmp_idx; + + if (list_empty(&rmid_free_lru)) + return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC); + + list_for_each_entry(itr, &rmid_free_lru, list) { + /* + * Get the index of this free RMID, and the index it would need + * to be if it were used with this CLOSID. + * If the CLOSID is irrelevant on this architecture, the two + * index values are always the same on every entry and thus the + * very first entry will be returned. + */ + itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid); + cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid); + + if (itr_idx == cmp_idx) + return itr; + } + + return ERR_PTR(-ENOSPC); +} + +/** + * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated + * RMID are clean, or the CLOSID that has + * the most clean RMID. + * + * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID + * may not be able to allocate clean RMID. To avoid this the allocator will + * choose the CLOSID with the most clean RMID. 
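+ * A "dirty" RMID here is one still on the limbo list, i.e. its last
+ * measured occupancy was above resctrl_rmid_realloc_threshold.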
+ * + * When the CLOSID and RMID are independent numbers, the first free CLOSID will + * be returned. + */ +int resctrl_find_cleanest_closid(void) +{ + u32 cleanest_closid = ~0; + int i = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + return -EIO; + + for (i = 0; i < closids_supported(); i++) { + int num_dirty; + + if (closid_allocated(i)) + continue; + + num_dirty = closid_num_dirty_rmid[i]; + if (num_dirty == 0) + return i; + + if (cleanest_closid == ~0) + cleanest_closid = i; + + if (num_dirty < closid_num_dirty_rmid[cleanest_closid]) + cleanest_closid = i; + } + + if (cleanest_closid == ~0) + return -ENOSPC; + + return cleanest_closid; +} + +/* + * For MPAM the RMID value is not unique, and has to be considered with + * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which + * allows all domains to be managed by a single free list. + * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler. + */ +int alloc_rmid(u32 closid) +{ + struct rmid_entry *entry; + + lockdep_assert_held(&rdtgroup_mutex); + + entry = resctrl_find_free_rmid(closid); + if (IS_ERR(entry)) + return PTR_ERR(entry); + + list_del(&entry->list); + return entry->rmid; +} + +static void add_rmid_to_limbo(struct rmid_entry *entry) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_domain *d; + u32 idx; + + lockdep_assert_held(&rdtgroup_mutex); + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid); + + entry->busy = 0; + list_for_each_entry(d, &r->domains, list) { + /* + * For the first limbo RMID in the domain, + * setup up the limbo worker. + */ + if (!has_busy_rmid(d)) + cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL, + RESCTRL_PICK_ANY_CPU); + set_bit(idx, d->rmid_busy_llc); + entry->busy++; + } + + rmid_limbo_count++; + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) + closid_num_dirty_rmid[entry->closid]++; +} + +void free_rmid(u32 closid, u32 rmid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct rmid_entry *entry; + + lockdep_assert_held(&rdtgroup_mutex); + + /* + * Do not allow the default rmid to be free'd. Comparing by index + * allows architectures that ignore the closid parameter to avoid an + * unnecessary check. 
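+	 * (On x86, for instance, the encode helper returns the RMID
+	 * unchanged, so the compare below reduces to checking the RMID
+	 * against RESCTRL_RESERVED_RMID with no separate closid test.)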
+ */ + if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID)) + return; + + entry = __rmid_entry(idx); + + if (resctrl_arch_is_llc_occupancy_enabled()) + add_rmid_to_limbo(entry); + else + list_add_tail(&entry->list, &rmid_free_lru); +} + +static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid, + u32 rmid, enum resctrl_event_id evtid) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + + switch (evtid) { + case QOS_L3_MBM_TOTAL_EVENT_ID: + return &d->mbm_total[idx]; + case QOS_L3_MBM_LOCAL_EVENT_ID: + return &d->mbm_local[idx]; + default: + return NULL; + } +} + +static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + struct mbm_state *m; + u64 tval = 0; + + if (rr->first) { + resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid); + m = get_mbm_state(rr->d, closid, rmid, rr->evtid); + if (m) + memset(m, 0, sizeof(struct mbm_state)); + return 0; + } + + rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid, + &tval, rr->arch_mon_ctx); + if (rr->err) + return rr->err; + + rr->val += tval; + + return 0; +} + +/* + * mbm_bw_count() - Update bw count from values previously read by + * __mon_event_count(). + * @closid: The closid used to identify the cached mbm_state. + * @rmid: The rmid used to identify the cached mbm_state. + * @rr: The struct rmid_read populated by __mon_event_count(). + * + * Supporting function to calculate the memory bandwidth + * and delta bandwidth in MBps. The chunks value previously read by + * __mon_event_count() is compared with the chunks value from the previous + * invocation. This must be called once per second to maintain values in MBps. + */ +static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr) +{ + u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid); + struct mbm_state *m = &rr->d->mbm_local[idx]; + u64 cur_bw, bytes, cur_bytes; + + cur_bytes = rr->val; + bytes = cur_bytes - m->prev_bw_bytes; + m->prev_bw_bytes = cur_bytes; + + cur_bw = bytes / SZ_1M; + + m->prev_bw = cur_bw; +} + +/* + * This is scheduled by mon_event_read() to read the CQM/MBM counters + * on a domain. + */ +void mon_event_count(void *info) +{ + struct rdtgroup *rdtgrp, *entry; + struct rmid_read *rr = info; + struct list_head *head; + int ret; + + rdtgrp = rr->rgrp; + + ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr); + + /* + * For Ctrl groups read data from child monitor groups and + * add them together. Count events which are read successfully. + * Discard the rmid_read's reporting errors. + */ + head = &rdtgrp->mon.crdtgrp_list; + + if (rdtgrp->type == RDTCTRL_GROUP) { + list_for_each_entry(entry, head, mon.crdtgrp_list) { + if (__mon_event_count(entry->closid, entry->mon.rmid, + rr) == 0) + ret = 0; + } + } + + /* + * __mon_event_count() calls for newly created monitor groups may + * report -EINVAL/Unavailable if the monitor hasn't seen any traffic. + * Discard error if any of the monitor event reads succeeded. + */ + if (ret == 0) + rr->err = 0; +} + +/* + * Feedback loop for MBA software controller (mba_sc) + * + * mba_sc is a feedback loop where we periodically read MBM counters and + * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so + * that: + * + * current bandwidth(cur_bw) < user specified bandwidth(user_bw) + * + * This uses the MBM counters to measure the bandwidth and MBA throttle + * MSRs to control the bandwidth for a particular rdtgrp. 
It builds on the + * fact that resctrl rdtgroups have both monitoring and control. + * + * The frequency of the checks is 1s and we just tag along the MBM overflow + * timer. Having 1s interval makes the calculation of bandwidth simpler. + * + * Although MBA's goal is to restrict the bandwidth to a maximum, there may + * be a need to increase the bandwidth to avoid unnecessarily restricting + * the L2 <-> L3 traffic. + * + * Since MBA controls the L2 external bandwidth where as MBM measures the + * L3 external bandwidth the following sequence could lead to such a + * situation. + * + * Consider an rdtgroup which had high L3 <-> memory traffic in initial + * phases -> mba_sc kicks in and reduced bandwidth percentage values -> but + * after some time rdtgroup has mostly L2 <-> L3 traffic. + * + * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its + * throttle MSRs already have low percentage values. To avoid + * unnecessarily restricting such rdtgroups, we also increase the bandwidth. + */ +static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm) +{ + u32 closid, rmid, cur_msr_val, new_msr_val; + struct mbm_state *pmbm_data, *cmbm_data; + struct rdt_resource *r_mba; + struct rdt_domain *dom_mba; + u32 cur_bw, user_bw, idx; + struct list_head *head; + struct rdtgroup *entry; + + if (!resctrl_arch_is_mbm_local_enabled()) + return; + + r_mba = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + closid = rgrp->closid; + rmid = rgrp->mon.rmid; + idx = resctrl_arch_rmid_idx_encode(closid, rmid); + pmbm_data = &dom_mbm->mbm_local[idx]; + + dom_mba = resctrl_get_domain_from_cpu(smp_processor_id(), r_mba); + if (!dom_mba) { + pr_warn_once("Failure to get domain for MBA update\n"); + return; + } + + cur_bw = pmbm_data->prev_bw; + user_bw = dom_mba->mbps_val[closid]; + + /* MBA resource doesn't support CDP */ + cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE); + + /* + * For Ctrl groups read data from child monitor groups. + */ + head = &rgrp->mon.crdtgrp_list; + list_for_each_entry(entry, head, mon.crdtgrp_list) { + cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid]; + cur_bw += cmbm_data->prev_bw; + } + + /* + * Scale up/down the bandwidth linearly for the ctrl group. The + * bandwidth step is the bandwidth granularity specified by the + * hardware. + * Always increase throttling if current bandwidth is above the + * target set by user. + * But avoid thrashing up and down on every poll by checking + * whether a decrease in throttling is likely to push the group + * back over target. E.g. if currently throttling to 30% of bandwidth + * on a system with 10% granularity steps, check whether moving to + * 40% would go past the limit by multiplying current bandwidth by + * "(30 + 10) / 30". + */ + if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) { + new_msr_val = cur_msr_val - r_mba->membw.bw_gran; + } else if (cur_msr_val < MAX_MBA_BW && + (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) { + new_msr_val = cur_msr_val + r_mba->membw.bw_gran; + } else { + return; + } + + resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val); +} + +static void mbm_update(struct rdt_resource *r, struct rdt_domain *d, + u32 closid, u32 rmid) +{ + struct rmid_read rr; + + rr.first = false; + rr.r = r; + rr.d = d; + + /* + * This is protected from concurrent reads from user + * as both the user and we hold the global mutex. 
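+	 * (The global mutex is rdtgroup_mutex: it serialises the overflow
+	 * handler's updates of the cached mbm_state against reads of the
+	 * mon_data files issued from user space.)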
+ */ + if (resctrl_arch_is_mbm_total_enabled()) { + rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } + if (resctrl_arch_is_mbm_local_enabled()) { + rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID; + rr.val = 0; + rr.arch_mon_ctx = resctrl_arch_mon_ctx_alloc(rr.r, rr.evtid); + if (IS_ERR(rr.arch_mon_ctx)) { + pr_warn_ratelimited("Failed to allocate monitor context: %ld", + PTR_ERR(rr.arch_mon_ctx)); + return; + } + + __mon_event_count(closid, rmid, &rr); + + /* + * Call the MBA software controller only for the + * control groups and when user has enabled + * the software controller explicitly. + */ + if (is_mba_sc(NULL)) + mbm_bw_count(closid, rmid, &rr); + + resctrl_arch_mon_ctx_free(rr.r, rr.evtid, rr.arch_mon_ctx); + } +} + +/* + * Handler to scan the limbo list and move the RMIDs + * to free list whose occupancy < threshold_occupancy. + */ +void cqm_handle_limbo(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL); + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + d = container_of(work, struct rdt_domain, cqm_limbo.work); + + __check_limbo(d, false); + + if (has_busy_rmid(d)) { + d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo, + delay); + } + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * cqm_setup_limbo_handler() - Schedule the limbo handler to run for this + * domain. + * @dom: The domain the limbo handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->cqm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay); +} + +void mbm_handle_overflow(struct work_struct *work) +{ + unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL); + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + struct rdt_resource *r; + struct rdt_domain *d; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* + * If the filesystem has been unmounted this work no longer needs to + * run. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + goto out_unlock; + + r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + d = container_of(work, struct rdt_domain, mbm_over.work); + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + mbm_update(r, d, prgrp->closid, prgrp->mon.rmid); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) + mbm_update(r, d, crgrp->closid, crgrp->mon.rmid); + + if (is_mba_sc(NULL)) + update_mba_bw(prgrp, d); + } + + /* + * Re-check for housekeeping CPUs. This allows the overflow handler to + * move off a nohz_full CPU quickly. 
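+	 * (cpumask_any_housekeeping() below is expected to prefer a CPU of
+	 * the domain that is not isolated by nohz_full, which is why it is
+	 * re-evaluated on every expiry rather than cached.)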
+ */ + d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask, + RESCTRL_PICK_ANY_CPU); + schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +/** + * mbm_setup_overflow_handler() - Schedule the overflow handler to run for this + * domain. + * @dom: The domain the overflow handler should run for. + * @delay_ms: How far in the future the handler should run. + * @exclude_cpu: Which CPU the handler should not run on, + * RESCTRL_PICK_ANY_CPU to pick any CPU. + */ +void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms, + int exclude_cpu) +{ + unsigned long delay = msecs_to_jiffies(delay_ms); + int cpu; + + /* + * When a domain comes online there is no guarantee the filesystem is + * mounted. If not, there is no need to catch counter overflow. + */ + if (!resctrl_mounted || !resctrl_arch_mon_capable()) + return; + cpu = cpumask_any_housekeeping(&dom->cpu_mask, exclude_cpu); + dom->mbm_work_cpu = cpu; + + if (cpu < nr_cpu_ids) + schedule_delayed_work_on(cpu, &dom->mbm_over, delay); +} + +static int dom_data_init(struct rdt_resource *r) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rmid_entry *entry = NULL; + int err = 0, i; + u32 idx; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + u32 *tmp; + + /* + * If the architecture hasn't provided a sanitised value here, + * this may result in larger arrays than necessary. Resctrl will + * use a smaller system wide value based on the resources in + * use. + */ + tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL); + if (!tmp) { + err = -ENOMEM; + goto out_unlock; + } + + closid_num_dirty_rmid = tmp; + } + + rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL); + if (!rmid_ptrs) { + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + err = -ENOMEM; + goto out_unlock; + } + + for (i = 0; i < idx_limit; i++) { + entry = &rmid_ptrs[i]; + INIT_LIST_HEAD(&entry->list); + + resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid); + list_add_tail(&entry->list, &rmid_free_lru); + } + + /* + * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and + * are always allocated. These are used for the rdtgroup_default + * control group, which will be setup later in rdtgroup_init(). + */ + idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID, + RESCTRL_RESERVED_RMID); + entry = __rmid_entry(idx); + list_del(&entry->list); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +static void dom_data_exit(struct rdt_resource *r) +{ + if (!r->mon_capable) + return; + + mutex_lock(&rdtgroup_mutex); + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) { + kfree(closid_num_dirty_rmid); + closid_num_dirty_rmid = NULL; + } + + kfree(rmid_ptrs); + rmid_ptrs = NULL; + + mutex_unlock(&rdtgroup_mutex); +} + +static struct mon_evt llc_occupancy_event = { + .name = "llc_occupancy", + .evtid = QOS_L3_OCCUP_EVENT_ID, +}; + +static struct mon_evt mbm_total_event = { + .name = "mbm_total_bytes", + .evtid = QOS_L3_MBM_TOTAL_EVENT_ID, +}; + +static struct mon_evt mbm_local_event = { + .name = "mbm_local_bytes", + .evtid = QOS_L3_MBM_LOCAL_EVENT_ID, +}; + +static struct mon_evt mbm_bps_event = { + .name = "mbm_local_bytes", + .evtid = QOS_MC_MBM_BPS_EVENT_ID, +}; + +/* + * Initialize the event list for the resource. 
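+ * Events added to r->evt_list become the monitor files exposed for
+ * each domain via the mon_data directory, so only the events the
+ * architecture reports as enabled are added.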
+ * + * Note that MBM events are also part of RDT_RESOURCE_L3 resource + * because as per the SDM the total and local memory bandwidth + * are enumerated as part of L3 monitoring. + */ +static void l3_mon_evt_init(struct rdt_resource *r) +{ + INIT_LIST_HEAD(&r->evt_list); + + if (resctrl_arch_is_llc_occupancy_enabled()) + list_add_tail(&llc_occupancy_event.list, &r->evt_list); + if (resctrl_arch_is_mbm_total_enabled()) + list_add_tail(&mbm_total_event.list, &r->evt_list); + if (resctrl_arch_is_mbm_local_enabled()) + list_add_tail(&mbm_local_event.list, &r->evt_list); +} + +static void mc_mon_evt_init(struct rdt_resource *r) +{ + INIT_LIST_HEAD(&r->evt_list); + + if (resctrl_arch_is_mbm_bps_enabled()) + list_add_tail(&mbm_bps_event.list, &r->evt_list); +} + +int resctrl_mon_resource_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + int ret; + + if (!r->mon_capable) + return 0; + + ret = dom_data_init(r); + if (ret) + return ret; + + l3_mon_evt_init(r); + + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_TOTAL_EVENT_ID)) { + mbm_total_event.configurable = true; + mbm_config_rftype_init("mbm_total_bytes_config"); + } + if (resctrl_arch_is_evt_configurable(QOS_L3_MBM_LOCAL_EVENT_ID)) { + mbm_local_event.configurable = true; + mbm_config_rftype_init("mbm_local_bytes_config"); + } + + r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + mc_mon_evt_init(r); + + return 0; +} + +void resctrl_mon_resource_exit(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_L3); + + dom_data_exit(r); +} diff --git a/fs/resctrl/psuedo_lock.c b/fs/resctrl/psuedo_lock.c new file mode 100644 index 0000000000000000000000000000000000000000..077c2abb6edd90cad1e1e237dd4c1861cf1dc025 --- /dev/null +++ b/fs/resctrl/psuedo_lock.c @@ -0,0 +1,1122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Resource Director Technology (RDT) + * + * Pseudo-locking support built on top of Cache Allocation Technology (CAT) + * + * Copyright (C) 2018 Intel Corporation + * + * Author: Reinette Chatre + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "internal.h" + +/* + * Major number assigned to and shared by all devices exposing + * pseudo-locked regions. + */ +static unsigned int pseudo_lock_major; +static unsigned long pseudo_lock_minor_avail = GENMASK(MINORBITS, 0); + +static char *pseudo_lock_devnode(const struct device *dev, umode_t *mode) +{ + const struct rdtgroup *rdtgrp; + + rdtgrp = dev_get_drvdata(dev); + if (mode) + *mode = 0600; + return kasprintf(GFP_KERNEL, "pseudo_lock/%s", rdtgrp->kn->name); +} + +static const struct class pseudo_lock_class = { + .name = "pseudo_lock", + .devnode = pseudo_lock_devnode, +}; + +/** + * pseudo_lock_minor_get - Obtain available minor number + * @minor: Pointer to where new minor number will be stored + * + * A bitmask is used to track available minor numbers. Here the next free + * minor number is marked as unavailable and returned. + * + * Return: 0 on success, <0 on failure. 
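+ * (The allocator is a plain bitmap: find_first_bit() locates the
+ * lowest free minor in pseudo_lock_minor_avail and __clear_bit()
+ * marks it busy; pseudo_lock_minor_release() sets the bit free again.)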
+ */ +static int pseudo_lock_minor_get(unsigned int *minor) +{ + unsigned long first_bit; + + first_bit = find_first_bit(&pseudo_lock_minor_avail, MINORBITS); + + if (first_bit == MINORBITS) + return -ENOSPC; + + __clear_bit(first_bit, &pseudo_lock_minor_avail); + *minor = first_bit; + + return 0; +} + +/** + * pseudo_lock_minor_release - Return minor number to available + * @minor: The minor number made available + */ +static void pseudo_lock_minor_release(unsigned int minor) +{ + __set_bit(minor, &pseudo_lock_minor_avail); +} + +/** + * region_find_by_minor - Locate a pseudo-lock region by inode minor number + * @minor: The minor number of the device representing pseudo-locked region + * + * When the character device is accessed we need to determine which + * pseudo-locked region it belongs to. This is done by matching the minor + * number of the device to the pseudo-locked region it belongs. + * + * Minor numbers are assigned at the time a pseudo-locked region is associated + * with a cache instance. + * + * Return: On success return pointer to resource group owning the pseudo-locked + * region, NULL on failure. + */ +static struct rdtgroup *region_find_by_minor(unsigned int minor) +{ + struct rdtgroup *rdtgrp, *rdtgrp_match = NULL; + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (rdtgrp->plr && rdtgrp->plr->minor == minor) { + rdtgrp_match = rdtgrp; + break; + } + } + return rdtgrp_match; +} + +/** + * struct pseudo_lock_pm_req - A power management QoS request list entry + * @list: Entry within the @pm_reqs list for a pseudo-locked region + * @req: PM QoS request + */ +struct pseudo_lock_pm_req { + struct list_head list; + struct dev_pm_qos_request req; +}; + +static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req, *next; + + list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) { + dev_pm_qos_remove_request(&pm_req->req); + list_del(&pm_req->list); + kfree(pm_req); + } +} + +/** + * pseudo_lock_cstates_constrain - Restrict cores from entering C6 + * @plr: Pseudo-locked region + * + * To prevent the cache from being affected by power management entering + * C6 has to be avoided. This is accomplished by requesting a latency + * requirement lower than lowest C6 exit latency of all supported + * platforms as found in the cpuidle state tables in the intel_idle driver. + * At this time it is possible to do so with a single latency requirement + * for all supported platforms. + * + * Since Goldmont is supported, which is affected by X86_BUG_MONITOR, + * the ACPI latencies need to be considered while keeping in mind that C2 + * may be set to map to deeper sleep states. In this case the latency + * requirement needs to prevent entering C2 also. 
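+ * Hence the single DEV_PM_QOS_RESUME_LATENCY request of 30 us made for
+ * every CPU of the domain below: it is low enough to keep the affected
+ * platforms out of C6, and out of any C2 that maps to a deeper state.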
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr) +{ + struct pseudo_lock_pm_req *pm_req; + int cpu; + int ret; + + for_each_cpu(cpu, &plr->d->cpu_mask) { + pm_req = kzalloc(sizeof(*pm_req), GFP_KERNEL); + if (!pm_req) { + rdt_last_cmd_puts("Failure to allocate memory for PM QoS\n"); + ret = -ENOMEM; + goto out_err; + } + ret = dev_pm_qos_add_request(get_cpu_device(cpu), + &pm_req->req, + DEV_PM_QOS_RESUME_LATENCY, + 30); + if (ret < 0) { + rdt_last_cmd_printf("Failed to add latency req CPU%d\n", + cpu); + kfree(pm_req); + ret = -1; + goto out_err; + } + list_add(&pm_req->list, &plr->pm_reqs); + } + + return 0; + +out_err: + pseudo_lock_cstates_relax(plr); + return ret; +} + +/** + * pseudo_lock_region_clear - Reset pseudo-lock region data + * @plr: pseudo-lock region + * + * All content of the pseudo-locked region is reset - any memory allocated + * freed. + * + * Return: void + */ +static void pseudo_lock_region_clear(struct pseudo_lock_region *plr) +{ + plr->size = 0; + plr->line_size = 0; + kfree(plr->kmem); + plr->kmem = NULL; + plr->s = NULL; + if (plr->d) + plr->d->plr = NULL; + plr->d = NULL; + plr->cbm = 0; + plr->debugfs_dir = NULL; +} + +/** + * pseudo_lock_region_init - Initialize pseudo-lock region information + * @plr: pseudo-lock region + * + * Called after user provided a schemata to be pseudo-locked. From the + * schemata the &struct pseudo_lock_region is on entry already initialized + * with the resource, domain, and capacity bitmask. Here the information + * required for pseudo-locking is deduced from this data and &struct + * pseudo_lock_region initialized further. This information includes: + * - size in bytes of the region to be pseudo-locked + * - cache line size to know the stride with which data needs to be accessed + * to be pseudo-locked + * - a cpu associated with the cache instance on which the pseudo-locking + * flow can be executed + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_init(struct pseudo_lock_region *plr) +{ + struct cpu_cacheinfo *ci; + int ret; + int i; + + /* Pick the first cpu we find that is associated with the cache. */ + plr->cpu = cpumask_first(&plr->d->cpu_mask); + + if (!cpu_online(plr->cpu)) { + rdt_last_cmd_printf("CPU %u associated with cache not online\n", + plr->cpu); + ret = -ENODEV; + goto out_region; + } + + ci = get_cpu_cacheinfo(plr->cpu); + + plr->size = rdtgroup_cbm_to_size(plr->s->res, plr->d, plr->cbm); + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == plr->s->res->cache_level) { + plr->line_size = ci->info_list[i].coherency_line_size; + return 0; + } + } + + ret = -1; + rdt_last_cmd_puts("Unable to determine cache line size\n"); +out_region: + pseudo_lock_region_clear(plr); + return ret; +} + +/** + * pseudo_lock_init - Initialize a pseudo-lock region + * @rdtgrp: resource group to which new pseudo-locked region will belong + * + * A pseudo-locked region is associated with a resource group. When this + * association is created the pseudo-locked region is initialized. The + * details of the pseudo-locked region are not known at this time so only + * allocation is done and association established. 
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_init(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr; + + plr = kzalloc(sizeof(*plr), GFP_KERNEL); + if (!plr) + return -ENOMEM; + + init_waitqueue_head(&plr->lock_thread_wq); + INIT_LIST_HEAD(&plr->pm_reqs); + rdtgrp->plr = plr; + return 0; +} + +/** + * pseudo_lock_region_alloc - Allocate kernel memory that will be pseudo-locked + * @plr: pseudo-lock region + * + * Initialize the details required to set up the pseudo-locked region and + * allocate the contiguous memory that will be pseudo-locked to the cache. + * + * Return: 0 on success, <0 on failure. Descriptive error will be written + * to last_cmd_status buffer. + */ +static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr) +{ + int ret; + + ret = pseudo_lock_region_init(plr); + if (ret < 0) + return ret; + + /* + * We do not yet support contiguous regions larger than + * KMALLOC_MAX_SIZE. + */ + if (plr->size > KMALLOC_MAX_SIZE) { + rdt_last_cmd_puts("Requested region exceeds maximum size\n"); + ret = -E2BIG; + goto out_region; + } + + plr->kmem = kzalloc(plr->size, GFP_KERNEL); + if (!plr->kmem) { + rdt_last_cmd_puts("Unable to allocate memory\n"); + ret = -ENOMEM; + goto out_region; + } + + ret = 0; + goto out; +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * pseudo_lock_free - Free a pseudo-locked region + * @rdtgrp: resource group to which pseudo-locked region belonged + * + * The pseudo-locked region's resources have already been released, or not + * yet created at this point. Now it can be freed and disassociated from the + * resource group. + * + * Return: void + */ +static void pseudo_lock_free(struct rdtgroup *rdtgrp) +{ + pseudo_lock_region_clear(rdtgrp->plr); + kfree(rdtgrp->plr); + rdtgrp->plr = NULL; +} + +/** + * rdtgroup_monitor_in_progress - Test if monitoring in progress + * @rdtgrp: resource group being queried + * + * Return: 1 if monitor groups have been created for this resource + * group, 0 otherwise. + */ +static int rdtgroup_monitor_in_progress(struct rdtgroup *rdtgrp) +{ + return !list_empty(&rdtgrp->mon.crdtgrp_list); +} + +/** + * rdtgroup_locksetup_user_restrict - Restrict user access to group + * @rdtgrp: resource group needing access restricted + * + * A resource group used for cache pseudo-locking cannot have cpus or tasks + * assigned to it. This is communicated to the user by restricting access + * to all the files that can be used to make such changes. + * + * Permissions restored with rdtgroup_locksetup_user_restore() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restriction of access an attempt will be made to restore permissions but + * the state of the mode of these files will be uncertain when a failure + * occurs. 
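+ * The files restricted below are "tasks", "cpus" and "cpus_list",
+ * plus "mon_groups" when the system is monitor capable.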
+ */ +static int rdtgroup_locksetup_user_restrict(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restrict(rdtgrp, "mon_groups"); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); +err_cpus: + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); +err_tasks: + rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); +out: + return ret; +} + +/** + * rdtgroup_locksetup_user_restore - Restore user access to group + * @rdtgrp: resource group needing access restored + * + * Restore all file access previously removed using + * rdtgroup_locksetup_user_restrict() + * + * Return: 0 on success, <0 on failure. If a failure occurs during the + * restoration of access an attempt will be made to restrict permissions + * again but the state of the mode of these files will be uncertain when + * a failure occurs. + */ +static int rdtgroup_locksetup_user_restore(struct rdtgroup *rdtgrp) +{ + int ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777); + if (ret) + return ret; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0777); + if (ret) + goto err_tasks; + + ret = rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0777); + if (ret) + goto err_cpus; + + if (resctrl_arch_mon_capable()) { + ret = rdtgroup_kn_mode_restore(rdtgrp, "mon_groups", 0777); + if (ret) + goto err_cpus_list; + } + + ret = 0; + goto out; + +err_cpus_list: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus_list"); +err_cpus: + rdtgroup_kn_mode_restrict(rdtgrp, "cpus"); +err_tasks: + rdtgroup_kn_mode_restrict(rdtgrp, "tasks"); +out: + return ret; +} + +/** + * rdtgroup_locksetup_enter - Resource group enters locksetup mode + * @rdtgrp: resource group requested to enter locksetup mode + * + * A resource group enters locksetup mode to reflect that it would be used + * to represent a pseudo-locked region and is in the process of being set + * up to do so. A resource group used for a pseudo-locked region would + * lose the closid associated with it so we cannot allow it to have any + * tasks or cpus assigned nor permit tasks or cpus to be assigned in the + * future. Monitoring of a pseudo-locked region is not allowed either. + * + * The above and more restrictions on a pseudo-locked region are checked + * for and enforced before the resource group enters the locksetup mode. + * + * Returns: 0 if the resource group successfully entered locksetup mode, <0 + * on failure. On failure the last_cmd_status buffer is updated with text to + * communicate details of failure to the user. + */ +int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp) +{ + int ret; + + /* + * The default resource group can neither be removed nor lose the + * default closid associated with it. + */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Cannot pseudo-lock default group\n"); + return -EINVAL; + } + + /* + * Cache Pseudo-locking not supported when CDP is enabled. + * + * Some things to consider if you would like to enable this + * support (using L3 CDP as example): + * - When CDP is enabled two separate resources are exposed, + * L3DATA and L3CODE, but they are actually on the same cache. 
+ * The implication for pseudo-locking is that if a + * pseudo-locked region is created on a domain of one + * resource (eg. L3CODE), then a pseudo-locked region cannot + * be created on that same domain of the other resource + * (eg. L3DATA). This is because the creation of a + * pseudo-locked region involves a call to wbinvd that will + * affect all cache allocations on particular domain. + * - Considering the previous, it may be possible to only + * expose one of the CDP resources to pseudo-locking and + * hide the other. For example, we could consider to only + * expose L3DATA and since the L3 cache is unified it is + * still possible to place instructions there are execute it. + * - If only one region is exposed to pseudo-locking we should + * still keep in mind that availability of a portion of cache + * for pseudo-locking should take into account both resources. + * Similarly, if a pseudo-locked region is created in one + * resource, the portion of cache used by it should be made + * unavailable to all future allocations from both resources. + */ + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3) || + resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) { + rdt_last_cmd_puts("CDP enabled\n"); + return -EINVAL; + } + + /* + * Not knowing the bits to disable prefetching implies that this + * platform does not support Cache Pseudo-Locking. + */ + if (resctrl_arch_get_prefetch_disable_bits() == 0) { + rdt_last_cmd_puts("Pseudo-locking not supported\n"); + return -EINVAL; + } + + if (rdtgroup_monitor_in_progress(rdtgrp)) { + rdt_last_cmd_puts("Monitoring in progress\n"); + return -EINVAL; + } + + if (rdtgroup_tasks_assigned(rdtgrp)) { + rdt_last_cmd_puts("Tasks assigned to resource group\n"); + return -EINVAL; + } + + if (!cpumask_empty(&rdtgrp->cpu_mask)) { + rdt_last_cmd_puts("CPUs assigned to resource group\n"); + return -EINVAL; + } + + if (rdtgroup_locksetup_user_restrict(rdtgrp)) { + rdt_last_cmd_puts("Unable to modify resctrl permissions\n"); + return -EIO; + } + + ret = pseudo_lock_init(rdtgrp); + if (ret) { + rdt_last_cmd_puts("Unable to init pseudo-lock region\n"); + goto out_release; + } + + /* + * If this system is capable of monitoring a rmid would have been + * allocated when the control group was created. This is not needed + * anymore when this group would be used for pseudo-locking. This + * is safe to call on platforms not capable of monitoring. + */ + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + ret = 0; + goto out; + +out_release: + rdtgroup_locksetup_user_restore(rdtgrp); +out: + return ret; +} + +/** + * rdtgroup_locksetup_exit - resource group exist locksetup mode + * @rdtgrp: resource group + * + * When a resource group exits locksetup mode the earlier restrictions are + * lifted. + * + * Return: 0 on success, <0 on failure + */ +int rdtgroup_locksetup_exit(struct rdtgroup *rdtgrp) +{ + int ret; + + if (resctrl_arch_mon_capable()) { + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + } + + ret = rdtgroup_locksetup_user_restore(rdtgrp); + if (ret) { + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + pseudo_lock_free(rdtgrp); + return 0; +} + +/** + * rdtgroup_cbm_overlaps_pseudo_locked - Test if CBM or portion is pseudo-locked + * @d: RDT domain + * @cbm: CBM to test + * + * @d represents a cache instance and @cbm a capacity bitmask that is + * considered for it. Determine if @cbm overlaps with any existing + * pseudo-locked region on @d. 
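+ * For example (illustrative values): an existing pseudo-locked region
+ * with CBM 0x0f0 overlaps a requested CBM of 0x180, because bit 7 is
+ * set in both masks.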
+ * + * @cbm is unsigned long, even if only 32 bits are used, to make the + * bitmap functions work correctly. + * + * Return: true if @cbm overlaps with pseudo-locked region on @d, false + * otherwise. + */ +bool rdtgroup_cbm_overlaps_pseudo_locked(struct rdt_domain *d, unsigned long cbm) +{ + unsigned int cbm_len; + unsigned long cbm_b; + + if (d->plr) { + cbm_len = d->plr->s->res->cache.cbm_len; + cbm_b = d->plr->cbm; + if (bitmap_intersects(&cbm, &cbm_b, cbm_len)) + return true; + } + return false; +} + +/** + * rdtgroup_pseudo_locked_in_hierarchy - Pseudo-locked region in cache hierarchy + * @d: RDT domain under test + * + * The setup of a pseudo-locked region affects all cache instances within + * the hierarchy of the region. It is thus essential to know if any + * pseudo-locked regions exist within a cache hierarchy to prevent any + * attempts to create new pseudo-locked regions in the same hierarchy. + * + * Return: true if a pseudo-locked region exists in the hierarchy of @d or + * if it is not possible to test due to memory allocation issue, + * false otherwise. + */ +bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_domain *d) +{ + cpumask_var_t cpu_with_psl; + enum resctrl_res_level i; + struct rdt_resource *r; + struct rdt_domain *d_i; + bool ret = false; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + if (!zalloc_cpumask_var(&cpu_with_psl, GFP_KERNEL)) + return true; + + /* + * First determine which cpus have pseudo-locked regions + * associated with them. + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + list_for_each_entry(d_i, &r->domains, list) { + if (d_i->plr) + cpumask_or(cpu_with_psl, cpu_with_psl, + &d_i->cpu_mask); + } + } + + /* + * Next test if new pseudo-locked region would intersect with + * existing region. + */ + if (cpumask_intersects(&d->cpu_mask, cpu_with_psl)) + ret = true; + + free_cpumask_var(cpu_with_psl); + return ret; +} + +/** + * pseudo_lock_measure_cycles - Trigger latency measure to pseudo-locked region + * @rdtgrp: Resource group to which the pseudo-locked region belongs. + * @sel: Selector of which measurement to perform on a pseudo-locked region. + * + * The measurement of latency to access a pseudo-locked region should be + * done from a cpu that is associated with that pseudo-locked region. + * Determine which cpu is associated with this region and start a thread on + * that cpu to perform the measurement, wait for that thread to complete. 
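+ * @sel picks the measurement: 1 measures memory access latency, while
+ * 2 and 3 measure L2 and L3 cache residency respectively, matching the
+ * resctrl_arch_measure_*() helpers invoked below.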
+ * + * Return: 0 on success, <0 on failure + */ +static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int cpu; + int ret = -1; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out; + } + + if (!plr->d) { + ret = -ENODEV; + goto out; + } + + plr->thread_done = 0; + cpu = cpumask_first(&plr->d->cpu_mask); + if (!cpu_online(cpu)) { + ret = -ENODEV; + goto out; + } + + plr->cpu = cpu; + + if (sel == 1) + thread = kthread_create_on_node(resctrl_arch_measure_cycles_lat_fn, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 2) + thread = kthread_create_on_node(resctrl_arch_measure_l2_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else if (sel == 3) + thread = kthread_create_on_node(resctrl_arch_measure_l3_residency, + plr, cpu_to_node(cpu), + "pseudo_lock_measure/%u", + cpu); + else + goto out; + + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + goto out; + } + kthread_bind(thread, cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) + goto out; + + ret = 0; + +out: + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +static ssize_t pseudo_lock_measure_trigger(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct rdtgroup *rdtgrp = file->private_data; + size_t buf_size; + char buf[32]; + int ret; + int sel; + + buf_size = min(count, (sizeof(buf) - 1)); + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + ret = kstrtoint(buf, 10, &sel); + if (ret == 0) { + if (sel != 1 && sel != 2 && sel != 3) + return -EINVAL; + ret = debugfs_file_get(file->f_path.dentry); + if (ret) + return ret; + ret = pseudo_lock_measure_cycles(rdtgrp, sel); + if (ret == 0) + ret = count; + debugfs_file_put(file->f_path.dentry); + } + + return ret; +} + +static const struct file_operations pseudo_measure_fops = { + .write = pseudo_lock_measure_trigger, + .open = simple_open, + .llseek = default_llseek, +}; + +/** + * rdtgroup_pseudo_lock_create - Create a pseudo-locked region + * @rdtgrp: resource group to which pseudo-lock region belongs + * + * Called when a resource group in the pseudo-locksetup mode receives a + * valid schemata that should be pseudo-locked. Since the resource group is + * in pseudo-locksetup mode the &struct pseudo_lock_region has already been + * allocated and initialized with the essential information. If a failure + * occurs the resource group remains in the pseudo-locksetup mode with the + * &struct pseudo_lock_region associated with it, but cleared from all + * information and ready for the user to re-attempt pseudo-locking by + * writing the schemata again. + * + * Return: 0 if the pseudo-locked region was successfully pseudo-locked, <0 + * on failure. Descriptive error will be written to last_cmd_status buffer. 
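+ * The sequence below is: allocate the region's memory, constrain
+ * C-states, run the arch locking thread on a CPU of the cache domain,
+ * then expose the region through a character device and a debugfs
+ * measurement file.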
+ */ +int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + struct task_struct *thread; + unsigned int new_minor; + struct device *dev; + int ret; + + ret = pseudo_lock_region_alloc(plr); + if (ret < 0) + return ret; + + ret = pseudo_lock_cstates_constrain(plr); + if (ret < 0) { + ret = -EINVAL; + goto out_region; + } + + plr->thread_done = 0; + + plr->closid = rdtgrp->closid; + thread = kthread_create_on_node(resctrl_arch_pseudo_lock_fn, plr, + cpu_to_node(plr->cpu), + "pseudo_lock/%u", plr->cpu); + if (IS_ERR(thread)) { + ret = PTR_ERR(thread); + rdt_last_cmd_printf("Locking thread returned error %d\n", ret); + goto out_cstates; + } + + kthread_bind(thread, plr->cpu); + wake_up_process(thread); + + ret = wait_event_interruptible(plr->lock_thread_wq, + plr->thread_done == 1); + if (ret < 0) { + /* + * If the thread does not get on the CPU for whatever + * reason and the process which sets up the region is + * interrupted then this will leave the thread in runnable + * state and once it gets on the CPU it will dereference + * the cleared, but not freed, plr struct resulting in an + * empty pseudo-locking loop. + */ + rdt_last_cmd_puts("Locking thread interrupted\n"); + goto out_cstates; + } + + ret = pseudo_lock_minor_get(&new_minor); + if (ret < 0) { + rdt_last_cmd_puts("Unable to obtain a new minor number\n"); + goto out_cstates; + } + + /* + * Unlock access but do not release the reference. The + * pseudo-locked region will still be here on return. + * + * The mutex has to be released temporarily to avoid a potential + * deadlock with the mm->mmap_lock which is obtained in the + * device_create() and debugfs_create_dir() callpath below as well as + * before the mmap() callback is called. + */ + mutex_unlock(&rdtgroup_mutex); + + if (!IS_ERR_OR_NULL(debugfs_resctrl)) { + plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name, + debugfs_resctrl); + if (!IS_ERR_OR_NULL(plr->debugfs_dir)) + debugfs_create_file("pseudo_lock_measure", 0200, + plr->debugfs_dir, rdtgrp, + &pseudo_measure_fops); + } + + dev = device_create(&pseudo_lock_class, NULL, + MKDEV(pseudo_lock_major, new_minor), + rdtgrp, "%s", rdtgrp->kn->name); + + mutex_lock(&rdtgroup_mutex); + + if (IS_ERR(dev)) { + ret = PTR_ERR(dev); + rdt_last_cmd_printf("Failed to create character device: %d\n", + ret); + goto out_debugfs; + } + + /* We released the mutex - check if group was removed while we did so */ + if (rdtgrp->flags & RDT_DELETED) { + ret = -ENODEV; + goto out_device; + } + + plr->minor = new_minor; + + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKED; + closid_free(rdtgrp->closid); + rdtgroup_kn_mode_restore(rdtgrp, "cpus", 0444); + rdtgroup_kn_mode_restore(rdtgrp, "cpus_list", 0444); + + ret = 0; + goto out; + +out_device: + device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, new_minor)); +out_debugfs: + debugfs_remove_recursive(plr->debugfs_dir); + pseudo_lock_minor_release(new_minor); +out_cstates: + pseudo_lock_cstates_relax(plr); +out_region: + pseudo_lock_region_clear(plr); +out: + return ret; +} + +/** + * rdtgroup_pseudo_lock_remove - Remove a pseudo-locked region + * @rdtgrp: resource group to which the pseudo-locked region belongs + * + * The removal of a pseudo-locked region can be initiated when the resource + * group is removed from user space via a "rmdir" from userspace or the + * unmount of the resctrl filesystem. On removal the resource group does + * not go back to pseudo-locksetup mode before it is removed, instead it is + * removed directly. 
There is thus asymmetry with the creation where the + * &struct pseudo_lock_region is removed here while it was not created in + * rdtgroup_pseudo_lock_create(). + * + * Return: void + */ +void rdtgroup_pseudo_lock_remove(struct rdtgroup *rdtgrp) +{ + struct pseudo_lock_region *plr = rdtgrp->plr; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + /* + * Default group cannot be a pseudo-locked region so we can + * free closid here. + */ + closid_free(rdtgrp->closid); + goto free; + } + + pseudo_lock_cstates_relax(plr); + debugfs_remove_recursive(rdtgrp->plr->debugfs_dir); + device_destroy(&pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor)); + pseudo_lock_minor_release(plr->minor); + +free: + pseudo_lock_free(rdtgrp); +} + +static int pseudo_lock_dev_open(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + + rdtgrp = region_find_by_minor(iminor(inode)); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + filp->private_data = rdtgrp; + atomic_inc(&rdtgrp->waitcount); + /* Perform a non-seekable open - llseek is not supported */ + filp->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); + + mutex_unlock(&rdtgroup_mutex); + + return 0; +} + +static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) +{ + struct rdtgroup *rdtgrp; + + mutex_lock(&rdtgroup_mutex); + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + filp->private_data = NULL; + atomic_dec(&rdtgrp->waitcount); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) +{ + /* Not supported */ + return -EINVAL; +} + +static const struct vm_operations_struct pseudo_mmap_ops = { + .mremap = pseudo_lock_dev_mremap, +}; + +static int pseudo_lock_dev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long vsize = vma->vm_end - vma->vm_start; + unsigned long off = vma->vm_pgoff << PAGE_SHIFT; + struct pseudo_lock_region *plr; + struct rdtgroup *rdtgrp; + unsigned long physical; + unsigned long psize; + + mutex_lock(&rdtgroup_mutex); + + rdtgrp = filp->private_data; + WARN_ON(!rdtgrp); + if (!rdtgrp) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + plr = rdtgrp->plr; + + if (!plr->d) { + mutex_unlock(&rdtgroup_mutex); + return -ENODEV; + } + + /* + * Task is required to run with affinity to the cpus associated + * with the pseudo-locked region. If this is not the case the task + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. + */ + if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } + + physical = __pa(plr->kmem) >> PAGE_SHIFT; + psize = plr->size - off; + + if (off > plr->size) { + mutex_unlock(&rdtgroup_mutex); + return -ENOSPC; + } + + /* + * Ensure changes are carried directly to the memory being mapped, + * do not allow copy-on-write mapping. 
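+	 * (In practice this means user space must pass MAP_SHARED to
+	 * mmap(); a MAP_PRIVATE mapping does not set VM_SHARED and is
+	 * rejected below.)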
+ */ + if (!(vma->vm_flags & VM_SHARED)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } + + if (vsize > psize) { + mutex_unlock(&rdtgroup_mutex); + return -ENOSPC; + } + + memset(plr->kmem + off, 0, vsize); + + if (remap_pfn_range(vma, vma->vm_start, physical + vma->vm_pgoff, + vsize, vma->vm_page_prot)) { + mutex_unlock(&rdtgroup_mutex); + return -EAGAIN; + } + vma->vm_ops = &pseudo_mmap_ops; + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static const struct file_operations pseudo_lock_dev_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .read = NULL, + .write = NULL, + .open = pseudo_lock_dev_open, + .release = pseudo_lock_dev_release, + .mmap = pseudo_lock_dev_mmap, +}; + +int rdt_pseudo_lock_init(void) +{ + int ret; + + ret = register_chrdev(0, "pseudo_lock", &pseudo_lock_dev_fops); + if (ret < 0) + return ret; + + pseudo_lock_major = ret; + + ret = class_register(&pseudo_lock_class); + if (ret) { + unregister_chrdev(pseudo_lock_major, "pseudo_lock"); + return ret; + } + + return 0; +} + +void rdt_pseudo_lock_release(void) +{ + class_unregister(&pseudo_lock_class); + unregister_chrdev(pseudo_lock_major, "pseudo_lock"); + pseudo_lock_major = 0; +} diff --git a/fs/resctrl/rdtgroup.c b/fs/resctrl/rdtgroup.c new file mode 100644 index 0000000000000000000000000000000000000000..e39f22453d84319467e70b56261ef1a9dbc38fe8 --- /dev/null +++ b/fs/resctrl/rdtgroup.c @@ -0,0 +1,4015 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * User interface for Resource Allocation in Resource Director Technology(RDT) + * + * Copyright (C) 2016 Intel Corporation + * + * Author: Fenghua Yu + * + * More information about RDT be found in the Intel (R) x86 Architecture + * Software Developer Manual. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include "internal.h" + +/* Mutex to protect rdtgroup access. */ +DEFINE_MUTEX(rdtgroup_mutex); + +static struct kernfs_root *rdt_root; +struct rdtgroup rdtgroup_default; +LIST_HEAD(rdt_all_groups); + +/* list of entries for the schemata file */ +LIST_HEAD(resctrl_schema_all); + +/* The filesystem can only be mounted once. */ +bool resctrl_mounted; + +/* Kernel fs node for "info" directory under root */ +static struct kernfs_node *kn_info; + +/* Kernel fs node for "mon_groups" directory under root */ +static struct kernfs_node *kn_mongrp; + +/* Kernel fs node for "mon_data" directory under root */ +static struct kernfs_node *kn_mondata; + +/* + * Used to store the max resource name width and max resource data width + * to display the schemata in a tabular format + */ +int max_name_width, max_data_width; + +static struct seq_buf last_cmd_status; +static char last_cmd_status_buf[512]; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx); +static void rdtgroup_destroy_root(void); + +struct dentry *debugfs_resctrl; + +static bool resctrl_debug; + +void rdt_last_cmd_clear(void) +{ + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_clear(&last_cmd_status); +} + +void rdt_last_cmd_puts(const char *s) +{ + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_puts(&last_cmd_status, s); +} + +void rdt_last_cmd_printf(const char *fmt, ...) 
+{ + va_list ap; + + va_start(ap, fmt); + lockdep_assert_held(&rdtgroup_mutex); + seq_buf_vprintf(&last_cmd_status, fmt, ap); + va_end(ap); +} + +void rdt_staged_configs_clear(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + struct rdt_domain *dom; + + lockdep_assert_held(&rdtgroup_mutex); + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + list_for_each_entry(dom, &r->domains, list) + memset(dom->staged_config, 0, sizeof(dom->staged_config)); + } +} + +static bool resctrl_is_mbm_enabled(void) +{ + return (resctrl_arch_is_mbm_total_enabled() || + resctrl_arch_is_mbm_local_enabled() || + resctrl_arch_is_mbm_bps_enabled()); +} + +static bool resctrl_is_mbm_event(int e) +{ + return (e >= QOS_L3_MBM_TOTAL_EVENT_ID && + e <= QOS_MC_MBM_BPS_EVENT_ID); +} + +/* + * Trivial allocator for CLOSIDs. Use BITMAP APIs to manipulate a bitmap + * of free CLOSIDs. + * + * Using a global CLOSID across all resources has some advantages and + * some drawbacks: + * + We can simply set current's closid to assign a task to a resource + * group. + * + Context switch code can avoid extra memory references deciding which + * CLOSID to load into the PQR_ASSOC MSR + * - We give up some options in configuring resource groups across multi-socket + * systems. + * - Our choices on how to configure each resource become progressively more + * limited as the number of resources grows. + */ +static unsigned long *closid_free_map; +static int closid_free_map_len; + +int closids_supported(void) +{ + return closid_free_map_len; +} + +static void closid_init(void) +{ + struct resctrl_schema *s; + u32 rdt_min_closid = ~0; + + /* Compute rdt_min_closid across all resources */ + list_for_each_entry(s, &resctrl_schema_all, list) + rdt_min_closid = min(rdt_min_closid, s->num_closid); + + closid_free_map = bitmap_alloc(rdt_min_closid, GFP_KERNEL); + bitmap_fill(closid_free_map, rdt_min_closid); + + /* RESCTRL_RESERVED_CLOSID is always reserved for the default group */ + __clear_bit(RESCTRL_RESERVED_CLOSID, closid_free_map); + closid_free_map_len = rdt_min_closid; +} + +static int closid_alloc(void) +{ + int cleanest_closid; + u32 closid; + + lockdep_assert_held(&rdtgroup_mutex); + + if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID) && + resctrl_arch_is_llc_occupancy_enabled()) { + cleanest_closid = resctrl_find_cleanest_closid(); + if (cleanest_closid < 0) + return cleanest_closid; + closid = cleanest_closid; + } else { + closid = find_first_bit(closid_free_map, closid_free_map_len); + if (closid == closid_free_map_len) + return -ENOSPC; + } + + __clear_bit(closid, closid_free_map); + + return closid; +} + +void closid_free(int closid) +{ + lockdep_assert_held(&rdtgroup_mutex); + + __set_bit(closid, closid_free_map); +} + +/** + * closid_allocated - test if provided closid is in use + * @closid: closid to be tested + * + * Return: true if @closid is currently associated with a resource group, + * false if @closid is free + */ +bool closid_allocated(unsigned int closid) +{ + lockdep_assert_held(&rdtgroup_mutex); + + return !test_bit(closid, closid_free_map); +} + +/** + * rdtgroup_mode_by_closid - Return mode of resource group with closid + * @closid: closid if the resource group + * + * Each resource group is associated with a @closid. Here the mode + * of a resource group can be queried by searching for it using its closid. 
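+ * The lookup is a linear walk of rdt_all_groups; callers are expected
+ * to hold rdtgroup_mutex so that the list cannot change underneath.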
+ * + * Return: mode as &enum rdtgrp_mode of resource group with closid @closid + */ +enum rdtgrp_mode rdtgroup_mode_by_closid(int closid) +{ + struct rdtgroup *rdtgrp; + + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (rdtgrp->closid == closid) + return rdtgrp->mode; + } + + return RDT_NUM_MODES; +} + +static const char * const rdt_mode_str[] = { + [RDT_MODE_SHAREABLE] = "shareable", + [RDT_MODE_EXCLUSIVE] = "exclusive", + [RDT_MODE_PSEUDO_LOCKSETUP] = "pseudo-locksetup", + [RDT_MODE_PSEUDO_LOCKED] = "pseudo-locked", +}; + +/** + * rdtgroup_mode_str - Return the string representation of mode + * @mode: the resource group mode as &enum rdtgroup_mode + * + * Return: string representation of valid mode, "unknown" otherwise + */ +static const char *rdtgroup_mode_str(enum rdtgrp_mode mode) +{ + if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES) + return "unknown"; + + return rdt_mode_str[mode]; +} + +/* set uid and gid of rdtgroup dirs and files to that of the creator */ +static int rdtgroup_kn_set_ugid(struct kernfs_node *kn) +{ + struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID, + .ia_uid = current_fsuid(), + .ia_gid = current_fsgid(), }; + + if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) && + gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID)) + return 0; + + return kernfs_setattr(kn, &iattr); +} + +static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft) +{ + struct kernfs_node *kn; + int ret; + + kn = __kernfs_create_file(parent_kn, rft->name, rft->mode, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, + 0, rft->kf_ops, rft, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return 0; +} + +static int rdtgroup_seqfile_show(struct seq_file *m, void *arg) +{ + struct kernfs_open_file *of = m->private; + struct rftype *rft = of->kn->priv; + + if (rft->seq_show) + return rft->seq_show(of, m, arg); + return 0; +} + +static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf, + size_t nbytes, loff_t off) +{ + struct rftype *rft = of->kn->priv; + + if (rft->write) + return rft->write(of, buf, nbytes, off); + + return -EINVAL; +} + +static const struct kernfs_ops rdtgroup_kf_single_ops = { + .atomic_write_len = PAGE_SIZE, + .write = rdtgroup_file_write, + .seq_show = rdtgroup_seqfile_show, +}; + +static const struct kernfs_ops kf_mondata_ops = { + .atomic_write_len = PAGE_SIZE, + .seq_show = rdtgroup_mondata_show, +}; + +static bool is_cpu_list(struct kernfs_open_file *of) +{ + struct rftype *rft = of->kn->priv; + + return rft->flags & RFTYPE_FLAGS_CPUS_LIST; +} + +static int rdtgroup_cpus_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + struct cpumask *mask; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + + if (rdtgrp) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + mask = &rdtgrp->plr->d->cpu_mask; + seq_printf(s, is_cpu_list(of) ? + "%*pbl\n" : "%*pb\n", + cpumask_pr_args(mask)); + } + } else { + seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n", + cpumask_pr_args(&rdtgrp->cpu_mask)); + } + } else { + ret = -ENOENT; + } + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +/* + * Update the PGR_ASSOC MSR on all cpus in @cpu_mask, + * + * Per task closids/rmids must have been set up before calling this function. + * @r may be NULL. 
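+ * When @r is NULL, a NULL pointer is handed to
+ * resctrl_arch_sync_cpu_defaults(); the per-CPU defaults are then
+ * expected to be left as they are, with each CPU merely re-syncing
+ * the closid/rmid of its current task.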
+ */ +static void +update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r) +{ + struct resctrl_cpu_sync defaults; + struct resctrl_cpu_sync *defaults_p = NULL; + + if (r) { + defaults.closid = r->closid; + defaults.rmid = r->mon.rmid; + defaults_p = &defaults; + } + + on_each_cpu_mask(cpu_mask, resctrl_arch_sync_cpu_defaults, defaults_p, + 1); +} + +static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask) +{ + struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp; + struct list_head *head; + + /* Check whether cpus belong to parent ctrl group */ + cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n"); + return -EINVAL; + } + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Give any dropped cpus to parent rdtgroup */ + cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask); + update_closid_rmid(tmpmask, prgrp); + } + + /* + * If we added cpus, remove them from previous group that owned them + * and update per-cpu rmid + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + if (crgrp == rdtgrp) + continue; + cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask, + tmpmask); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + return 0; +} + +static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m) +{ + struct rdtgroup *crgrp; + + cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m); + /* update the child mon group masks as well*/ + list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list) + cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask); +} + +static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask, + cpumask_var_t tmpmask, cpumask_var_t tmpmask1) +{ + struct rdtgroup *r, *crgrp; + struct list_head *head; + + /* Check whether cpus are dropped from this group */ + cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask); + if (!cpumask_empty(tmpmask)) { + /* Can't drop from default group */ + if (rdtgrp == &rdtgroup_default) { + rdt_last_cmd_puts("Can't drop CPUs from default group\n"); + return -EINVAL; + } + + /* Give any dropped cpus to rdtgroup_default */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, tmpmask); + update_closid_rmid(tmpmask, &rdtgroup_default); + } + + /* + * If we added cpus, remove them from previous group and + * the prev group's child groups that owned them + * and update per-cpu closid/rmid. + */ + cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask); + if (!cpumask_empty(tmpmask)) { + list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) { + if (r == rdtgrp) + continue; + cpumask_and(tmpmask1, &r->cpu_mask, tmpmask); + if (!cpumask_empty(tmpmask1)) + cpumask_rdtgrp_clear(r, tmpmask1); + } + update_closid_rmid(tmpmask, rdtgrp); + } + + /* Done pushing/pulling - update this group with new mask */ + cpumask_copy(&rdtgrp->cpu_mask, newmask); + + /* + * Clear child mon group masks since there is a new parent mask + * now and update the rmid for the cpus the child lost. 
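+ * (Illustrative consequence: each child mon group ends up with an empty
+ * cpu_mask, and any of its CPUs still covered by the parent's new mask
+ * are switched back to the parent's RMID by the loop below.)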
+ */ + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask); + update_closid_rmid(tmpmask, rdtgrp); + cpumask_clear(&crgrp->cpu_mask); + } + + return 0; +} + +static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + cpumask_var_t tmpmask, newmask, tmpmask1; + struct rdtgroup *rdtgrp; + int ret; + + if (!buf) + return -EINVAL; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + return -ENOMEM; + } + if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) { + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + return -ENOMEM; + } + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + ret = -ENOENT; + goto unlock; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + if (is_cpu_list(of)) + ret = cpulist_parse(buf, newmask); + else + ret = cpumask_parse(buf, newmask); + + if (ret) { + rdt_last_cmd_puts("Bad CPU list/mask\n"); + goto unlock; + } + + /* check that user didn't specify any offline cpus */ + cpumask_andnot(tmpmask, newmask, cpu_online_mask); + if (!cpumask_empty(tmpmask)) { + ret = -EINVAL; + rdt_last_cmd_puts("Can only assign online CPUs\n"); + goto unlock; + } + + if (rdtgrp->type == RDTCTRL_GROUP) + ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1); + else if (rdtgrp->type == RDTMON_GROUP) + ret = cpus_mon_write(rdtgrp, newmask, tmpmask); + else + ret = -EINVAL; + +unlock: + rdtgroup_kn_unlock(of->kn); + free_cpumask_var(tmpmask); + free_cpumask_var(newmask); + free_cpumask_var(tmpmask1); + + return ret ?: nbytes; +} + +/** + * rdtgroup_remove - the helper to remove resource group safely + * @rdtgrp: resource group to remove + * + * On resource group creation via a mkdir, an extra kernfs_node reference is + * taken to ensure that the rdtgroup structure remains accessible for the + * rdtgroup_kn_unlock() calls where it is removed. + * + * Drop the extra reference here, then free the rdtgroup structure. + * + * Return: void + */ +static void rdtgroup_remove(struct rdtgroup *rdtgrp) +{ + kernfs_put(rdtgrp->kn); + kfree(rdtgrp); +} + +static void _update_task_closid_rmid(void *task) +{ + /* + * If the task is still current on this CPU, update PQR_ASSOC MSR. + * Otherwise, the MSR is updated when the task is scheduled in. + */ + if (task == current) + resctrl_arch_sched_in(task); +} + +static void update_task_closid_rmid(struct task_struct *t) +{ + if (IS_ENABLED(CONFIG_SMP) && task_curr(t)) + smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1); + else + _update_task_closid_rmid(t); +} + +static bool task_in_rdtgroup(struct task_struct *tsk, struct rdtgroup *rdtgrp) +{ + u32 closid, rmid = rdtgrp->mon.rmid; + + if (rdtgrp->type == RDTCTRL_GROUP) + closid = rdtgrp->closid; + else if (rdtgrp->type == RDTMON_GROUP) + closid = rdtgrp->mon.parent->closid; + else + return false; + + return resctrl_arch_match_closid(tsk, closid) && + resctrl_arch_match_rmid(tsk, closid, rmid); +} + +static int __rdtgroup_move_task(struct task_struct *tsk, + struct rdtgroup *rdtgrp) +{ + /* If the task is already in rdtgrp, no need to move the task. */ + if (task_in_rdtgroup(tsk, rdtgrp)) + return 0; + + /* + * Set the task's closid/rmid before the PQR_ASSOC MSR can be + * updated by them. 
+ * + * For ctrl_mon groups, move both closid and rmid. + * For monitor groups, can move the tasks only from + * their parent CTRL group. + */ + if (rdtgrp->type == RDTMON_GROUP && + !resctrl_arch_match_closid(tsk, rdtgrp->mon.parent->closid)) { + rdt_last_cmd_puts("Can't move task to different control group\n"); + return -EINVAL; + } + + if (rdtgrp->type == RDTMON_GROUP) + resctrl_arch_set_closid_rmid(tsk, rdtgrp->mon.parent->closid, + rdtgrp->mon.rmid); + else + resctrl_arch_set_closid_rmid(tsk, rdtgrp->closid, + rdtgrp->mon.rmid); + + /* + * Ensure the task's closid and rmid are written before determining if + * the task is current that will decide if it will be interrupted. + * This pairs with the full barrier between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * By now, the task's closid and rmid are set. If the task is current + * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource + * group go into effect. If the task is not current, the MSR will be + * updated when the task is scheduled in. + */ + update_task_closid_rmid(tsk); + + return 0; +} + +static bool is_closid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_alloc_capable() && (r->type == RDTCTRL_GROUP) && + resctrl_arch_match_closid(t, r->closid)); +} + +static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r) +{ + return (resctrl_arch_mon_capable() && (r->type == RDTMON_GROUP) && + resctrl_arch_match_rmid(t, r->mon.parent->closid, + r->mon.rmid)); +} + +/** + * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group + * @r: Resource group + * + * Return: 1 if tasks have been assigned to @r, 0 otherwise + */ +int rdtgroup_tasks_assigned(struct rdtgroup *r) +{ + struct task_struct *p, *t; + int ret = 0; + + lockdep_assert_held(&rdtgroup_mutex); + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + ret = 1; + break; + } + } + rcu_read_unlock(); + + return ret; +} + +static int rdtgroup_task_write_permission(struct task_struct *task, + struct kernfs_open_file *of) +{ + const struct cred *tcred = get_task_cred(task); + const struct cred *cred = current_cred(); + int ret = 0; + + /* + * Even if we're attaching all tasks in the thread group, we only + * need to check permissions on one of them. 
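+ *
+ * Illustrative example (path and shell output are hypothetical): a
+ * non-root user whose euid matches neither the target task's uid nor
+ * suid sees the move fail:
+ *
+ *	$ echo 1234 > /sys/fs/resctrl/group0/tasks
+ *	sh: write error: Operation not permitted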
+ */ + if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) && + !uid_eq(cred->euid, tcred->uid) && + !uid_eq(cred->euid, tcred->suid)) { + rdt_last_cmd_printf("No permission to move task %d\n", task->pid); + ret = -EPERM; + } + + put_cred(tcred); + return ret; +} + +static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp, + struct kernfs_open_file *of) +{ + struct task_struct *tsk; + int ret; + + rcu_read_lock(); + if (pid) { + tsk = find_task_by_vpid(pid); + if (!tsk) { + rcu_read_unlock(); + rdt_last_cmd_printf("No task %d\n", pid); + return -ESRCH; + } + } else { + tsk = current; + } + + get_task_struct(tsk); + rcu_read_unlock(); + + ret = rdtgroup_task_write_permission(tsk, of); + if (!ret) + ret = __rdtgroup_move_task(tsk, rdtgrp); + + put_task_struct(tsk); + return ret; +} + +static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + char *pid_str; + int ret = 0; + pid_t pid; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + rdt_last_cmd_clear(); + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto unlock; + } + + while (buf && buf[0] != '\0' && buf[0] != '\n') { + pid_str = strim(strsep(&buf, ",")); + + if (kstrtoint(pid_str, 0, &pid)) { + rdt_last_cmd_printf("Task list parsing error pid %s\n", pid_str); + ret = -EINVAL; + break; + } + + if (pid < 0) { + rdt_last_cmd_printf("Invalid pid %d\n", pid); + ret = -EINVAL; + break; + } + + ret = rdtgroup_move_task(pid, rdtgrp, of); + if (ret) { + rdt_last_cmd_printf("Error while processing task %d\n", pid); + break; + } + } + +unlock: + rdtgroup_kn_unlock(of->kn); + + return ret ?: nbytes; +} + +static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s) +{ + struct task_struct *p, *t; + pid_t pid; + + rcu_read_lock(); + for_each_process_thread(p, t) { + if (is_closid_match(t, r) || is_rmid_match(t, r)) { + pid = task_pid_vnr(t); + if (pid) + seq_printf(s, "%d\n", pid); + } + } + rcu_read_unlock(); +} + +static int rdtgroup_tasks_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + show_rdt_tasks(rdtgrp, s); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_closid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->closid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static int rdtgroup_rmid_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + int ret = 0; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (rdtgrp) + seq_printf(s, "%u\n", rdtgrp->mon.rmid); + else + ret = -ENOENT; + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +#ifdef CONFIG_PROC_CPU_RESCTRL + +/* + * A task can only be part of one resctrl control group and of one monitor + * group which is associated to that control group. + * + * 1) res: + * mon: + * + * resctrl is not available. + * + * 2) res:/ + * mon: + * + * Task is part of the root resctrl control group, and it is not associated + * to any monitor group. + * + * 3) res:/ + * mon:mon0 + * + * Task is part of the root resctrl control group and monitor group mon0. 
+ * + * 4) res:group0 + * mon: + * + * Task is part of resctrl control group group0, and it is not associated + * to any monitor group. + * + * 5) res:group0 + * mon:mon1 + * + * Task is part of resctrl control group group0 and monitor group mon1. + */ +int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns, + struct pid *pid, struct task_struct *tsk) +{ + struct rdtgroup *rdtg; + int ret = 0; + + mutex_lock(&rdtgroup_mutex); + + /* Return empty if resctrl has not been mounted. */ + if (!resctrl_mounted) { + seq_puts(s, "res:\nmon:\n"); + goto unlock; + } + + list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) { + struct rdtgroup *crg; + + /* + * Task information is only relevant for shareable + * and exclusive groups. + */ + if (rdtg->mode != RDT_MODE_SHAREABLE && + rdtg->mode != RDT_MODE_EXCLUSIVE) + continue; + + if (!resctrl_arch_match_closid(tsk, rdtg->closid)) + continue; + + seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "", + rdtg->kn->name); + seq_puts(s, "mon:"); + list_for_each_entry(crg, &rdtg->mon.crdtgrp_list, + mon.crdtgrp_list) { + if (!resctrl_arch_match_rmid(tsk, crg->mon.parent->closid, + crg->mon.rmid)) + continue; + seq_printf(s, "%s", crg->kn->name); + break; + } + seq_putc(s, '\n'); + goto unlock; + } + /* + * The above search should succeed. Otherwise return + * with an error. + */ + ret = -ENOENT; +unlock: + mutex_unlock(&rdtgroup_mutex); + + return ret; +} +#endif + +static int rdt_last_cmd_status_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + int len; + + mutex_lock(&rdtgroup_mutex); + len = seq_buf_used(&last_cmd_status); + if (len) + seq_printf(seq, "%.*s", len, last_cmd_status_buf); + else + seq_puts(seq, "ok\n"); + mutex_unlock(&rdtgroup_mutex); + return 0; +} + +static int rdt_num_closids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + + seq_printf(seq, "%u\n", s->num_closid); + return 0; +} + +static int rdt_default_ctrl_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->default_ctrl); + return 0; +} + +static int rdt_min_cbm_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->cache.min_cbm_bits); + return 0; +} + +static int rdt_shareable_bits_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%x\n", r->cache.shareable_bits); + return 0; +} + +/* + * rdt_bit_usage_show - Display current usage of resources + * + * A domain is a shared resource that can now be allocated differently. Here + * we display the current regions of the domain as an annotated bitmask. 
+ * For each domain of this resource its allocation bitmask + * is annotated as below to indicate the current usage of the corresponding bit: + * 0 - currently unused + * X - currently available for sharing and used by software and hardware + * H - currently used by hardware only but available for software use + * S - currently used and shareable by software only + * E - currently used exclusively by one resource group + * P - currently pseudo-locked by one resource group + */ +static int rdt_bit_usage_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + /* + * Use unsigned long even though only 32 bits are used to ensure + * test_bit() is used safely. + */ + unsigned long sw_shareable = 0, hw_shareable = 0; + unsigned long exclusive = 0, pseudo_locked = 0; + struct rdt_resource *r = s->res; + struct rdt_domain *dom; + int i, hwb, swb, excl, psl; + enum rdtgrp_mode mode; + bool sep = false; + u32 ctrl_val; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + hw_shareable = r->cache.shareable_bits; + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_putc(seq, ';'); + sw_shareable = 0; + exclusive = 0; + seq_printf(seq, "%d=", dom->id); + for (i = 0; i < closids_supported(); i++) { + if (!closid_allocated(i)) + continue; + ctrl_val = resctrl_arch_get_config(r, dom, i, + s->conf_type); + mode = rdtgroup_mode_by_closid(i); + switch (mode) { + case RDT_MODE_SHAREABLE: + sw_shareable |= ctrl_val; + break; + case RDT_MODE_EXCLUSIVE: + exclusive |= ctrl_val; + break; + case RDT_MODE_PSEUDO_LOCKSETUP: + /* + * RDT_MODE_PSEUDO_LOCKSETUP is possible + * here but not included since the CBM + * associated with this CLOSID in this mode + * is not initialized and no task or cpu can be + * assigned this CLOSID. + */ + break; + case RDT_MODE_PSEUDO_LOCKED: + case RDT_NUM_MODES: + WARN(1, + "invalid mode for closid %d\n", i); + break; + } + } + for (i = r->cache.cbm_len - 1; i >= 0; i--) { + pseudo_locked = dom->plr ? 
dom->plr->cbm : 0; + hwb = test_bit(i, &hw_shareable); + swb = test_bit(i, &sw_shareable); + excl = test_bit(i, &exclusive); + psl = test_bit(i, &pseudo_locked); + if (hwb && swb) + seq_putc(seq, 'X'); + else if (hwb && !swb) + seq_putc(seq, 'H'); + else if (!hwb && swb) + seq_putc(seq, 'S'); + else if (excl) + seq_putc(seq, 'E'); + else if (psl) + seq_putc(seq, 'P'); + else /* Unused bits remain */ + seq_putc(seq, '0'); + } + sep = true; + } + seq_putc(seq, '\n'); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return 0; +} + +static int rdt_min_bw_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.min_bw); + return 0; +} + +static int rdt_num_rmids_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + + seq_printf(seq, "%d\n", r->num_rmid); + + return 0; +} + +static int rdt_mon_features_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct rdt_resource *r = of->kn->parent->priv; + struct mon_evt *mevt; + + list_for_each_entry(mevt, &r->evt_list, list) { + seq_printf(seq, "%s\n", mevt->name); + if (mevt->configurable) + seq_printf(seq, "%s_config\n", mevt->name); + } + + return 0; +} + +static int rdt_bw_gran_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.bw_gran); + return 0; +} + +static int rdt_delay_linear_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + seq_printf(seq, "%u\n", r->membw.delay_linear); + return 0; +} + +static int max_threshold_occ_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + seq_printf(seq, "%u\n", resctrl_rmid_realloc_threshold); + + return 0; +} + +static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource *r = s->res; + + if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD) + seq_puts(seq, "per-thread\n"); + else + seq_puts(seq, "max\n"); + + return 0; +} + +static ssize_t max_threshold_occ_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + unsigned int bytes; + int ret; + + ret = kstrtouint(buf, 0, &bytes); + if (ret) + return ret; + + if (bytes > resctrl_rmid_realloc_limit) + return -EINVAL; + + resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(bytes); + + return nbytes; +} + +/* + * rdtgroup_mode_show - Display mode of this resource group + */ +static int rdtgroup_mode_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct rdtgroup *rdtgrp; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode)); + + rdtgroup_kn_unlock(of->kn); + return 0; +} + +static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type) +{ + switch (my_type) { + case CDP_CODE: + return CDP_DATA; + case CDP_DATA: + return CDP_CODE; + default: + case CDP_NONE: + return CDP_NONE; + } +} + +static int rdt_has_sparse_bitmasks_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) +{ + struct resctrl_schema *s = of->kn->parent->priv; + struct rdt_resource 
*r = s->res;
+
+ seq_printf(seq, "%u\n", r->cache.arch_has_sparse_bitmasks);
+
+ return 0;
+}
+
+/**
+ * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with others
+ * @r: Resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @type: CDP type of @r.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Checks if provided @cbm intended to be used for @closid on domain
+ * @d overlaps with any other closids or other hardware usage associated
+ * with this domain. If @exclusive is true then only overlaps with
+ * resource groups in exclusive mode will be considered. If @exclusive
+ * is false then overlaps with any resource group or hardware entities
+ * will be considered.
+ *
+ * @cbm is unsigned long, even if only 32 bits are used, to make the
+ * bitmap functions work correctly.
+ *
+ * Return: false if CBM does not overlap, true if it does.
+ */
+static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
+ unsigned long cbm, int closid,
+ enum resctrl_conf_type type, bool exclusive)
+{
+ enum rdtgrp_mode mode;
+ unsigned long ctrl_b;
+ int i;
+
+ /* Check for any overlap with regions used by hardware directly */
+ if (!exclusive) {
+ ctrl_b = r->cache.shareable_bits;
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
+ return true;
+ }
+
+ /* Check for overlap with other resource groups */
+ for (i = 0; i < closids_supported(); i++) {
+ ctrl_b = resctrl_arch_get_config(r, d, i, type);
+ mode = rdtgroup_mode_by_closid(i);
+ if (closid_allocated(i) && i != closid &&
+ mode != RDT_MODE_PSEUDO_LOCKSETUP) {
+ if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
+ if (exclusive) {
+ if (mode == RDT_MODE_EXCLUSIVE)
+ return true;
+ continue;
+ }
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+/**
+ * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
+ * @s: Schema for the resource to which domain instance @d belongs.
+ * @d: The domain instance for which @closid is being tested.
+ * @cbm: Capacity bitmask being tested.
+ * @closid: Intended closid for @cbm.
+ * @exclusive: Only check if overlaps with exclusive resource groups
+ *
+ * Resources that can be allocated using a CBM can use the CBM to control
+ * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
+ * for overlap. The overlap test is not limited to the specific resource for
+ * which the CBM is intended though - when dealing with CDP resources that
+ * share the underlying hardware, the overlap check should also be performed
+ * on the CDP resource sharing the hardware.
+ *
+ * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
+ * overlap test.
+ *
+ * Return: true if CBM overlap detected, false if there is no overlap
+ */
+bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
+ unsigned long cbm, int closid, bool exclusive)
+{
+ enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
+ struct rdt_resource *r = s->res;
+
+ if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
+ exclusive))
+ return true;
+
+ if (!resctrl_arch_get_cdp_enabled(r->rid))
+ return false;
+ return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
+}
+
+/**
+ * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
+ * @rdtgrp: Resource group identified through its closid.
+ * + * An exclusive resource group implies that there should be no sharing of + * its allocated resources. At the time this group is considered to be + * exclusive this test can determine if its current schemata supports this + * setting by testing for overlap with all other resource groups. + * + * Return: true if resource group can be exclusive, false if there is overlap + * with allocations of other resource groups and thus this resource group + * cannot be exclusive. + */ +static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp) +{ + int closid = rdtgrp->closid; + struct resctrl_schema *s; + struct rdt_resource *r; + bool has_cache = false; + struct rdt_domain *d; + u32 ctrl; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || r->rid == RDT_RESOURCE_SMBA) + continue; + has_cache = true; + list_for_each_entry(d, &r->domains, list) { + ctrl = resctrl_arch_get_config(r, d, closid, + s->conf_type); + if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) { + rdt_last_cmd_puts("Schemata overlaps\n"); + return false; + } + } + } + + if (!has_cache) { + rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n"); + return false; + } + + return true; +} + +/* + * rdtgroup_mode_write - Modify the resource group's mode + */ +static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) +{ + struct rdtgroup *rdtgrp; + enum rdtgrp_mode mode; + int ret = 0; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + buf[nbytes - 1] = '\0'; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + rdt_last_cmd_clear(); + + mode = rdtgrp->mode; + + if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) || + (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) || + (!strcmp(buf, "pseudo-locksetup") && + mode == RDT_MODE_PSEUDO_LOCKSETUP) || + (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED)) + goto out; + + if (mode == RDT_MODE_PSEUDO_LOCKED) { + rdt_last_cmd_puts("Cannot change pseudo-locked group\n"); + ret = -EINVAL; + goto out; + } + + if (!strcmp(buf, "shareable")) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_SHAREABLE; + } else if (!strcmp(buf, "exclusive")) { + if (!rdtgroup_mode_test_exclusive(rdtgrp)) { + ret = -EINVAL; + goto out; + } + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + ret = rdtgroup_locksetup_exit(rdtgrp); + if (ret) + goto out; + } + rdtgrp->mode = RDT_MODE_EXCLUSIVE; + } else if (IS_ENABLED(CONFIG_RESCTRL_FS_PSEUDO_LOCK) && + !strcmp(buf, "pseudo-locksetup")) { + ret = rdtgroup_locksetup_enter(rdtgrp); + if (ret) + goto out; + rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP; + } else { + rdt_last_cmd_puts("Unknown or unsupported mode\n"); + ret = -EINVAL; + } + +out: + rdtgroup_kn_unlock(of->kn); + return ret ?: nbytes; +} + +/** + * rdtgroup_cbm_to_size - Translate CBM to size in bytes + * @r: RDT resource to which @d belongs. + * @d: RDT domain instance. + * @cbm: bitmask for which the size should be computed. + * + * The bitmask provided associated with the RDT domain instance @d will be + * translated into how many bytes it represents. 
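+ * For example (illustrative numbers, using the computation described
+ * next): a 32768 KiB cache with a 16 bit CBM gives 2048 KiB per bit, so
+ * a CBM with four bits set represents 8192 KiB.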
The size in bytes is + * computed by first dividing the total cache size by the CBM length to + * determine how many bytes each bit in the bitmask represents. The result + * is multiplied with the number of bits set in the bitmask. + * + * @cbm is unsigned long, even if only 32 bits are used to make the + * bitmap functions work correctly. + */ +unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r, + struct rdt_domain *d, unsigned long cbm) +{ + struct cpu_cacheinfo *ci; + unsigned int size = 0; + int num_b, i; + + num_b = bitmap_weight(&cbm, r->cache.cbm_len); + ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask)); + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == r->cache_level) { + size = ci->info_list[i].size / r->cache.cbm_len * num_b; + break; + } + } + + return size; +} + +/* + * rdtgroup_size_show - Display size in bytes of allocated regions + * + * The "size" file mirrors the layout of the "schemata" file, printing the + * size in bytes of each region instead of the capacity bitmask. + */ +static int rdtgroup_size_show(struct kernfs_open_file *of, + struct seq_file *s, void *v) +{ + struct resctrl_schema *schema; + enum resctrl_conf_type type; + struct rdtgroup *rdtgrp; + struct rdt_resource *r; + struct rdt_domain *d; + unsigned int size; + int ret = 0; + u32 closid; + bool sep; + u32 ctrl; + + rdtgrp = rdtgroup_kn_lock_live(of->kn); + if (!rdtgrp) { + rdtgroup_kn_unlock(of->kn); + return -ENOENT; + } + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + if (!rdtgrp->plr->d) { + rdt_last_cmd_clear(); + rdt_last_cmd_puts("Cache domain offline\n"); + ret = -ENODEV; + } else { + seq_printf(s, "%*s:", max_name_width, + rdtgrp->plr->s->name); + size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res, + rdtgrp->plr->d, + rdtgrp->plr->cbm); + seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size); + } + goto out; + } + + closid = rdtgrp->closid; + + list_for_each_entry(schema, &resctrl_schema_all, list) { + r = schema->res; + type = schema->conf_type; + sep = false; + seq_printf(s, "%*s:", max_name_width, schema->name); + list_for_each_entry(d, &r->domains, list) { + if (sep) + seq_putc(s, ';'); + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) { + size = 0; + } else { + if (is_mba_sc(r)) + ctrl = d->mbps_val[closid]; + else + ctrl = resctrl_arch_get_config(r, d, + closid, + type); + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) + size = ctrl; + else + size = rdtgroup_cbm_to_size(r, d, ctrl); + } + seq_printf(s, "%d=%u", d->id, size); + sep = true; + } + seq_putc(s, '\n'); + } + +out: + rdtgroup_kn_unlock(of->kn); + + return ret; +} + +static void mondata_config_read(struct resctrl_mon_config_info *mon_info) +{ + smp_call_function_any(&mon_info->d->cpu_mask, + resctrl_arch_mon_event_config_read, mon_info, 1); +} + +static int mbm_config_show(struct seq_file *s, struct rdt_resource *r, u32 evtid) +{ + struct resctrl_mon_config_info mon_info = {0}; + struct rdt_domain *dom; + bool sep = false; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + list_for_each_entry(dom, &r->domains, list) { + if (sep) + seq_puts(s, ";"); + + memset(&mon_info, 0, sizeof(struct resctrl_mon_config_info)); + mon_info.r = r; + mon_info.d = dom; + mon_info.evtid = evtid; + mondata_config_read(&mon_info); + + seq_printf(s, "%d=0x%02x", dom->id, mon_info.mon_config); + sep = true; + } + seq_puts(s, "\n"); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return 0; +} + +static int mbm_total_bytes_config_show(struct kernfs_open_file *of, + struct seq_file *seq, void *v) 
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ mbm_config_show(seq, r, QOS_L3_MBM_TOTAL_EVENT_ID);
+
+ return 0;
+}
+
+static int mbm_local_bytes_config_show(struct kernfs_open_file *of,
+ struct seq_file *seq, void *v)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+
+ mbm_config_show(seq, r, QOS_L3_MBM_LOCAL_EVENT_ID);
+
+ return 0;
+}
+
+static int mbm_config_write_domain(struct rdt_resource *r,
+ struct rdt_domain *d, u32 evtid, u32 val)
+{
+ struct resctrl_mon_config_info mon_info = {0};
+
+ /*
+ * Read the current config value first. If both are the same then
+ * no need to write it again.
+ */
+ mon_info.r = r;
+ mon_info.d = d;
+ mon_info.evtid = evtid;
+ mondata_config_read(&mon_info);
+ if (mon_info.mon_config == val)
+ return 0;
+
+ mon_info.mon_config = val;
+
+ /*
+ * Update MSR_IA32_EVT_CFG_BASE MSR on one of the CPUs in the
+ * domain. The MSRs at offsets from MSR_IA32_EVT_CFG_BASE
+ * are scoped at the domain level. Writing any of these MSRs
+ * on one CPU is observed by all the CPUs in the domain.
+ */
+ smp_call_function_any(&d->cpu_mask, resctrl_arch_mon_event_config_write,
+ &mon_info, 1);
+ if (mon_info.err) {
+ rdt_last_cmd_puts("Invalid event configuration\n");
+ return mon_info.err;
+ }
+
+ /*
+ * When an Event Configuration is changed, the bandwidth counters
+ * for all RMIDs and Events will be cleared by the hardware. The
+ * hardware also sets MSR_IA32_QM_CTR.Unavailable (bit 62) for
+ * every RMID on the next read of any event. Subsequent reads
+ * will have MSR_IA32_QM_CTR.Unavailable (bit 62) cleared while
+ * the RMID is tracked by the hardware. Clear the mbm_local and
+ * mbm_total counts for all the RMIDs.
+ */
+ resctrl_arch_reset_rmid_all(r, d);
+
+ return 0;
+}
+
+static int mon_config_write(struct rdt_resource *r, char *tok, u32 evtid)
+{
+ char *dom_str = NULL, *id_str;
+ unsigned long dom_id, val;
+ struct rdt_domain *d;
+ int err;
+
+ /* Walking r->domains, ensure it can't race with cpuhp */
+ lockdep_assert_cpus_held();
+
+next:
+ if (!tok || tok[0] == '\0')
+ return 0;
+
+ /* Start processing the strings for each domain */
+ dom_str = strim(strsep(&tok, ";"));
+ id_str = strsep(&dom_str, "=");
+
+ if (!id_str || kstrtoul(id_str, 10, &dom_id)) {
+ rdt_last_cmd_puts("Missing '=' or non-numeric domain id\n");
+ return -EINVAL;
+ }
+
+ if (!dom_str || kstrtoul(dom_str, 16, &val)) {
+ rdt_last_cmd_puts("Non-numeric event configuration value\n");
+ return -EINVAL;
+ }
+
+ /* The value from the user must be within the supported set of events */
+ if ((val & r->mbm_cfg_mask) != val) {
+ rdt_last_cmd_printf("Invalid event configuration: max valid mask is 0x%02x\n",
+ r->mbm_cfg_mask);
+ return -EINVAL;
+ }
+
+ list_for_each_entry(d, &r->domains, list) {
+ if (d->id == dom_id) {
+ err = mbm_config_write_domain(r, d, evtid, val);
+ if (err)
+ return err;
+ goto next;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static ssize_t mbm_total_bytes_config_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ struct rdt_resource *r = of->kn->parent->priv;
+ int ret;
+
+ /* Valid input requires a trailing newline */
+ if (nbytes == 0 || buf[nbytes - 1] != '\n')
+ return -EINVAL;
+
+ cpus_read_lock();
+ mutex_lock(&rdtgroup_mutex);
+
+ rdt_last_cmd_clear();
+
+ buf[nbytes - 1] = '\0';
+
+ ret = mon_config_write(r, buf, QOS_L3_MBM_TOTAL_EVENT_ID);
+
+ mutex_unlock(&rdtgroup_mutex);
+ cpus_read_unlock();
+
+ return ret ?: nbytes;
+}
+
+static ssize_t mbm_local_bytes_config_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off) +{ + struct rdt_resource *r = of->kn->parent->priv; + int ret; + + /* Valid input requires a trailing newline */ + if (nbytes == 0 || buf[nbytes - 1] != '\n') + return -EINVAL; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + buf[nbytes - 1] = '\0'; + + ret = mon_config_write(r, buf, QOS_L3_MBM_LOCAL_EVENT_ID); + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + return ret ?: nbytes; +} + +/* rdtgroup information files for one cache resource. */ +static struct rftype res_common_files[] = { + { + .name = "last_cmd_status", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_last_cmd_status_show, + .fflags = RFTYPE_TOP_INFO, + }, + { + .name = "num_closids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_closids_show, + .fflags = RFTYPE_CTRL_INFO, + }, + { + .name = "mon_features", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_mon_features_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "num_rmids", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_num_rmids_show, + .fflags = RFTYPE_MON_INFO, + }, + { + .name = "cbm_mask", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_default_ctrl_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_cbm_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_cbm_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "shareable_bits", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_shareable_bits_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "bit_usage", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bit_usage_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "min_bandwidth", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_min_bw_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "bandwidth_gran", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_bw_gran_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + { + .name = "delay_linear", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_delay_linear_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB, + }, + /* + * Platform specific which (if any) capabilities are provided by + * thread_throttle_mode. Defer "fflags" initialization to platform + * discovery. 
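+ * (thread_throttle_mode_init() below fills in "fflags" for this file once
+ * the MBA resource has been probed.)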
+ */ + { + .name = "thread_throttle_mode", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_thread_throttle_mode_show, + }, + { + .name = "max_threshold_occupancy", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = max_threshold_occ_write, + .seq_show = max_threshold_occ_show, + .fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "mbm_total_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_total_bytes_config_show, + .write = mbm_total_bytes_config_write, + }, + { + .name = "mbm_local_bytes_config", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = mbm_local_bytes_config_show, + .write = mbm_local_bytes_config_write, + }, + { + .name = "cpus", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "cpus_list", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_cpus_write, + .seq_show = rdtgroup_cpus_show, + .flags = RFTYPE_FLAGS_CPUS_LIST, + .fflags = RFTYPE_BASE, + }, + { + .name = "tasks", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_tasks_write, + .seq_show = rdtgroup_tasks_show, + .fflags = RFTYPE_BASE, + }, + { + .name = "mon_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_rmid_show, + .fflags = RFTYPE_MON_BASE | RFTYPE_DEBUG, + }, + { + .name = "schemata", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_schemata_write, + .seq_show = rdtgroup_schemata_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "mode", + .mode = 0644, + .kf_ops = &rdtgroup_kf_single_ops, + .write = rdtgroup_mode_write, + .seq_show = rdtgroup_mode_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "size", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_size_show, + .fflags = RFTYPE_CTRL_BASE, + }, + { + .name = "sparse_masks", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdt_has_sparse_bitmasks_show, + .fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_CACHE, + }, + { + .name = "ctrl_hw_id", + .mode = 0444, + .kf_ops = &rdtgroup_kf_single_ops, + .seq_show = rdtgroup_closid_show, + .fflags = RFTYPE_CTRL_BASE | RFTYPE_DEBUG, + }, + +}; + +static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags) +{ + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + lockdep_assert_held(&rdtgroup_mutex); + + if (resctrl_debug) + fflags |= RFTYPE_DEBUG; + + for (rft = rfts; rft < rfts + len; rft++) { + if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) { + ret = rdtgroup_add_file(kn, rft); + if (ret) + goto error; + } + } + + return 0; +error: + pr_warn("Failed to add %s, err=%d\n", rft->name, ret); + while (--rft >= rfts) { + if ((fflags & rft->fflags) == rft->fflags) + kernfs_remove_by_name(kn, rft->name); + } + return ret; +} + +static struct rftype *rdtgroup_get_rftype_by_name(const char *name) +{ + struct rftype *rfts, *rft; + int len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + return rft; + } + + return NULL; +} + +static void thread_throttle_mode_init(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + struct rftype *rft; + + if (!r->alloc_capable || + r->membw.throttle_mode == THREAD_THROTTLE_UNDEFINED) + return; + + rft = 
rdtgroup_get_rftype_by_name("thread_throttle_mode"); + if (!rft) + return; + + rft->fflags = RFTYPE_CTRL_INFO | RFTYPE_RES_MB; +} + +void mbm_config_rftype_init(const char *config) +{ + struct rftype *rft; + + rft = rdtgroup_get_rftype_by_name(config); + if (rft) + rft->fflags = RFTYPE_MON_INFO | RFTYPE_RES_CACHE; +} + +/** + * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * + * The permissions of named resctrl file, directory, or link are modified + * to not allow read, write, or execute by any user. + * + * WARNING: This function is intended to communicate to the user that the + * resctrl file has been locked down - that it is not relevant to the + * particular state the system finds itself in. It should not be relied + * on to protect from user access because after the file's permissions + * are restricted the user can still change the permissions using chmod + * from the command line. + * + * Return: 0 on success, <0 on failure. + */ +int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn; + int ret = 0; + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + iattr.ia_mode = S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode = S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode = S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +/** + * rdtgroup_kn_mode_restore - Restore user access to named resctrl file + * @r: The resource group with which the file is associated. + * @name: Name of the file + * @mask: Mask of permissions that should be restored + * + * Restore the permissions of the named file. If @name is a directory the + * permissions of its parent will be used. + * + * Return: 0 on success, <0 on failure. 
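+ *
+ * For example (illustrative), a caller can restore default access to the
+ * "tasks" file, filtered by @mask, with:
+ *
+ *	rdtgroup_kn_mode_restore(rdtgrp, "tasks", 0777);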
+ */ +int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name, + umode_t mask) +{ + struct iattr iattr = {.ia_valid = ATTR_MODE,}; + struct kernfs_node *kn, *parent; + struct rftype *rfts, *rft; + int ret, len; + + rfts = res_common_files; + len = ARRAY_SIZE(res_common_files); + + for (rft = rfts; rft < rfts + len; rft++) { + if (!strcmp(rft->name, name)) + iattr.ia_mode = rft->mode & mask; + } + + kn = kernfs_find_and_get_ns(r->kn, name, NULL); + if (!kn) + return -ENOENT; + + switch (kernfs_type(kn)) { + case KERNFS_DIR: + parent = kernfs_get_parent(kn); + if (parent) { + iattr.ia_mode |= parent->mode; + kernfs_put(parent); + } + iattr.ia_mode |= S_IFDIR; + break; + case KERNFS_FILE: + iattr.ia_mode |= S_IFREG; + break; + case KERNFS_LINK: + iattr.ia_mode |= S_IFLNK; + break; + } + + ret = kernfs_setattr(kn, &iattr); + kernfs_put(kn); + return ret; +} + +static int rdtgroup_mkdir_info_resdir(void *priv, char *name, + unsigned long fflags) +{ + struct kernfs_node *kn_subdir; + int ret; + + kn_subdir = kernfs_create_dir(kn_info, name, + kn_info->mode, priv); + if (IS_ERR(kn_subdir)) + return PTR_ERR(kn_subdir); + + ret = rdtgroup_kn_set_ugid(kn_subdir); + if (ret) + return ret; + + ret = rdtgroup_add_files(kn_subdir, fflags); + if (!ret) + kernfs_activate(kn_subdir); + + return ret; +} + +static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn) +{ + enum resctrl_res_level i; + struct resctrl_schema *s; + struct rdt_resource *r; + unsigned long fflags; + char name[32]; + int ret; + + /* create the directory */ + kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL); + if (IS_ERR(kn_info)) + return PTR_ERR(kn_info); + + ret = rdtgroup_add_files(kn_info, RFTYPE_TOP_INFO); + if (ret) + goto out_destroy; + + /* loop over enabled controls, these are all alloc_capable */ + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + fflags = r->fflags | RFTYPE_CTRL_INFO; + ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags); + if (ret) + goto out_destroy; + } + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + fflags = r->fflags | RFTYPE_MON_INFO; + sprintf(name, "%s_MON", r->name); + ret = rdtgroup_mkdir_info_resdir(r, name, fflags); + if (ret) + goto out_destroy; + } + + ret = rdtgroup_kn_set_ugid(kn_info); + if (ret) + goto out_destroy; + + kernfs_activate(kn_info); + + return 0; + +out_destroy: + kernfs_remove(kn_info); + return ret; +} + +static int +mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp, + char *name, struct kernfs_node **dest_kn) +{ + struct kernfs_node *kn; + int ret; + + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + if (dest_kn) + *dest_kn = kn; + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + kernfs_activate(kn); + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +static inline bool is_mba_linear(void) +{ + return resctrl_arch_get_resource(RDT_RESOURCE_MBA)->membw.delay_linear; +} + +static int mba_sc_domain_allocate(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 num_closid = resctrl_arch_get_num_closid(r); + int cpu = cpumask_any(&d->cpu_mask); + int i; + + d->mbps_val = kcalloc_node(num_closid, sizeof(*d->mbps_val), + GFP_KERNEL, cpu_to_node(cpu)); + if (!d->mbps_val) + return -ENOMEM; + + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + + return 0; +} + +static void 
mba_sc_domain_destroy(struct rdt_resource *r, + struct rdt_domain *d) +{ + kfree(d->mbps_val); + d->mbps_val = NULL; +} + +/* + * MBA software controller is supported only if + * MBM is supported and MBA is in linear scale. + */ +static bool supports_mba_mbps(void) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + + return (resctrl_arch_is_mbm_local_enabled() && + r->alloc_capable && is_mba_linear()); +} + +/* + * Enable or disable the MBA software controller + * which helps user specify bandwidth in MBps. + */ +static int set_mba_sc(bool mba_sc) +{ + struct rdt_resource *r = resctrl_arch_get_resource(RDT_RESOURCE_MBA); + u32 num_closid = resctrl_arch_get_num_closid(r); + struct rdt_domain *d; + int i; + + if (!supports_mba_mbps() || mba_sc == is_mba_sc(r)) + return -EINVAL; + + r->membw.mba_sc = mba_sc; + + list_for_each_entry(d, &r->domains, list) { + for (i = 0; i < num_closid; i++) + d->mbps_val[i] = MBA_MAX_MBPS; + } + + return 0; +} + +/* + * We don't allow rdtgroup directories to be created anywhere + * except the root directory. Thus when looking for the rdtgroup + * structure for a kernfs node we are either looking at a directory, + * in which case the rdtgroup structure is pointed at by the "priv" + * field, otherwise we have a file, and need only look to the parent + * to find the rdtgroup. + */ +static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn) +{ + if (kernfs_type(kn) == KERNFS_DIR) { + /* + * All the resource directories use "kn->priv" + * to point to the "struct rdtgroup" for the + * resource. "info" and its subdirectories don't + * have rdtgroup structures, so return NULL here. + */ + if (kn == kn_info || kn->parent == kn_info) + return NULL; + else + return kn->priv; + } else { + return kn->parent->priv; + } +} + +static void rdtgroup_kn_get(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + atomic_inc(&rdtgrp->waitcount); + kernfs_break_active_protection(kn); +} + +static void rdtgroup_kn_put(struct rdtgroup *rdtgrp, struct kernfs_node *kn) +{ + if (atomic_dec_and_test(&rdtgrp->waitcount) && + (rdtgrp->flags & RDT_DELETED)) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + kernfs_unbreak_active_protection(kn); + rdtgroup_remove(rdtgrp); + } else { + kernfs_unbreak_active_protection(kn); + } +} + +struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return NULL; + + rdtgroup_kn_get(rdtgrp, kn); + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + /* Was this group deleted while we waited? 
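+ * If so, return NULL: the caller's rdtgroup_kn_unlock() still drops the
+ * reference taken above, and rdtgroup_kn_put() frees the group once its
+ * waitcount reaches zero.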
*/ + if (rdtgrp->flags & RDT_DELETED) + return NULL; + + return rdtgrp; +} + +void rdtgroup_kn_unlock(struct kernfs_node *kn) +{ + struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn); + + if (!rdtgrp) + return; + + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + + rdtgroup_kn_put(rdtgrp, kn); +} + +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **mon_data_kn); + +static void rdt_disable_ctx(void) +{ + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); + set_mba_sc(false); + + resctrl_debug = false; +} + +static int rdt_enable_ctx(struct rdt_fs_context *ctx) +{ + int ret = 0; + + if (ctx->enable_cdpl2) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true); + if (ret) + goto out_done; + } + + if (ctx->enable_cdpl3) { + ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true); + if (ret) + goto out_cdpl2; + } + + if (ctx->enable_mba_mbps) { + ret = set_mba_sc(true); + if (ret) + goto out_cdpl3; + } + + if (ctx->enable_debug) + resctrl_debug = true; + + return 0; + +out_cdpl3: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false); +out_cdpl2: + resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false); +out_done: + return ret; +} + +static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type) +{ + struct resctrl_schema *s; + const char *suffix = ""; + int ret, cl; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + s->res = r; + s->num_closid = resctrl_arch_get_num_closid(r); + if (resctrl_arch_get_cdp_enabled(r->rid)) + s->num_closid /= 2; + + s->conf_type = type; + switch (type) { + case CDP_CODE: + suffix = "CODE"; + break; + case CDP_DATA: + suffix = "DATA"; + break; + case CDP_NONE: + suffix = ""; + break; + } + + ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix); + if (ret >= sizeof(s->name)) { + kfree(s); + return -EINVAL; + } + + cl = strlen(s->name); + + /* + * If CDP is supported by this resource, but not enabled, + * include the suffix. This ensures the tabular format of the + * schemata file does not change between mounts of the filesystem. + */ + if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid)) + cl += 4; + + if (cl > max_name_width) + max_name_width = cl; + + /* + * Choose a width for the resource data based on the resource that has + * widest name and cbm. 
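+ * (Illustrative: the schemata-style output pads resource names to
+ * max_name_width and per-domain values to max_data_width, so rows such
+ * as "L3DATA:" and "MB:" line up column-wise.)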
+ */ + max_data_width = max(max_data_width, r->data_width); + + INIT_LIST_HEAD(&s->list); + list_add(&s->list, &resctrl_schema_all); + + return 0; +} + +static int schemata_list_create(void) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + int ret = 0; + + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->alloc_capable) + continue; + + if (resctrl_arch_get_cdp_enabled(r->rid)) { + ret = schemata_list_add(r, CDP_CODE); + if (ret) + break; + + ret = schemata_list_add(r, CDP_DATA); + } else { + ret = schemata_list_add(r, CDP_NONE); + } + + if (ret) + break; + } + + return ret; +} + +static void schemata_list_destroy(void) +{ + struct resctrl_schema *s, *tmp; + + list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) { + list_del(&s->list); + kfree(s); + } +} + +static int rdt_get_tree(struct fs_context *fc) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdt_fs_context *ctx = rdt_fc2context(fc); + unsigned long flags = RFTYPE_CTRL_BASE; + struct rdt_domain *dom; + int ret; + + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + /* + * resctrl file system can only be mounted once. + */ + if (resctrl_mounted) { + ret = -EBUSY; + goto out; + } + + ret = rdtgroup_setup_root(ctx); + if (ret) + goto out; + + ret = rdt_enable_ctx(ctx); + if (ret) + goto out_root; + + ret = schemata_list_create(); + if (ret) { + schemata_list_destroy(); + goto out_ctx; + } + + closid_init(); + + if (resctrl_arch_mon_capable()) + flags |= RFTYPE_MON; + + ret = rdtgroup_add_files(rdtgroup_default.kn, flags); + if (ret) + goto out_schemata_free; + + kernfs_activate(rdtgroup_default.kn); + + ret = rdtgroup_create_info_dir(rdtgroup_default.kn); + if (ret < 0) + goto out_schemata_free; + + if (resctrl_arch_mon_capable()) { + ret = mongroup_create_dir(rdtgroup_default.kn, + &rdtgroup_default, "mon_groups", + &kn_mongrp); + if (ret < 0) + goto out_info; + + ret = mkdir_mondata_all(rdtgroup_default.kn, + &rdtgroup_default, &kn_mondata); + if (ret < 0) + goto out_mongrp; + rdtgroup_default.mon.mon_data_kn = kn_mondata; + } + + ret = rdt_pseudo_lock_init(); + if (ret) + goto out_mondata; + + ret = kernfs_get_tree(fc); + if (ret < 0) + goto out_psl; + + if (resctrl_arch_alloc_capable()) + resctrl_arch_enable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_enable_mon(); + + if (resctrl_arch_alloc_capable() || resctrl_arch_mon_capable()) + resctrl_mounted = true; + + if (resctrl_is_mbm_enabled()) { + list_for_each_entry(dom, &l3->domains, list) + mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + goto out; + +out_psl: + rdt_pseudo_lock_release(); +out_mondata: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mondata); +out_mongrp: + if (resctrl_arch_mon_capable()) + kernfs_remove(kn_mongrp); +out_info: + kernfs_remove(kn_info); +out_schemata_free: + schemata_list_destroy(); +out_ctx: + rdt_disable_ctx(); +out_root: + rdtgroup_destroy_root(); +out: + rdt_last_cmd_clear(); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); + return ret; +} + +enum rdt_param { + Opt_cdp, + Opt_cdpl2, + Opt_mba_mbps, + Opt_debug, + nr__rdt_params +}; + +static const struct fs_parameter_spec rdt_fs_parameters[] = { + fsparam_flag("cdp", Opt_cdp), + fsparam_flag("cdpl2", Opt_cdpl2), + fsparam_flag("mba_MBps", Opt_mba_mbps), + fsparam_flag("debug", Opt_debug), + {} +}; + +static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + struct 
fs_parse_result result; + int opt; + + opt = fs_parse(fc, rdt_fs_parameters, param, &result); + if (opt < 0) + return opt; + + switch (opt) { + case Opt_cdp: + ctx->enable_cdpl3 = true; + return 0; + case Opt_cdpl2: + ctx->enable_cdpl2 = true; + return 0; + case Opt_mba_mbps: + if (!supports_mba_mbps()) + return -EINVAL; + ctx->enable_mba_mbps = true; + return 0; + case Opt_debug: + ctx->enable_debug = true; + return 0; + } + + return -EINVAL; +} + +static void rdt_fs_context_free(struct fs_context *fc) +{ + struct rdt_fs_context *ctx = rdt_fc2context(fc); + + kernfs_free_fs_context(fc); + kfree(ctx); +} + +static const struct fs_context_operations rdt_fs_context_ops = { + .free = rdt_fs_context_free, + .parse_param = rdt_parse_param, + .get_tree = rdt_get_tree, +}; + +static int rdt_init_fs_context(struct fs_context *fc) +{ + struct rdt_fs_context *ctx; + + ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->kfc.magic = RDTGROUP_SUPER_MAGIC; + fc->fs_private = &ctx->kfc; + fc->ops = &rdt_fs_context_ops; + put_user_ns(fc->user_ns); + fc->user_ns = get_user_ns(&init_user_ns); + fc->global = true; + return 0; +} + +/* + * Move tasks from one to the other group. If @from is NULL, then all tasks + * in the systems are moved unconditionally (used for teardown). + * + * If @mask is not NULL the cpus on which moved tasks are running are set + * in that mask so the update smp function call is restricted to affected + * cpus. + */ +static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to, + struct cpumask *mask) +{ + struct task_struct *p, *t; + + read_lock(&tasklist_lock); + for_each_process_thread(p, t) { + if (!from || is_closid_match(t, from) || + is_rmid_match(t, from)) { + resctrl_arch_set_closid_rmid(t, to->closid, + to->mon.rmid); + + /* + * Order the closid/rmid stores above before the loads + * in task_curr(). This pairs with the full barrier + * between the rq->curr update and + * resctrl_arch_sched_in() during context switch. + */ + smp_mb(); + + /* + * If the task is on a CPU, set the CPU in the mask. + * The detection is inaccurate as tasks might move or + * schedule before the smp function call takes place. + * In such a case the function call is pointless, but + * there is no other side effect. + */ + if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t)) + cpumask_set_cpu(task_cpu(t), mask); + } + } + read_unlock(&tasklist_lock); +} + +static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp) +{ + struct rdtgroup *sentry, *stmp; + struct list_head *head; + + head = &rdtgrp->mon.crdtgrp_list; + list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) { + free_rmid(sentry->closid, sentry->mon.rmid); + list_del(&sentry->mon.crdtgrp_list); + + if (atomic_read(&sentry->waitcount) != 0) + sentry->flags = RDT_DELETED; + else + rdtgroup_remove(sentry); + } +} + +/* + * Forcibly remove all of subdirectories under root. + */ +static void rmdir_all_sub(void) +{ + struct rdtgroup *rdtgrp, *tmp; + + /* Move all tasks to the default resource group */ + rdt_move_group_tasks(NULL, &rdtgroup_default, NULL); + + list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) { + /* Free any child rmids */ + free_all_child_rdtgrp(rdtgrp); + + /* Remove each rdtgroup other than root */ + if (rdtgrp == &rdtgroup_default) + continue; + + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) + rdtgroup_pseudo_lock_remove(rdtgrp); + + /* + * Give any CPUs back to the default group. 
We cannot copy + * cpu_online_mask because a CPU might have executed the + * offline callback already, but is still marked online. + */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + kernfs_remove(rdtgrp->kn); + list_del(&rdtgrp->rdtgroup_list); + + if (atomic_read(&rdtgrp->waitcount) != 0) + rdtgrp->flags = RDT_DELETED; + else + rdtgroup_remove(rdtgrp); + } + /* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */ + update_closid_rmid(cpu_online_mask, &rdtgroup_default); + + kernfs_remove(kn_info); + kernfs_remove(kn_mongrp); + kernfs_remove(kn_mondata); +} + +static void rdt_kill_sb(struct super_block *sb) +{ + cpus_read_lock(); + mutex_lock(&rdtgroup_mutex); + + rdt_disable_ctx(); + + /* Put everything back to default values. */ + resctrl_arch_reset_resources(); + + rmdir_all_sub(); + rdt_pseudo_lock_release(); + rdtgroup_default.mode = RDT_MODE_SHAREABLE; + schemata_list_destroy(); + rdtgroup_destroy_root(); + if (resctrl_arch_alloc_capable()) + resctrl_arch_disable_alloc(); + if (resctrl_arch_mon_capable()) + resctrl_arch_disable_mon(); + resctrl_mounted = false; + kernfs_kill_sb(sb); + mutex_unlock(&rdtgroup_mutex); + cpus_read_unlock(); +} + +static struct file_system_type rdt_fs_type = { + .name = "resctrl", + .init_fs_context = rdt_init_fs_context, + .parameters = rdt_fs_parameters, + .kill_sb = rdt_kill_sb, +}; + +static int mon_addfile(struct kernfs_node *parent_kn, const char *name, + void *priv) +{ + struct kernfs_node *kn; + int ret = 0; + + kn = __kernfs_create_file(parent_kn, name, 0444, + GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0, + &kf_mondata_ops, priv, NULL, NULL); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + kernfs_remove(kn); + return ret; + } + + return ret; +} + +/* + * Remove all subdirectories of mon_data of ctrl_mon groups + * and monitor groups with given domain id. + */ +static void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, + unsigned int dom_id) +{ + struct rdtgroup *prgrp, *crgrp; + char name[32]; + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + sprintf(name, "mon_%s_%02d", r->name, dom_id); + kernfs_remove_by_name(prgrp->mon.mon_data_kn, name); + + list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list) + kernfs_remove_by_name(crgrp->mon.mon_data_kn, name); + } +} + +static int mkdir_mondata_subdir(struct kernfs_node *parent_kn, + struct rdt_domain *d, + struct rdt_resource *r, struct rdtgroup *prgrp) +{ + union mon_data_bits priv; + struct kernfs_node *kn; + struct mon_evt *mevt; + struct rmid_read rr; + char name[32]; + int ret; + + sprintf(name, "mon_%s_%02d", r->name, d->id); + /* create the directory */ + kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp); + if (IS_ERR(kn)) + return PTR_ERR(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) + goto out_destroy; + + if (WARN_ON(list_empty(&r->evt_list))) { + ret = -EPERM; + goto out_destroy; + } + + priv.u.rid = r->rid; + priv.u.domid = d->id; + list_for_each_entry(mevt, &r->evt_list, list) { + priv.u.evtid = mevt->evtid; + ret = mon_addfile(kn, mevt->name, priv.priv); + if (ret) + goto out_destroy; + + if (resctrl_is_mbm_event(mevt->evtid)) + mon_event_read(&rr, r, d, prgrp, mevt->evtid, true); + } + kernfs_activate(kn); + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/* + * Add all subdirectories of mon_data for "ctrl_mon" groups + * and "monitor" groups with given domain id. 
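+ * For example, when an L3 domain with id 1 comes online, this creates
+ * a "mon_L3_01" directory under the mon_data directory of every group.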
+ */ +static void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, + struct rdt_domain *d) +{ + struct kernfs_node *parent_kn; + struct rdtgroup *prgrp, *crgrp; + struct list_head *head; + + list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) { + parent_kn = prgrp->mon.mon_data_kn; + mkdir_mondata_subdir(parent_kn, d, r, prgrp); + + head = &prgrp->mon.crdtgrp_list; + list_for_each_entry(crgrp, head, mon.crdtgrp_list) { + parent_kn = crgrp->mon.mon_data_kn; + mkdir_mondata_subdir(parent_kn, d, r, crgrp); + } + } +} + +static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn, + struct rdt_resource *r, + struct rdtgroup *prgrp) +{ + struct rdt_domain *dom; + int ret; + + /* Walking r->domains, ensure it can't race with cpuhp */ + lockdep_assert_cpus_held(); + + list_for_each_entry(dom, &r->domains, list) { + ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp); + if (ret) + return ret; + } + + return 0; +} + +/* + * This creates a directory mon_data which contains the monitored data. + * + * mon_data has one directory for each domain, named + * in the format mon_<domain_name>_<domain_id>. For example, a mon_data + * with L3 domains looks as below: + * ./mon_data: + * mon_L3_00 + * mon_L3_01 + * mon_L3_02 + * ... + * + * Each domain directory has one file per event: + * ./mon_L3_00/: + * llc_occupancy + * + */ +static int mkdir_mondata_all(struct kernfs_node *parent_kn, + struct rdtgroup *prgrp, + struct kernfs_node **dest_kn) +{ + enum resctrl_res_level i; + struct rdt_resource *r; + struct kernfs_node *kn; + int ret; + + /* + * Create the mon_data directory first. + */ + ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn); + if (ret) + return ret; + + if (dest_kn) + *dest_kn = kn; + + /* + * Create the subdirectories for each domain. Note that all events + * in a domain like L3 are grouped into a resource whose domain is L3 + */ + for (i = 0; i < RDT_NUM_RESOURCES; i++) { + r = resctrl_arch_get_resource(i); + if (!r->mon_capable) + continue; + + ret = mkdir_mondata_subdir_alldom(kn, r, prgrp); + if (ret) + goto out_destroy; + } + + return 0; + +out_destroy: + kernfs_remove(kn); + return ret; +} + +/** + * cbm_ensure_valid - Enforce validity on provided CBM + * @_val: Candidate CBM + * @r: RDT resource to which the CBM belongs + * + * The provided CBM represents all cache portions available for use. This + * may be represented by a bitmap that does not consist of contiguous ones + * and thus be an invalid CBM. + * Here the provided CBM is forced to be a valid CBM by only considering + * the first set of contiguous bits as valid and clearing all other bits. + * The intention here is to provide a valid default CBM with which a new + * resource group is initialized. The user can follow this with a + * modification to the CBM if the default does not satisfy the + * requirements. + */ +static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r) +{ + unsigned int cbm_len = r->cache.cbm_len; + unsigned long first_bit, zero_bit; + unsigned long val = _val; + + if (!val) + return 0; + + first_bit = find_first_bit(&val, cbm_len); + zero_bit = find_next_zero_bit(&val, cbm_len, first_bit); + + /* Clear any remaining bits to ensure contiguous region */ + bitmap_clear(&val, zero_bit, cbm_len - zero_bit); + return (u32)val; +} + +/* + * Initialize cache resources per RDT domain + * + * Set the RDT domain up to start off with all usable allocations. That is, + * all shareable and unused bits. All-zero CBM is invalid.
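+ *
+ * An illustrative example: with a 16-bit CBM, shareable_bits 0x0003 and
+ * one other group owning 0x00f0 in exclusive mode, the candidate mask is
+ * 0x0003 | 0xff0c = 0xff0f; cbm_ensure_valid() then keeps only the first
+ * contiguous run of set bits, giving an initial CBM of 0x000f.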
+ */ +static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s, + u32 closid) +{ + enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type); + enum resctrl_conf_type t = s->conf_type; + struct resctrl_staged_config *cfg; + struct rdt_resource *r = s->res; + u32 used_b = 0, unused_b = 0; + unsigned long tmp_cbm; + enum rdtgrp_mode mode; + u32 peer_ctl, ctrl_val; + int i; + + cfg = &d->staged_config[t]; + cfg->have_new_ctrl = false; + cfg->new_ctrl = r->cache.shareable_bits; + used_b = r->cache.shareable_bits; + for (i = 0; i < closids_supported(); i++) { + if (closid_allocated(i) && i != closid) { + mode = rdtgroup_mode_by_closid(i); + if (mode == RDT_MODE_PSEUDO_LOCKSETUP) + /* + * ctrl values for locksetup aren't relevant + * until the schemata is written, and the mode + * becomes RDT_MODE_PSEUDO_LOCKED. + */ + continue; + /* + * If CDP is active include peer domain's + * usage to ensure there is no overlap + * with an exclusive group. + */ + if (resctrl_arch_get_cdp_enabled(r->rid)) + peer_ctl = resctrl_arch_get_config(r, d, i, + peer_type); + else + peer_ctl = 0; + ctrl_val = resctrl_arch_get_config(r, d, i, + s->conf_type); + used_b |= ctrl_val | peer_ctl; + if (mode == RDT_MODE_SHAREABLE) + cfg->new_ctrl |= ctrl_val | peer_ctl; + } + } + if (d->plr && d->plr->cbm > 0) + used_b |= d->plr->cbm; + unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1); + unused_b &= BIT_MASK(r->cache.cbm_len) - 1; + cfg->new_ctrl |= unused_b; + /* + * Force the initial CBM to be valid, user can + * modify the CBM based on system availability. + */ + cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r); + /* + * Assign the u32 CBM to an unsigned long to ensure that + * bitmap_weight() does not access out-of-bound memory. + */ + tmp_cbm = cfg->new_ctrl; + if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) { + rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id); + return -ENOSPC; + } + cfg->have_new_ctrl = true; + + return 0; +} + +/* + * Initialize cache resources with default values. + * + * A new RDT group is being created on an allocation capable (CAT) + * supporting system. Set this group up to start off with all usable + * allocations. + * + * If there are no more shareable bits available on any domain then + * the entire allocation will fail. + */ +static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid) +{ + struct rdt_domain *d; + int ret; + + list_for_each_entry(d, &s->res->domains, list) { + ret = __init_one_rdt_domain(d, s, closid); + if (ret < 0) + return ret; + } + + return 0; +} + +/* Initialize MBA resource with default values. */ +static void rdtgroup_init_mba(struct rdt_resource *r, u32 closid) +{ + struct resctrl_staged_config *cfg; + struct rdt_domain *d; + + list_for_each_entry(d, &r->domains, list) { + if (is_mba_sc(r)) { + d->mbps_val[closid] = MBA_MAX_MBPS; + continue; + } + + cfg = &d->staged_config[CDP_NONE]; + cfg->new_ctrl = r->default_ctrl; + cfg->have_new_ctrl = true; + } +} + +/* Initialize the RDT group's allocations. 
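Called when a new ctrl_mon group is created.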
*/ +static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp) +{ + struct resctrl_schema *s; + struct rdt_resource *r; + int ret = 0; + + rdt_staged_configs_clear(); + + list_for_each_entry(s, &resctrl_schema_all, list) { + r = s->res; + if (r->rid == RDT_RESOURCE_MBA || + r->rid == RDT_RESOURCE_SMBA) { + rdtgroup_init_mba(r, rdtgrp->closid); + if (is_mba_sc(r)) + continue; + } else { + ret = rdtgroup_init_cat(s, rdtgrp->closid); + if (ret < 0) + goto out; + } + + ret = resctrl_arch_update_domains(r, rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Failed to initialize allocations\n"); + goto out; + } + + } + + rdtgrp->mode = RDT_MODE_SHAREABLE; + +out: + rdt_staged_configs_clear(); + return ret; +} + +static int mkdir_rdt_prepare_rmid_alloc(struct rdtgroup *rdtgrp) +{ + int ret; + + if (!resctrl_arch_mon_capable()) + return 0; + + ret = alloc_rmid(rdtgrp->closid); + if (ret < 0) { + rdt_last_cmd_puts("Out of RMIDs\n"); + return ret; + } + rdtgrp->mon.rmid = ret; + + ret = mkdir_mondata_all(rdtgrp->kn, rdtgrp, &rdtgrp->mon.mon_data_kn); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + return ret; + } + + return 0; +} + +static void mkdir_rdt_prepare_rmid_free(struct rdtgroup *rgrp) +{ + if (resctrl_arch_mon_capable()) + free_rmid(rgrp->closid, rgrp->mon.rmid); +} + +static int mkdir_rdt_prepare(struct kernfs_node *parent_kn, + const char *name, umode_t mode, + enum rdt_group_type rtype, struct rdtgroup **r) +{ + struct rdtgroup *prdtgrp, *rdtgrp; + unsigned long files = 0; + struct kernfs_node *kn; + int ret; + + prdtgrp = rdtgroup_kn_lock_live(parent_kn); + if (!prdtgrp) { + ret = -ENODEV; + goto out_unlock; + } + + if (rtype == RDTMON_GROUP && + (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) { + ret = -EINVAL; + rdt_last_cmd_puts("Pseudo-locking in progress\n"); + goto out_unlock; + } + + /* allocate the rdtgroup. */ + rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL); + if (!rdtgrp) { + ret = -ENOSPC; + rdt_last_cmd_puts("Kernel out of memory\n"); + goto out_unlock; + } + *r = rdtgrp; + rdtgrp->mon.parent = prdtgrp; + rdtgrp->type = rtype; + INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list); + + /* kernfs creates the directory for rdtgrp */ + kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp); + if (IS_ERR(kn)) { + ret = PTR_ERR(kn); + rdt_last_cmd_puts("kernfs create error\n"); + goto out_free_rgrp; + } + rdtgrp->kn = kn; + + /* + * kernfs_remove() will drop the reference count on "kn" which + * will free it. But we still need it to stick around for the + * rdtgroup_kn_unlock(kn) call. Take one extra reference here, + * which will be dropped by kernfs_put() in rdtgroup_remove(). + */ + kernfs_get(kn); + + ret = rdtgroup_kn_set_ugid(kn); + if (ret) { + rdt_last_cmd_puts("kernfs perm error\n"); + goto out_destroy; + } + + if (rtype == RDTCTRL_GROUP) { + files = RFTYPE_BASE | RFTYPE_CTRL; + if (resctrl_arch_mon_capable()) + files |= RFTYPE_MON; + } else { + files = RFTYPE_BASE | RFTYPE_MON; + } + + ret = rdtgroup_add_files(kn, files); + if (ret) { + rdt_last_cmd_puts("kernfs fill error\n"); + goto out_destroy; + } + + /* + * The caller unlocks the parent_kn upon success. 
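+ * On failure it is unlocked below via rdtgroup_kn_unlock().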
+ */ + return 0; + +out_destroy: + kernfs_put(rdtgrp->kn); + kernfs_remove(rdtgrp->kn); +out_free_rgrp: + kfree(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp) +{ + kernfs_remove(rgrp->kn); + rdtgroup_remove(rgrp); +} + +/* + * Create a monitor group under the "mon_groups" directory of a control + * and monitor group (ctrl_mon). This is a resource group + * to monitor a subset of tasks and cpus in its parent ctrl_mon group. + */ +static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp, *prgrp; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp); + if (ret) + return ret; + + prgrp = rdtgrp->mon.parent; + rdtgrp->closid = prgrp->closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) { + mkdir_rdt_prepare_clean(rdtgrp); + goto out_unlock; + } + + kernfs_activate(rdtgrp->kn); + + /* + * Add the rdtgrp to the list of rdtgrps the parent + * ctrl_mon group has to track. + */ + list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list); + +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * These are rdtgroups created under the root directory. They can be used + * to allocate and monitor resources. + */ +static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, + const char *name, umode_t mode) +{ + struct rdtgroup *rdtgrp; + struct kernfs_node *kn; + u32 closid; + int ret; + + ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp); + if (ret) + return ret; + + kn = rdtgrp->kn; + ret = closid_alloc(); + if (ret < 0) { + rdt_last_cmd_puts("Out of CLOSIDs\n"); + goto out_common_fail; + } + closid = ret; + ret = 0; + + rdtgrp->closid = closid; + + ret = mkdir_rdt_prepare_rmid_alloc(rdtgrp); + if (ret) + goto out_closid_free; + + kernfs_activate(rdtgrp->kn); + + ret = rdtgroup_init_alloc(rdtgrp); + if (ret < 0) + goto out_rmid_free; + + list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); + + if (resctrl_arch_mon_capable()) { + /* + * Create an empty mon_groups directory to hold the subset + * of tasks and cpus to monitor. + */ + ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL); + if (ret) { + rdt_last_cmd_puts("kernfs subdir error\n"); + goto out_del_list; + } + } + + goto out_unlock; + +out_del_list: + list_del(&rdtgrp->rdtgroup_list); +out_rmid_free: + mkdir_rdt_prepare_rmid_free(rdtgrp); +out_closid_free: + closid_free(closid); +out_common_fail: + mkdir_rdt_prepare_clean(rdtgrp); +out_unlock: + rdtgroup_kn_unlock(parent_kn); + return ret; +} + +/* + * We allow creating mon groups only within a directory called "mon_groups" + * which is present in every ctrl_mon group. Check if this is a valid + * "mon_groups" directory. + * + * 1. The directory should be named "mon_groups". + * 2. The mon group itself should "not" be named "mon_groups". + * This makes sure "mon_groups" directory always has a ctrl_mon group + * as parent. + */ +static bool is_mon_groups(struct kernfs_node *kn, const char *name) +{ + return (!strcmp(kn->name, "mon_groups") && + strcmp(name, "mon_groups")); +} + +static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name, + umode_t mode) +{ + /* Do not accept '\n' to avoid an unparsable situation.
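A newline in a group name would make any file that lists group names ambiguous.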
*/ + if (strchr(name, '\n')) + return -EINVAL; + + /* + * If the parent directory is the root directory and RDT + * allocation is supported, add a control and monitoring + * subdirectory + */ + if (resctrl_arch_alloc_capable() && parent_kn == rdtgroup_default.kn) + return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode); + + /* + * If RDT monitoring is supported and the parent directory is a valid + * "mon_groups" directory, add a monitoring subdirectory. + */ + if (resctrl_arch_mon_capable() && is_mon_groups(parent_kn, name)) + return rdtgroup_mkdir_mon(parent_kn, name, mode); + + return -EPERM; +} + +static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the parent group */ + rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask); + + /* Update per cpu rmid of the moved CPUs first */ + closid = rdtgrp->closid; + rmid = prdtgrp->mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + rdtgrp->flags = RDT_DELETED; + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + + /* + * Remove the rdtgrp from the parent ctrl_mon group's list + */ + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_del(&rdtgrp->mon.crdtgrp_list); + + kernfs_remove(rdtgrp->kn); + + return 0; +} + +static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp) +{ + rdtgrp->flags = RDT_DELETED; + list_del(&rdtgrp->rdtgroup_list); + + kernfs_remove(rdtgrp->kn); + return 0; +} + +static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask) +{ + u32 closid, rmid; + int cpu; + + /* Give any tasks back to the default group */ + rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask); + + /* Give any CPUs back to the default group */ + cpumask_or(&rdtgroup_default.cpu_mask, + &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask); + + /* Update per cpu closid and rmid of the moved CPUs first */ + closid = rdtgroup_default.closid; + rmid = rdtgroup_default.mon.rmid; + for_each_cpu(cpu, &rdtgrp->cpu_mask) + resctrl_arch_set_cpu_default_closid_rmid(cpu, closid, rmid); + + /* + * Update the MSR on moved CPUs and CPUs which have moved + * task running on them. + */ + cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask); + update_closid_rmid(tmpmask, NULL); + + free_rmid(rdtgrp->closid, rdtgrp->mon.rmid); + closid_free(rdtgrp->closid); + + rdtgroup_ctrl_remove(rdtgrp); + + /* + * Free all the child monitor group rmids. + */ + free_all_child_rdtgrp(rdtgrp); + + return 0; +} + +static int rdtgroup_rmdir(struct kernfs_node *kn) +{ + struct kernfs_node *parent_kn = kn->parent; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret = 0; + + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) + return -ENOMEM; + + rdtgrp = rdtgroup_kn_lock_live(kn); + if (!rdtgrp) { + ret = -EPERM; + goto out; + } + + /* + * If the rdtgroup is a ctrl_mon group and parent directory + * is the root directory, remove the ctrl_mon group. + * + * If the rdtgroup is a mon group and parent directory + * is a valid "mon_groups" directory, remove the mon group. 
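+ *
+ * For example (illustrative paths, resctrl mounted at /sys/fs/resctrl):
+ *
+ *   rmdir /sys/fs/resctrl/grp0                 removes a ctrl_mon group
+ *   rmdir /sys/fs/resctrl/grp0/mon_groups/m0   removes a mon group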
+ */ + if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn && + rdtgrp != &rdtgroup_default) { + if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP || + rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) { + ret = rdtgroup_ctrl_remove(rdtgrp); + } else { + ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask); + } + } else if (rdtgrp->type == RDTMON_GROUP && + is_mon_groups(parent_kn, kn->name)) { + ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask); + } else { + ret = -EPERM; + } + +out: + rdtgroup_kn_unlock(kn); + free_cpumask_var(tmpmask); + return ret; +} + +/** + * mongrp_reparent() - replace parent CTRL_MON group of a MON group + * @rdtgrp: the MON group whose parent should be replaced + * @new_prdtgrp: replacement parent CTRL_MON group for @rdtgrp + * @cpus: cpumask provided by the caller for use during this call + * + * Replaces the parent CTRL_MON group for a MON group, resulting in all member + * tasks' CLOSID immediately changing to that of the new parent group. + * Monitoring data for the group is unaffected by this operation. + */ +static void mongrp_reparent(struct rdtgroup *rdtgrp, + struct rdtgroup *new_prdtgrp, + cpumask_var_t cpus) +{ + struct rdtgroup *prdtgrp = rdtgrp->mon.parent; + + WARN_ON(rdtgrp->type != RDTMON_GROUP); + WARN_ON(new_prdtgrp->type != RDTCTRL_GROUP); + + /* Nothing to do when simply renaming a MON group. */ + if (prdtgrp == new_prdtgrp) + return; + + WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list)); + list_move_tail(&rdtgrp->mon.crdtgrp_list, + &new_prdtgrp->mon.crdtgrp_list); + + rdtgrp->mon.parent = new_prdtgrp; + rdtgrp->closid = new_prdtgrp->closid; + + /* Propagate updated closid to all tasks in this group. */ + rdt_move_group_tasks(rdtgrp, rdtgrp, cpus); + + update_closid_rmid(cpus, NULL); +} + +static int rdtgroup_rename(struct kernfs_node *kn, + struct kernfs_node *new_parent, const char *new_name) +{ + struct rdtgroup *new_prdtgrp; + struct rdtgroup *rdtgrp; + cpumask_var_t tmpmask; + int ret; + + rdtgrp = kernfs_to_rdtgroup(kn); + new_prdtgrp = kernfs_to_rdtgroup(new_parent); + if (!rdtgrp || !new_prdtgrp) + return -ENOENT; + + /* Release both kernfs active_refs before obtaining rdtgroup mutex. */ + rdtgroup_kn_get(rdtgrp, kn); + rdtgroup_kn_get(new_prdtgrp, new_parent); + + mutex_lock(&rdtgroup_mutex); + + rdt_last_cmd_clear(); + + /* + * Don't allow kernfs_to_rdtgroup() to return a parent rdtgroup if + * either kernfs_node is a file. + */ + if (kernfs_type(kn) != KERNFS_DIR || + kernfs_type(new_parent) != KERNFS_DIR) { + rdt_last_cmd_puts("Source and destination must be directories"); + ret = -EPERM; + goto out; + } + + if ((rdtgrp->flags & RDT_DELETED) || (new_prdtgrp->flags & RDT_DELETED)) { + ret = -ENOENT; + goto out; + } + + if (rdtgrp->type != RDTMON_GROUP || !kn->parent || + !is_mon_groups(kn->parent, kn->name)) { + rdt_last_cmd_puts("Source must be a MON group\n"); + ret = -EPERM; + goto out; + } + + if (!is_mon_groups(new_parent, new_name)) { + rdt_last_cmd_puts("Destination must be a mon_groups subdirectory\n"); + ret = -EPERM; + goto out; + } + + /* + * If the MON group is monitoring CPUs, the CPUs must be assigned to the + * current parent CTRL_MON group and therefore cannot be assigned to + * the new parent, making the move illegal. 
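+ *
+ * A MON group with no assigned CPUs can be moved with a plain rename,
+ * for example (illustrative paths):
+ *
+ *   mv /sys/fs/resctrl/g1/mon_groups/m0 /sys/fs/resctrl/g2/mon_groups/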
+ */ + if (!cpumask_empty(&rdtgrp->cpu_mask) && + rdtgrp->mon.parent != new_prdtgrp) { + rdt_last_cmd_puts("Cannot move a MON group that monitors CPUs\n"); + ret = -EPERM; + goto out; + } + + /* + * Allocate the cpumask for use in mongrp_reparent() to avoid the + * possibility of failing to allocate it after kernfs_rename() has + * succeeded. + */ + if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + /* + * Perform all input validation and allocations needed to ensure + * mongrp_reparent() will succeed before calling kernfs_rename(), + * otherwise it would be necessary to revert this call if + * mongrp_reparent() failed. + */ + ret = kernfs_rename(kn, new_parent, new_name); + if (!ret) + mongrp_reparent(rdtgrp, new_prdtgrp, tmpmask); + + free_cpumask_var(tmpmask); + +out: + mutex_unlock(&rdtgroup_mutex); + rdtgroup_kn_put(rdtgrp, kn); + rdtgroup_kn_put(new_prdtgrp, new_parent); + return ret; +} + +static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf) +{ + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3)) + seq_puts(seq, ",cdp"); + + if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2)) + seq_puts(seq, ",cdpl2"); + + if (is_mba_sc(resctrl_arch_get_resource(RDT_RESOURCE_MBA))) + seq_puts(seq, ",mba_MBps"); + + if (resctrl_debug) + seq_puts(seq, ",debug"); + + return 0; +} + +static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = { + .mkdir = rdtgroup_mkdir, + .rmdir = rdtgroup_rmdir, + .rename = rdtgroup_rename, + .show_options = rdtgroup_show_options, +}; + +static int rdtgroup_setup_root(struct rdt_fs_context *ctx) +{ + rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops, + KERNFS_ROOT_CREATE_DEACTIVATED | + KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK, + &rdtgroup_default); + if (IS_ERR(rdt_root)) + return PTR_ERR(rdt_root); + + ctx->kfc.root = rdt_root; + rdtgroup_default.kn = kernfs_root_to_node(rdt_root); + + return 0; +} + +static void rdtgroup_destroy_root(void) +{ + kernfs_destroy_root(rdt_root); + rdtgroup_default.kn = NULL; +} + +static void rdtgroup_setup_default(void) +{ + mutex_lock(&rdtgroup_mutex); + + rdtgroup_default.closid = RESCTRL_RESERVED_CLOSID; + rdtgroup_default.mon.rmid = RESCTRL_RESERVED_RMID; + rdtgroup_default.type = RDTCTRL_GROUP; + INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list); + + list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups); + + mutex_unlock(&rdtgroup_mutex); +} + +static void domain_destroy_mon_state(struct rdt_domain *d) +{ + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + kfree(d->mbm_local); +} + +void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) + mba_sc_domain_destroy(r, d); + + if (!r->mon_capable) + goto out_unlock; + + /* + * If resctrl is mounted, remove all the + * per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + rmdir_mondata_subdir_allrdtgrp(r, d->id); + + if (resctrl_is_mbm_enabled()) + cancel_delayed_work(&d->mbm_over); + if (resctrl_arch_is_llc_occupancy_enabled() && has_busy_rmid(d)) { + /* + * When a package is going down, forcefully + * decrement rmid->ebusy. There is no way to know + * that the L3 was flushed and hence may lead to + * incorrect counts in rare scenarios, but leaving + * the RMID as busy creates RMID leaks if the + * package never comes back. 
+ */ + __check_limbo(d, true); + cancel_delayed_work(&d->cqm_limbo); + } + + domain_destroy_mon_state(d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d) +{ + u32 idx_limit = resctrl_arch_system_num_rmid_idx(); + size_t tsize; + + if (resctrl_arch_is_llc_occupancy_enabled()) { + d->rmid_busy_llc = bitmap_zalloc(idx_limit, GFP_KERNEL); + if (!d->rmid_busy_llc) + return -ENOMEM; + } + if (resctrl_arch_is_mbm_total_enabled()) { + tsize = sizeof(*d->mbm_total); + d->mbm_total = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_total) { + bitmap_free(d->rmid_busy_llc); + return -ENOMEM; + } + } + if (resctrl_arch_is_mbm_local_enabled()) { + tsize = sizeof(*d->mbm_local); + d->mbm_local = kcalloc(idx_limit, tsize, GFP_KERNEL); + if (!d->mbm_local) { + bitmap_free(d->rmid_busy_llc); + kfree(d->mbm_total); + return -ENOMEM; + } + } + + return 0; +} + +int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d) +{ + int err = 0; + + mutex_lock(&rdtgroup_mutex); + + if (supports_mba_mbps() && r->rid == RDT_RESOURCE_MBA) { + /* RDT_RESOURCE_MBA is never mon_capable */ + err = mba_sc_domain_allocate(r, d); + goto out_unlock; + } + + if (!r->mon_capable) + goto out_unlock; + + err = domain_setup_mon_state(r, d); + if (err) + goto out_unlock; + + if (resctrl_is_mbm_enabled()) { + INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow); + mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL, + RESCTRL_PICK_ANY_CPU); + } + + if (resctrl_arch_is_llc_occupancy_enabled()) + INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo); + + /* + * If the filesystem is not mounted then only the default resource group + * exists. Creation of its directories is deferred until mount time + * by rdt_get_tree() calling mkdir_mondata_all(). + * If resctrl is mounted, add per domain monitor data directories. + */ + if (resctrl_mounted && resctrl_arch_mon_capable()) + mkdir_mondata_subdir_allrdtgrp(r, d); + +out_unlock: + mutex_unlock(&rdtgroup_mutex); + + return err; +} + +void resctrl_online_cpu(unsigned int cpu) +{ + mutex_lock(&rdtgroup_mutex); + /* The CPU is set in default rdtgroup after online. 
*/ + cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask); + mutex_unlock(&rdtgroup_mutex); +} + +static void clear_childcpus(struct rdtgroup *r, unsigned int cpu) +{ + struct rdtgroup *cr; + + list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) { + if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) + break; + } +} + +void resctrl_offline_cpu(unsigned int cpu) +{ + struct rdt_resource *l3 = resctrl_arch_get_resource(RDT_RESOURCE_L3); + struct rdtgroup *rdtgrp; + struct rdt_domain *d; + + mutex_lock(&rdtgroup_mutex); + list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) { + if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) { + clear_childcpus(rdtgrp, cpu); + break; + } + } + + if (!l3->mon_capable) + goto out_unlock; + + d = resctrl_get_domain_from_cpu(cpu, l3); + if (d) { + if (resctrl_is_mbm_enabled() && cpu == d->mbm_work_cpu) { + cancel_delayed_work(&d->mbm_over); + mbm_setup_overflow_handler(d, 0, cpu); + } + if (resctrl_arch_is_llc_occupancy_enabled() && + cpu == d->cqm_work_cpu && has_busy_rmid(d)) { + cancel_delayed_work(&d->cqm_limbo); + cqm_setup_limbo_handler(d, 0, cpu); + } + } + +out_unlock: + mutex_unlock(&rdtgroup_mutex); +} + +/* + * resctrl_init - resctrl filesystem initialization + * + * Setup resctrl file system including set up root, create mount point, + * register resctrl filesystem, and initialize files under root directory. + * + * Return: 0 on success or -errno + */ +int resctrl_init(void) +{ + int ret = 0; + + seq_buf_init(&last_cmd_status, last_cmd_status_buf, + sizeof(last_cmd_status_buf)); + + rdtgroup_setup_default(); + + thread_throttle_mode_init(); + + ret = resctrl_mon_resource_init(); + if (ret) + return ret; + + ret = sysfs_create_mount_point(fs_kobj, "resctrl"); + if (ret) + return ret; + + ret = register_filesystem(&rdt_fs_type); + if (ret) + goto cleanup_mountpoint; + + /* + * Adding the resctrl debugfs directory here may not be ideal since + * it would let the resctrl debugfs directory appear on the debugfs + * filesystem before the resctrl filesystem is mounted. + * It may also be ok since that would enable debugging of RDT before + * resctrl is mounted. + * The reason why the debugfs directory is created here and not in + * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and + * during the debugfs directory creation also &sb->s_type->i_mutex_key + * (the lockdep class of inode->i_rwsem). Other filesystem + * interactions (eg. SyS_getdents) have the lock ordering: + * &sb->s_type->i_mutex_key --> &mm->mmap_lock + * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex + * is taken, thus creating dependency: + * &mm->mmap_lock --> rdtgroup_mutex for the latter that can cause + * issues considering the other two lock dependencies. + * By creating the debugfs directory here we avoid a dependency + * that may cause deadlock (even though file operations cannot + * occur until the filesystem is mounted, but I do not know how to + * tell lockdep that). 
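+ * In short, creating the directory while holding rdtgroup_mutex would add
+ * the dependency rdtgroup_mutex --> i_mutex_key, closing the cycle
+ * i_mutex_key --> mmap_lock --> rdtgroup_mutex --> i_mutex_key.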
+ */ + debugfs_resctrl = debugfs_create_dir("resctrl", NULL); + + return 0; + +cleanup_mountpoint: + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + return ret; +} + +void resctrl_exit(void) +{ + debugfs_remove_recursive(debugfs_resctrl); + unregister_filesystem(&rdt_fs_type); + sysfs_remove_mount_point(fs_kobj, "resctrl"); + + resctrl_mon_resource_exit(); +} diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h index 3751ae69432f1285533a61db8e0c31d42ed5fa31..8104c262bbae9b6d517841827b127ce22c584861 100644 --- a/include/acpi/actbl2.h +++ b/include/acpi/actbl2.h @@ -897,7 +897,8 @@ enum acpi_madt_type { ACPI_MADT_TYPE_APLIC = 26, ACPI_MADT_TYPE_PLIC = 27, ACPI_MADT_TYPE_RESERVED = 28, /* 28 to 0x7F are reserved */ - ACPI_MADT_TYPE_OEM_RESERVED = 0x80 /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_OEM_RESERVED = 0x80, /* 0x80 to 0xFF are reserved for OEM use */ + ACPI_MADT_TYPE_PHYTIUM_2500 = 128 }; /* diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h index be1dd4c1a917447c1d20d59ef0de7df423758e1d..82eba57ac423a0ee6822d085b9d0cc7509d72c69 100644 --- a/include/acpi/ghes.h +++ b/include/acpi/ghes.h @@ -133,4 +133,104 @@ static inline int ghes_notify_sea(void) { return -ENOENT; } struct notifier_block; extern void ghes_register_report_chain(struct notifier_block *nb); extern void ghes_unregister_report_chain(struct notifier_block *nb); + +#ifdef CONFIG_YITIAN_CPER_RAWDATA +#pragma pack(1) +struct yitian_raw_data_header { + uint32_t signature; /* 'r' 'a' 'w' 'd' */ + uint8_t type; + uint8_t common_reg_nr; + /* one record may have multiple sub-record (up to 6) */ + uint8_t sub_type[6]; +}; + +struct yitian_ras_common_reg { + uint64_t fr; + uint64_t ctrl; + uint64_t status; + uint64_t addr; + uint64_t misc0; + uint64_t misc1; + uint64_t misc2; + uint64_t misc3; +}; + +enum yitian_ras_type { + ERR_TYPE_GENERIC = 0x40, + ERR_TYPE_CORE = 0x41, + ERR_TYPE_GIC = 0x42, + ERR_TYPE_CMN = 0x43, + ERR_TYPE_SMMU = 0x44, + ERR_TYPE_DDR = 0x50, + ERR_TYPE_PCI = 0x60 +}; + +enum cmn_node_type { + NODE_TYPE_DVM = 0x1, + NODE_TYPE_CFG = 0x2, + NODE_TYPE_DTC = 0x3, + NODE_TYPE_HN_I = 0x4, + NODE_TYPE_HN_F = 0x5, + NODE_TYPE_XP = 0x6, + NODE_TYPE_SBSX = 0x7, + NODE_TYPE_MPAM_S = 0x8, + NODE_TYPE_MPAM_NS = 0x9, + NODE_TYPE_RN_I = 0xA, + NODE_TYPE_RN_D = 0xD, + NODE_TYPE_RN_SAM = 0xF, + NODE_TYPE_HN_P = 0x11, + /* Coherent Multichip Link (CML) node types */ + NODE_TYPE_CML_BASE = 0x100, + NODE_TYPE_CXRA = 0x100, + NODE_TYPE_CXHA = 0x101, + NODE_TYPE_CXLA = 0x102, + NODE_TYPE_CCRA = 0x103, + NODE_TYPE_CCHA = 0x104, + NODE_TYPE_CCLA = 0x105, +}; + +struct yitian_ddr_sys_reg { + uint64_t esr; + uint64_t elr; + uint64_t far; + uint64_t scr; + uint64_t sctlr; + uint64_t lr; +}; + +struct yitian_ddr_ecc_data { + uint32_t eccerrcnt; + uint32_t eccstat; + uint32_t adveccstat; + uint32_t eccsymbol; + uint32_t eccerrcntstat; + uint32_t eccerrcnt0; + uint32_t eccerrcnt1; + uint32_t ecccaddr0; + uint32_t ecccaddr1; + uint32_t ecccdata0; + uint32_t ecccdata1; + uint32_t eccuaddr0; + uint32_t eccuaddr1; + uint32_t eccudata0; + uint32_t eccudata1; +}; + +struct yitian_ddr_raw_data { + uint32_t intr; /* interrupt num, valid for interrupt only, for exception intr=0 */ + uint8_t ex_type; /* 1:sync exception 2:interrupt 3:Serror */ + uint8_t el_nr; /* error el, only valid for ex_type==1, 0:el0 1:el1 2:el2 */ + uint8_t err_type; /* 1:ecc 2:CA parity 3:R/W CRC */ + struct yitian_ddr_sys_reg sys_regs; /* Only valid for ex_type==1 */ + struct yitian_ddr_ecc_data ecc_data; /* Only valid for err_type==1 */ +}; + 
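For illustration, a minimal walker over this layout might look as follows. The function name and the pr_info() format are hypothetical, the comparison assumes the 'r' 'a' 'w' 'd' signature bytes are read as a little-endian u32, and it relies on the yitian_estatus_for_each_raw_reg_common() helper defined just after the #pragma pack() below:

static void yitian_dump_common_regs(struct yitian_raw_data_header *header)
{
	struct yitian_ras_common_reg *reg;
	int nr = 0;

	/* 'r' 'a' 'w' 'd' read as a little-endian u32 */
	if (header->signature != 0x64776172)
		return;

	yitian_estatus_for_each_raw_reg_common(header, reg, nr)
		pr_info("reg %d: status=%llx addr=%llx\n", nr,
			(unsigned long long)reg->status,
			(unsigned long long)reg->addr);
}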
+#pragma pack() + +#define yitian_estatus_for_each_raw_reg_common(header, reg, nr) \ + for (reg = (struct yitian_ras_common_reg *)(header + 1); \ + nr < header->common_reg_nr; \ + reg++, nr++) +#endif /* CONFIG_YITIAN_CPER_RAWDATA */ + #endif /* GHES_H */ diff --git a/include/acpi/pdc_sw64.h b/include/acpi/pdc_sw64.h new file mode 100644 index 0000000000000000000000000000000000000000..4724f10e8c6af2f6c1f9553e00b0d3880fb5b661 --- /dev/null +++ b/include/acpi/pdc_sw64.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_PDC_SW64_H +#define _ASM_PDC_SW64_H + +#define ACPI_PDC_P_FFH (0x0001) +#define ACPI_PDC_C_C1_HALT (0x0002) +#define ACPI_PDC_T_FFH (0x0004) +#define ACPI_PDC_SMP_C1PT (0x0008) +#define ACPI_PDC_SMP_C2C3 (0x0010) +#define ACPI_PDC_SMP_P_SWCOORD (0x0020) +#define ACPI_PDC_SMP_C_SWCOORD (0x0040) +#define ACPI_PDC_SMP_T_SWCOORD (0x0080) +#define ACPI_PDC_C_C1_FFH (0x0100) +#define ACPI_PDC_C_C2C3_FFH (0x0200) +#define ACPI_PDC_SMP_P_HWCOORD (0x0800) + +#define ACPI_PDC_EST_CAPABILITY_SMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_EST_CAPABILITY_SWSMP (ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_SMP_P_SWCOORD | \ + ACPI_PDC_SMP_P_HWCOORD | \ + ACPI_PDC_P_FFH) + +#define ACPI_PDC_C_CAPABILITY_SMP (ACPI_PDC_SMP_C2C3 | \ + ACPI_PDC_SMP_C1PT | \ + ACPI_PDC_C_C1_HALT | \ + ACPI_PDC_C_C1_FFH | \ + ACPI_PDC_C_C2C3_FFH) + +#endif /* _ASM_PDC_SW64_H */ diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h index 9d0479f50f97f4c1b9cd717253d342c7ed2cd57e..be1ce406f48189c47eef7192bfd9f82e1ee185e1 100644 --- a/include/asm-generic/early_ioremap.h +++ b/include/asm-generic/early_ioremap.h @@ -32,12 +32,6 @@ extern void early_ioremap_setup(void); */ extern void early_ioremap_reset(void); -/* - * Early copy from unmapped memory to kernel mapped memory. 
- */ -extern void copy_from_early_mem(void *dest, phys_addr_t src, - unsigned long size); - #else static inline void early_ioremap_init(void) { } static inline void early_ioremap_setup(void) { } diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h index c75d4a753849398f3c0bae8d60104c7ba3849bd0..f7413f68f8d596f11d5ecee184621d8352f6b1e7 100644 --- a/include/asm-generic/pgalloc.h +++ b/include/asm-generic/pgalloc.h @@ -4,7 +4,7 @@ #ifdef CONFIG_MMU -#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO) +#define GFP_PGTABLE_KERNEL (GFP_KERNEL | __GFP_ZERO | __GFP_NOKFENCE) #define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT) /** diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h index c86abf6bc7ba27e10de5292232f37c3ca1b589c0..caad5193913c3a9d5449608bb2a422622d3c1ca1 100644 --- a/include/asm-generic/set_memory.h +++ b/include/asm-generic/set_memory.h @@ -9,5 +9,5 @@ int set_memory_ro(unsigned long addr, int numpages); int set_memory_rw(unsigned long addr, int numpages); int set_memory_x(unsigned long addr, int numpages); int set_memory_nx(unsigned long addr, int numpages); - +int set_memory_np(unsigned long addr, int numpages); #endif diff --git a/include/linux/acpi.h b/include/linux/acpi.h index afd94c9b8b8afd100c54fb07b5f1e8dcce08e92c..827fbbfc1c9494e423b4d4108378b6ddd50f1ab4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -259,7 +259,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); /* the following numa functions are architecture-dependent */ void acpi_numa_slit_init (struct acpi_table_slit *slit); -#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) +#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH) || defined(CONFIG_SW64) void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); #else static inline void @@ -1495,6 +1495,11 @@ int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_cluster(unsigned int cpu); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); +int find_acpi_cache_level_from_id(u32 cache_id); +int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, cpumask_t *cpus); +int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, u32 cache_level, + cpumask_t *cpus); #else static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) { @@ -1516,6 +1521,26 @@ static inline int find_acpi_cpu_topology_hetero_id(unsigned int cpu) { return -EINVAL; } +static inline int find_acpi_cache_level_from_id(u32 cache_id) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpus_from_container(u32 acpi_cpu_id, + cpumask_t *cpus) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpumask_from_cache_id(u32 cache_id, + cpumask_t *cpus) +{ + return -EINVAL; +} +static inline int acpi_pptt_get_cpumask_from_cache_id_and_level(u32 cache_id, + u32 cache_level, + cpumask_t *cpus) +{ + return -EINVAL; +} #endif #ifdef CONFIG_ARM64 @@ -1548,4 +1573,8 @@ static inline void acpi_device_notify(struct device *dev) { } static inline void acpi_device_notify_remove(struct device *dev) { } #endif +struct acpi_pptt_processor * +acpi_pptt_find_cache_backwards(struct acpi_table_header *table_hdr, + struct acpi_pptt_cache *cache); + #endif /*_LINUX_ACPI_H*/ diff --git a/include/linux/arm_mpam.h b/include/linux/arm_mpam.h new file mode 100644 index 
0000000000000000000000000000000000000000..6607764919419e7ab5dcbf6680e19da7560919cc --- /dev/null +++ b/include/linux/arm_mpam.h @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2021 Arm Ltd. + +#ifndef __LINUX_ARM_MPAM_H +#define __LINUX_ARM_MPAM_H + +#include +#include +#include + +/* + * The value of the MPAM1_EL1 sysreg when a task is in the default group. + * This is used by the context switch code to use the resctrl CPU property + * instead. The value is modified when CDP is enabled/disabled by mounting + * the resctrl filesystem. + */ +extern u64 mpam_resctrl_default_group; + +#include + +struct mpam_msc; + +enum mpam_msc_iface { + MPAM_IFACE_MMIO, /* a real MPAM MSC */ + MPAM_IFACE_PCC, /* a fake MPAM MSC */ +}; + +enum mpam_class_types { + MPAM_CLASS_CACHE, /* Well known caches, e.g. L2 */ + MPAM_CLASS_MEMORY, /* Main memory */ + MPAM_CLASS_UNKNOWN, /* Everything else, e.g. SMMU */ +}; + +enum mpam_machine_type { + MPAM_DEFAULT_MACHINE, + MPAM_YITIAN710, + + MPAM_NUM_MACHINE_TYPES, +}; + +/* Machine identifier which can be used for vendor-specific MPAM features */ +extern enum mpam_machine_type mpam_current_machine; + +#ifdef CONFIG_ACPI_MPAM +/* Parse the ACPI description of resources entries for this MSC. */ +int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc); +int acpi_mpam_count_msc(void); +enum mpam_machine_type acpi_mpam_get_machine_type(void); +#else +static inline int acpi_mpam_parse_resources(struct mpam_msc *msc, + struct acpi_mpam_msc_node *tbl_msc) +{ + return -EINVAL; +} +static inline int acpi_mpam_count_msc(void) { return -EINVAL; } +static inline enum mpam_machine_type acpi_mpam_get_machine_type(void) +{ + return MPAM_DEFAULT_MACHINE; +} +#endif + +int mpam_register_requestor(u16 partid_max, u8 pmg_max); + +int mpam_ris_create(struct mpam_msc *msc, u8 ris_idx, + enum mpam_class_types type, u8 class_id, int component_id); + +static inline unsigned int resctrl_arch_round_mon_val(unsigned int val) +{ + return val; +} + +/* MPAM counters requires a monitor to be allocated */ +static inline bool resctrl_arch_event_is_free_running(enum resctrl_event_id evt) +{ + return false; +} + +bool resctrl_arch_alloc_capable(void); +bool resctrl_arch_mon_capable(void); +bool resctrl_arch_is_llc_occupancy_enabled(void); +bool resctrl_arch_is_mbm_local_enabled(void); +bool resctrl_arch_is_mbm_total_enabled(void); +bool resctrl_arch_is_mbm_bps_enabled(void); + +/* reset cached configurations, then all devices */ +void resctrl_arch_reset_resources(void); + +bool resctrl_arch_get_cdp_enabled(enum resctrl_res_level ignored); +int resctrl_arch_set_cdp_enabled(enum resctrl_res_level ignored, bool enable); +bool resctrl_arch_match_closid(struct task_struct *tsk, u32 closid); +bool resctrl_arch_match_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid(int cpu, u32 closid); +void resctrl_arch_set_closid_rmid(struct task_struct *tsk, u32 closid, u32 rmid); +void resctrl_arch_set_cpu_default_closid_rmid(int cpu, u32 closid, u32 pmg); +void resctrl_arch_sched_in(struct task_struct *tsk); +u32 resctrl_arch_rmid_idx_encode(u32 closid, u32 rmid); +void resctrl_arch_rmid_idx_decode(u32 idx, u32 *closid, u32 *rmid); +u32 resctrl_arch_system_num_rmid_idx(void); + +struct rdt_resource; +void *resctrl_arch_mon_ctx_alloc(struct rdt_resource *r, int evtid); +void resctrl_arch_mon_ctx_free(struct rdt_resource *r, int evtid, void *ctx); + +/* Pseudo lock is not supported by MPAM */ +static inline int 
resctrl_arch_pseudo_lock_fn(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l2_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_l3_residency(void *_plr) { return 0; } +static inline int resctrl_arch_measure_cycles_lat_fn(void *_plr) { return 0; } +static inline u64 resctrl_arch_get_prefetch_disable_bits(void) { return 0; } + +/* + * The CPU configuration for MPAM is cheap to write, and is only written if it + * has changed. No need for fine grained enables. + */ +static inline void resctrl_arch_enable_mon(void) { } +static inline void resctrl_arch_disable_mon(void) { } +static inline void resctrl_arch_enable_alloc(void) { } +static inline void resctrl_arch_disable_alloc(void) { } + +#endif /* __LINUX_ARM_MPAM_H */ diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 255701e1251b4ac242456693f998e3940a36851c..28e247dd57731c118f4bdc51a2f1bc6b87aeca30 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h @@ -36,6 +36,11 @@ int sdei_event_unregister(u32 event_num); int sdei_event_enable(u32 event_num); int sdei_event_disable(u32 event_num); +int sdei_api_event_interrupt_bind(int hwirq); +int sdei_api_event_disable(u32 event_num); +int sdei_api_event_enable(u32 event_num); +int sdei_api_clear_eoi(int hwirq); +int sdei_api_set_secure_timer_period(int sec); /* GHES register/unregister helpers */ int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 1a97277f99b1b82de9e96eb8b9ca5544f9aa6e3a..81adf07c9637a04512dbb2b7529e0c9de2e9f436 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -159,6 +159,21 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, void wb_memcg_offline(struct mem_cgroup *memcg); void wb_blkcg_offline(struct cgroup_subsys_state *css); +extern bool cgwb_v1; + +static inline bool memcg_blkcg_on_dfl(void) +{ + return cgroup_subsys_on_dfl(memory_cgrp_subsys) && + cgroup_subsys_on_dfl(io_cgrp_subsys); +} + +static inline bool cgroup_writeback_support_v1(void) +{ + return cgwb_v1 && + !cgroup_subsys_on_dfl(memory_cgrp_subsys) && + !cgroup_subsys_on_dfl(io_cgrp_subsys); +} + /** * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode * @inode: inode of interest @@ -174,8 +189,8 @@ static inline bool inode_cgwb_enabled(struct inode *inode) { struct backing_dev_info *bdi = inode_to_bdi(inode); - return cgroup_subsys_on_dfl(memory_cgrp_subsys) && - cgroup_subsys_on_dfl(io_cgrp_subsys) && + return (memcg_blkcg_on_dfl() || + cgroup_writeback_support_v1()) && (bdi->capabilities & BDI_CAP_WRITEBACK) && (inode->i_sb->s_iflags & SB_I_CGROUPWB); } @@ -318,6 +333,13 @@ static inline void unlocked_inode_to_wb_end(struct inode *inode, rcu_read_unlock(); } +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset); +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links); +void free_memcg_blkcg_links(struct list_head *links_to_free); +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css); #else /* CONFIG_CGROUP_WRITEBACK */ static inline bool inode_cgwb_enabled(struct inode *inode) @@ -368,6 +390,30 @@ static inline void wb_blkcg_offline(struct cgroup_subsys_state *css) { } +#ifdef CONFIG_CGROUPS +struct cgroup_subsys; + +static inline void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ +} + +static inline 
int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + return 0; +} + +static inline void free_memcg_blkcg_links(struct list_head *links_to_free) +{ +} + +static inline void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ +} +#endif + #endif /* CONFIG_CGROUP_WRITEBACK */ const char *bdi_dev_name(struct backing_dev_info *bdi); diff --git a/include/linux/bio.h b/include/linux/bio.h index 41d417ee1349971abadee2e4a0bf36732912e32d..22ca0719a55834aa358e537a705b741b2fc01909 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -10,7 +10,15 @@ #include #include +#ifdef CONFIG_THP_SWAP +#if HPAGE_PMD_NR > 256 +#define BIO_MAX_VECS (HPAGE_PMD_NR * 1U) +#else #define BIO_MAX_VECS 256U +#endif +#else +#define BIO_MAX_VECS 256U +#endif struct queue_limits; @@ -242,6 +250,21 @@ static inline void bio_clear_flag(struct bio *bio, unsigned int bit) bio->bi_flags &= ~(1U << bit); } +static inline bool bio_ext_flagged(struct bio *bio, unsigned int bit) +{ + return (bio->bi_ext_flags & (1U << bit)) != 0; +} + +static inline void bio_set_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags |= (1U << bit); +} + +static inline void bio_clear_ext_flag(struct bio *bio, unsigned int bit) +{ + bio->bi_ext_flags &= ~(1U << bit); +} + static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) { WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index d5c5e59ddbd25afd284a1094948850be42437fb2..bd73f2d6b50ae625820ebcfbcac79795eb404a12 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -10,6 +10,7 @@ #include #include #include +#include struct bio_set; struct bio; @@ -286,6 +287,12 @@ struct bio { */ struct blkcg_gq *bi_blkg; struct bio_issue bi_issue; +#ifdef CONFIG_BLK_DEV_THROTTLING + unsigned long long start_time_ns; /* when passed to block throttle */ + unsigned long long io_start_time_ns; /* when no more throttle */ + bio_end_io_t *bi_tg_end_io; + void *bi_tg_private; +#endif #ifdef CONFIG_BLK_CGROUP_IOCOST u64 bi_iocost_cost; #endif @@ -315,6 +322,8 @@ struct bio { struct bio_set *bi_pool; + unsigned long bi_ext_flags; /* extend the bi_flags */ + /* * We can inline a number of vecs at the end of the bio, to avoid * double allocations for a small number of bio_vecs. This member @@ -348,6 +357,37 @@ enum { BIO_FLAG_LAST }; +/* + * Extend bio flags should be added in here + */ +#define BIO_THROTL_STATED 0 /* bio already stated */ + +#ifdef CONFIG_BLK_DEV_THROTTLING +static inline void bio_set_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline void bio_set_io_start_time_ns(struct bio *bio) +{ + preempt_disable(); + bio->io_start_time_ns = sched_clock(); + preempt_enable(); +} + +static inline uint64_t bio_start_time_ns(struct bio *bio) +{ + return bio->start_time_ns; +} + +static inline uint64_t bio_io_start_time_ns(struct bio *bio) +{ + return bio->io_start_time_ns; +} +#endif + typedef __u32 __bitwise blk_mq_req_flags_t; #define REQ_OP_BITS 8 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index eef450f259828d933f1f4b96d5515aed3d7eb3c5..f6c72fd459fc1a134798aab763752798457bbe54 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -375,6 +375,12 @@ struct blk_independent_access_ranges { struct blk_independent_access_range ia_range[]; }; +/* + * default request hang threshold, unit is millisecond. 
If one request does + * not complete in this threashold time, consider this request as hang. + */ +#define BLK_REQ_HANG_THRESHOLD 5000 + struct request_queue { struct request *last_merge; struct elevator_queue *elevator; @@ -451,6 +457,7 @@ struct request_queue { #endif unsigned int rq_timeout; + unsigned int rq_hang_threshold; struct timer_list timeout; struct work_struct timeout_work; @@ -942,6 +949,8 @@ extern void blk_queue_required_elevator_features(struct request_queue *q, unsigned int features); extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, struct device *dev); +extern void blk_queue_rq_hang_threshold(struct request_queue *q, + unsigned int hang_threshold); bool __must_check blk_get_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *); diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 44e9de51eedfb634aa2a44420a0fc32ee61ff924..9711ae81d988a63ee59c94bcef08a8ff73c8deb3 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -369,8 +369,11 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) static inline void wait_on_buffer(struct buffer_head *bh) { might_sleep(); - if (buffer_locked(bh)) + if (buffer_locked(bh)) { + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __wait_on_buffer(bh); + task_clear_wait_res(); + } } static inline int trylock_buffer(struct buffer_head *bh) @@ -381,8 +384,11 @@ static inline int trylock_buffer(struct buffer_head *bh) static inline void lock_buffer(struct buffer_head *bh) { might_sleep(); - if (!trylock_buffer(bh)) + if (!trylock_buffer(bh)) { + task_set_wait_res(TASK_WAIT_FOLIO, bh->b_folio); __lock_buffer(bh); + task_clear_wait_res(); + } } static inline struct buffer_head *getblk_unmovable(struct block_device *bdev, diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h index a5cfd44fab45b482cd5505b4cfefa4ba64b3f075..467ec6a765673c1d31a1edbb2cca522eeaf71e82 100644 --- a/include/linux/cacheinfo.h +++ b/include/linux/cacheinfo.h @@ -47,7 +47,7 @@ extern unsigned int coherency_max_size; * keeping, the remaining members form the core properties of the cache */ struct cacheinfo { - unsigned int id; + unsigned long id; enum cache_type type; unsigned int level; unsigned int coherency_line_size; @@ -110,12 +110,13 @@ int acpi_get_cache_info(unsigned int cpu, #endif const struct attribute_group *cache_get_priv_group(struct cacheinfo *this_leaf); +unsigned long cache_of_get_id(struct device_node *np); /* * Get the id of the cache associated with @cpu at level @level. * cpuhp lock must be held. */ -static inline int get_cpu_cacheinfo_id(int cpu, int level) +static inline unsigned long get_cpu_cacheinfo_id(int cpu, int level) { struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); int i; @@ -124,11 +125,32 @@ static inline int get_cpu_cacheinfo_id(int cpu, int level) if (ci->info_list[i].level == level) { if (ci->info_list[i].attributes & CACHE_ID) return ci->info_list[i].id; - return -1; + return ~0UL; } } - return -1; + return ~0UL; +} + +/* + * Get the size of the cache associated with @cpu at level @level. + * cpuhp lock must be held. 
+ */ +static inline unsigned int get_cpu_cacheinfo_size(int cpu, int level) +{ + struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu); + int i; + + if (!ci->info_list) + return 0; + + for (i = 0; i < ci->num_leaves; i++) { + if (ci->info_list[i].level == level) { + return ci->info_list[i].size; + } + } + + return 0; } #ifdef CONFIG_ARM64 diff --git a/include/linux/ccp.h b/include/linux/ccp.h index 868924dec5a17ba353d6a0e67af8a033fbcf67ff..8e34f05bc6b1681fa11cc89384cd92e555064afb 100644 --- a/include/linux/ccp.h +++ b/include/linux/ccp.h @@ -17,6 +17,7 @@ #include #include #include +#include struct ccp_device; struct ccp_cmd; @@ -587,6 +588,202 @@ struct ccp_ecc_engine { u16 ecc_result; }; +/***** SM2 engine *****/ +#define CCP_SM2_VERIFY_SRC_SIZE 160 +#define CCP_SM2_LP_SRC_SIZE 96 +#define CCP_SM2_KG_SRC_SIZE 32 +#define CCP_SM2_SIGN_SRC_SIZE 96 +#define CCP_SM2_MMUL_SRC_SIZE 64 +#define CCP_SM2_DST_SIZE 128 + +/** + * ccp_sm2_mode - SM2 operation mode + * + * @CCP_SM2_MODE_VERIFY: Verify mode + * @CCP_SM2_MODE_LP: LP mode + * @CCP_SM2_MODE_KG: KG mode + * @CCP_SM2_MODE_SIGN: SIGN mode + * @CCP_SM2_MODE_MMUL: MMUL mode + */ +enum ccp_sm2_mode { + CCP_SM2_MODE_VERIFY, + CCP_SM2_MODE_LP, + CCP_SM2_MODE_KG, + CCP_SM2_MODE_SIGN, + CCP_SM2_MODE_MMUL, + CCP_SM2_MODE__LAST, +}; + +/** + * struct ccp_sm2_engine - CCP SM2 operation + * @mode: SM2 operation mode + * @rand: indicating whether operand_k comes from the TRNG or not + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * @dst_len: length in bytes of data produced by this operation + */ +struct ccp_sm2_engine { + enum ccp_sm2_mode mode; + u32 rand; + + struct scatterlist *src; + u32 src_len; + + struct scatterlist *dst; + u32 dst_len; +}; + +/***** SM3 engine *****/ +/** + * ccp_sm3_type - type of SM3 operation + * + * @CCP_SM3_TYPE_256: SM3 operation + */ +enum ccp_sm3_type { + CCP_SM3_TYPE_256 = 2, + CCP_SM3_TYPE__LAST, +}; + +/** + * struct ccp_sm3_engine - CCP SM3 operation + * @type: Type of SM3 operation + * @ctx: current hash value + * @ctx_len: length in bytes of hash value + * @src: data to be used for this operation + * @src_len: length in bytes of data used for this operation + * @opad: data to be used for final HMAC operation + * @opad_len: length in bytes of data used for final HMAC operation + * @first: indicates first SM3 operation + * @final: indicates final SM3 operation + * @msg_bits: total length of the message in bits used in final SM3 operation + */ +struct ccp_sm3_engine { + enum ccp_sm3_type type; + + struct scatterlist *ctx; + u32 ctx_len; + + struct scatterlist *src; + u64 src_len; + + struct scatterlist *opad; + u32 opad_len; + + u32 first; + u32 final; + u64 msg_bits; +}; + +/***** SM4 engine *****/ +#define SM4_BLOCK_SIZE 16 +#define SM4_KEY_SIZE 16 +#define CCP_SM4_MODE_MASK 0x0F +#define CCP_SM4_MODE_HS_SEL 0x10 + +/** + * ccp_sm4_mode - SM4 operation mode + * + * @CCP_SM4_MODE_ECB: ECB mode + * @CCP_SM4_MODE_CBC: CBC mode + * @CCP_SM4_MODE_OFB: OFB mode + * @CCP_SM4_MODE_CFB: CFB mode + * @CCP_SM4_MODE_CTR: CTR mode + */ +enum ccp_sm4_mode { + CCP_SM4_MODE_ECB = 0, + CCP_SM4_MODE_CBC, + CCP_SM4_MODE_OFB, + CCP_SM4_MODE_CFB, + CCP_SM4_MODE_CTR, + CCP_SM4_MODE__LAST, +}; + +/** + * ccp_sm4_action - SM4 operation + * + * @CCP_SM4_ACTION_DECRYPT: SM4 decrypt operation + * @CCP_SM4_ACTION_ENCRYPT: SM4 encrypt operation + */ +enum ccp_sm4_action { + CCP_SM4_ACTION_DECRYPT = 0, + CCP_SM4_ACTION_ENCRYPT, + CCP_SM4_ACTION__LAST, +};
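For illustration, a caller could queue an SM4-CBC encrypt through the existing ccp_enqueue_cmd() interface roughly as sketched below. The function name is hypothetical, the scatterlists are assumed to be prepared by the caller, and completion handling (the ccp_cmd callback) is elided; per the constraints documented below, key and IV must be 16 bytes and src_len a multiple of the block size.

static int sm4_cbc_encrypt_sketch(struct scatterlist *key,
				  struct scatterlist *iv,
				  struct scatterlist *src,
				  struct scatterlist *dst, u64 len)
{
	struct ccp_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_SM4;
	cmd.u.sm4.mode = CCP_SM4_MODE_CBC;
	cmd.u.sm4.action = CCP_SM4_ACTION_ENCRYPT;
	cmd.u.sm4.key = key;
	cmd.u.sm4.key_len = SM4_KEY_SIZE;	/* must be 16 bytes */
	cmd.u.sm4.iv = iv;
	cmd.u.sm4.iv_len = SM4_BLOCK_SIZE;	/* must be 16 bytes */
	cmd.u.sm4.src = src;
	cmd.u.sm4.dst = dst;
	cmd.u.sm4.src_len = len;		/* multiple of 16 bytes */

	return ccp_enqueue_cmd(&cmd);
}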
+ +/** + * struct ccp_sm4_engine - CCP SM4 operation + * @mode: SM4 operation mode + * @action: SM4 operation (decrypt/encrypt) + * @select: Indicating that high-secure engine is selected + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - mode, action, select, key, key_len, src, dst, src_len + * - iv, iv_len for any mode other than ECB + * - key_len and iv_len must be 16B + * - src_len must be multiple of 16B + * - high-secure engine only for ECB and CBC mode + * + * The iv variable is used as both input and output. On completion of the + * SM4 operation the new IV overwrites the old IV. + */ +struct ccp_sm4_engine { + enum ccp_sm4_mode mode; + enum ccp_sm4_action action; + u32 select; /* Indicating that high-secure engine is selected */ + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; + +/***** SM4_CTR engine *****/ +/** + * struct ccp_sm4_ctr_engine - CCP SM4_CTR operation + * @action: SM4_CTR operation (decrypt/encrypt) + * @size: counter bit size + * @step: counter increase step + * @key: key to be used for this SM4 operation + * @key_len: length in bytes of key + * @iv: IV to be used for this SM4 operation + * @iv_len: length in bytes of iv + * @src: data to be used for this operation + * @dst: data produced by this operation + * @src_len: length in bytes of data used for this operation + * + * Variables required to be set when calling ccp_enqueue_cmd(): + * - action, size, step, key, key_len, iv, iv_len, src, dst, src_len + * - key_len and iv_len must be 16B + * + * The iv variable is used as both input and output. On completion of the + * SM4_CTR operation the new IV overwrites the old IV. 
+ */ +struct ccp_sm4_ctr_engine { + enum ccp_sm4_action action; + u32 size; + u32 step; + + struct scatterlist *key; + u32 key_len; /* In bytes */ + + struct scatterlist *iv; + u32 iv_len; /* In bytes */ + + struct scatterlist *src, *dst; + u64 src_len; /* In bytes */ +}; /** * ccp_engine - CCP operation identifiers @@ -599,6 +796,8 @@ struct ccp_ecc_engine { * @CCP_ENGINE_PASSTHRU: pass-through operation * @CCP_ENGINE_ZLIB_DECOMPRESS: unused * @CCP_ENGINE_ECC: ECC operation + * @CCP_ENGINE_SM2: SM2 operation + * @CCP_ENGINE_SM3: SM3 operation */ enum ccp_engine { CCP_ENGINE_AES = 0, @@ -609,6 +808,10 @@ enum ccp_engine { CCP_ENGINE_PASSTHRU, CCP_ENGINE_ZLIB_DECOMPRESS, CCP_ENGINE_ECC, + CCP_ENGINE_SM2 = 8, /* fixed value */ + CCP_ENGINE_SM3, + CCP_ENGINE_SM4, + CCP_ENGINE_SM4_CTR, CCP_ENGINE__LAST, }; @@ -657,6 +860,10 @@ struct ccp_cmd { struct ccp_passthru_engine passthru; struct ccp_passthru_nomap_engine passthru_nomap; struct ccp_ecc_engine ecc; + struct ccp_sm2_engine sm2; + struct ccp_sm3_engine sm3; + struct ccp_sm4_engine sm4; + struct ccp_sm4_ctr_engine sm4_ctr; } u; /* Completion callback support */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 265da00a1a8b1b34c2405a6ae1e31d18cadb389f..13d7a1f91eded5cabf2189aa47bb6cf4c1dfcf0f 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -304,6 +304,11 @@ struct cgroup_base_stat { #ifdef CONFIG_SCHED_CORE u64 forceidle_sum; + u64 forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_sum; + u64 sibidle_task_sum; #endif }; diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index b307013b9c6c9a2c892644f7fba94698bb82133b..5eda25d7c3a5ae8df0eec44760c3572683af5a50 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -121,6 +121,8 @@ void cgroup_file_show(struct cgroup_file *cfile, bool show); int cgroupstats_build(struct cgroupstats *stats, struct dentry *dentry); int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); +extern struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid); void cgroup_fork(struct task_struct *p); extern int cgroup_can_fork(struct task_struct *p, @@ -855,4 +857,16 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} #endif /* CONFIG_CGROUP_BPF */ +#ifdef CONFIG_SCHED_SLI +void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added); +void cgroup_idle_end(struct sched_entity *se); +void cgroup_idle_start(struct sched_entity *se); +#else +static inline void cpuacct_cpuset_changed(struct cgroup *cgrp, + struct cpumask *effective, struct cpumask *new_added) { } +static inline void cgroup_idle_end(struct sched_entity *se) { } +static inline void cgroup_idle_start(struct sched_entity *se) { } +#endif + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/ck_kabi.h b/include/linux/ck_kabi.h new file mode 100644 index 0000000000000000000000000000000000000000..a2ecc950c93a94c28c9430d103f71d7752158a23 --- /dev/null +++ b/include/linux/ck_kabi.h @@ -0,0 +1,532 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ck_kabi.h - Anolis Cloud-Kernel kABI abstraction header + * + * Copyright (c) 2014 Don Zickus + * Copyright (c) 2015-2018 Jiri Benc + * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa + * Copyright (c) 2016-2018 Prarit Bhargava + * Copyright (c) 2017 Paolo Abeni, Larry Woodman + * Copyright (c) 2023 Guixin Liu + * + * This file is released under the GPLv2. 
+ * See the file COPYING for more details.
+ *
+ * These kabi macros hide the changes from the kabi checker and from the
+ * process that computes the exported symbols' checksums.
+ * They have 2 variants: one (defined under __GENKSYMS__) used when
+ * generating the checksums, and the other used when building the kernel's
+ * binaries.
+ *
+ * The use of these macros does not guarantee that the usage and modification
+ * of code is correct. As with all Anolis-only changes, an engineer must
+ * explain why the use of the macro is valid in the patch containing the
+ * changes.
+ *
+ */
+
+#ifndef _LINUX_CK_KABI_H
+#define _LINUX_CK_KABI_H
+
+#include
+#include
+#include
+
+/*
+ * NOTE
+ *   Unless indicated otherwise, don't use ';' after these macros as it
+ *   messes up the kABI checker by changing what the resulting token string
+ *   looks like. Instead let the macros add the ';' so it can be properly
+ *   hidden from the kABI checker (mainly for CK_KABI_EXTEND, but applied to
+ *   most macros for uniformity).
+ *
+ *
+ * CK_KABI_CONST
+ *   Adds a new const modifier to a function parameter preserving the old
+ *   checksum.
+ *
+ * CK_KABI_ADD_MODIFIER
+ *   Adds a new modifier to a function parameter or a typedef, preserving
+ *   the old checksum. Useful e.g. for adding rcu annotations or changing
+ *   int to unsigned. Beware that this may change the semantics; if you're
+ *   sure this is safe, always explain why binary compatibility with 3rd
+ *   party modules is retained.
+ *
+ * CK_KABI_DEPRECATE
+ *   Marks the element as deprecated and makes it unusable by modules while
+ *   keeping a hole in its place to preserve binary compatibility.
+ *
+ * CK_KABI_DEPRECATE_FN
+ *   Marks the function pointer as deprecated and makes it unusable by modules
+ *   while keeping a hole in its place to preserve binary compatibility.
+ *
+ * CK_KABI_EXTEND
+ *   Adds a new field to a struct. This must always be added to the end of
+ *   the struct. Before using this macro, make sure this is actually safe
+ *   to do - there are a number of conditions under which it is *not* safe.
+ *   In particular (but not limited to), this macro cannot be used:
+ *   - if the struct in question is embedded in another struct, or
+ *   - if the struct is allocated by drivers either statically or
+ *     dynamically, or
+ *   - if the struct is allocated together with driver data (an example of
+ *     such behavior is struct net_device or struct request).
+ *
+ * CK_KABI_EXTEND_WITH_SIZE
+ *   Adds a new element (usually a struct) to a struct and reserves extra
+ *   space for the new element. The provided 'size' is the total space to
+ *   be added in longs (i.e. it's 8 * 'size' bytes), including the size of
+ *   the added element. It is automatically checked that the new element
+ *   does not overflow the reserved space, neither now nor in the future.
+ *   However, no attempt is made to check the content of the added element
+ *   (struct) for kABI conformance - kABI checking inside the added element
+ *   is effectively switched off.
+ *   For any struct being added by CK_KABI_EXTEND_WITH_SIZE, it is
+ *   recommended that its content be documented as not covered by the kABI
+ *   guarantee.
+ *
+ * CK_KABI_FILL_HOLE
+ *   Fills a hole in a struct.
+ *
+ *   Warning: only use if a hole exists for _all_ arches. Use pahole to verify.
+ *
+ * CK_KABI_RENAME
+ *   Renames an element without changing its type. This macro can be used in
+ *   bitfields, for example.
+ *
+ *   NOTE: this macro does not add the final ';'
+ *
+ * CK_KABI_REPLACE
+ *   Replaces the _orig field by the _new field. The size of the occupied
+ *   space is preserved; it's fine if the _new field is smaller than the
+ *   _orig field. If a _new field is larger or has a different alignment,
+ *   compilation will abort.
+ *
+ * CK_KABI_REPLACE_SPLIT
+ *   Works the same as CK_KABI_REPLACE but replaces a single _orig field by
+ *   multiple new fields. The checks for size and alignment done by
+ *   CK_KABI_REPLACE are still applied.
+ *
+ * CK_KABI_HIDE_INCLUDE
+ *   Hides the given include file from kABI checksum computations. This is
+ *   used when a newly added #include makes a previously opaque struct
+ *   visible.
+ *
+ *   Example usage:
+ *	#include CK_KABI_HIDE_INCLUDE()
+ *
+ * CK_KABI_FAKE_INCLUDE
+ *   Pretends inclusion of the given file for kABI checksum computations.
+ *   This is used when upstream removed a particular #include but that made
+ *   some structures opaque that were previously visible and is causing kABI
+ *   checker failures.
+ *
+ *   Example usage:
+ *	#include CK_KABI_FAKE_INCLUDE()
+ *
+ * CK_KABI_RESERVE
+ *   Adds a reserved field to a struct. This is done prior to kABI freeze
+ *   for structs that cannot be expanded later using CK_KABI_EXTEND (for
+ *   example because they are embedded in another struct or because they are
+ *   allocated by drivers or because they use unusual memory layout). The
+ *   size of the reserved field is 'unsigned long' and is assumed to be
+ *   8 bytes.
+ *
+ *   The argument is a number unique for the given struct; usually, multiple
+ *   CK_KABI_RESERVE macros are added to a struct with numbers starting from
+ *   one.
+ *
+ *   Example usage:
+ *	struct foo {
+ *		int a;
+ *		CK_KABI_RESERVE(1)
+ *		CK_KABI_RESERVE(2)
+ *		CK_KABI_RESERVE(3)
+ *		CK_KABI_RESERVE(4)
+ *	};
+ *
+ * CK_KABI_USE
+ *   Uses a previously reserved field or multiple fields. The arguments are
+ *   one or more numbers assigned to CK_KABI_RESERVE, followed by a field to
+ *   be put in their place. The compiler ensures that the new field is not
+ *   larger than the reserved area.
+ *
+ *   Example usage:
+ *	struct foo {
+ *		int a;
+ *		CK_KABI_USE(1, int b)
+ *		CK_KABI_USE(2, 3, int c[3])
+ *		CK_KABI_RESERVE(4)
+ *	};
+ *
+ * CK_KABI_USE_SPLIT
+ *   Works the same as CK_KABI_USE but replaces a single reserved field by
+ *   multiple new fields.
+ *
+ * CK_KABI_AUX_EMBED
+ * CK_KABI_AUX_PTR
+ *   Adds an extension of a struct in the form of "auxiliary structure".
+ *   This is done prior to kABI freeze for structs that cannot be expanded
+ *   later using CK_KABI_EXTEND. See also CK_KABI_RESERVE; these two
+ *   approaches can be (and often are) combined.
+ *
+ *   To use this for 'struct foo' (the "base structure"), define a new
+ *   structure called 'struct foo_ck_reserved'; this new struct is called
+ *   the "auxiliary structure". Then add CK_KABI_AUX_EMBED or
+ *   CK_KABI_AUX_PTR to the end of the base structure. The argument is the
+ *   name of the base structure, without the 'struct' keyword.
+ *
+ *   CK_KABI_AUX_PTR stores a pointer to the aux structure in the base
+ *   struct. The lifecycle of the aux struct needs to be properly taken
+ *   care of.
+ *
+ *   CK_KABI_AUX_EMBED embeds the aux struct into the base struct. This
+ *   cannot be used when the base struct is itself embedded into another
+ *   struct, allocated in an array, etc.
+ *
+ *   Both approaches (ptr and embed) work correctly even when the aux struct
+ *   is allocated by modules. To ensure this, the code responsible for
+ *   allocation/assignment of the aux struct has to properly set the size of
+ *   the aux struct; see the CK_KABI_AUX_SET_SIZE and CK_KABI_AUX_INIT_SIZE
+ *   macros.
+ *
+ *   New fields can be later added to the auxiliary structure, always to its
+ *   end. Note the auxiliary structure cannot be shrunk in size later (i.e.,
+ *   fields cannot be removed, only deprecated). Any code accessing fields
+ *   from the aux struct must guard the access using the CK_KABI_AUX macro.
+ *   The access itself is then done via a '_ck_reserved' field in the base
+ *   struct.
+ *
+ *   The auxiliary structure is not guaranteed for access by modules unless
+ *   explicitly commented as such in the declaration of the aux struct
+ *   itself or some of its elements.
+ *
+ *   Example:
+ *
+ *	struct foo_ck_reserved {
+ *		int newly_added;
+ *	};
+ *
+ *	struct foo {
+ *		bool big_hammer;
+ *		CK_KABI_AUX_PTR(foo)
+ *	};
+ *
+ *	void use(struct foo *f)
+ *	{
+ *		if (CK_KABI_AUX(f, foo, newly_added))
+ *			f->_ck_reserved->newly_added = 123;
+ *		else
+ *			// the field 'newly_added' is not present in the passed
+ *			// struct, fall back to old behavior
+ *			f->big_hammer = true;
+ *	}
+ *
+ *	static struct foo_ck_reserved my_foo_ck_reserved = {
+ *		.newly_added = 0,
+ *	};
+ *
+ *	static struct foo my_foo = {
+ *		.big_hammer = false,
+ *		._ck_reserved = &my_foo_ck_reserved,
+ *		CK_KABI_AUX_INIT_SIZE(foo)
+ *	};
+ *
+ * CK_KABI_USE_AUX_PTR
+ *   Creates an auxiliary structure post kABI freeze. This works by using
+ *   two reserved fields (thus there have to be two reserved fields still
+ *   available) and converting them to CK_KABI_AUX_PTR.
+ *
+ *   Example:
+ *
+ *	struct foo_ck_reserved {
+ *	};
+ *
+ *	struct foo {
+ *		int a;
+ *		CK_KABI_RESERVE(1)
+ *		CK_KABI_USE_AUX_PTR(2, 3, foo)
+ *	};
+ *
+ * CK_KABI_AUX_SET_SIZE
+ * CK_KABI_AUX_INIT_SIZE
+ *   Calculates and stores the size of the auxiliary structure.
+ *
+ *   CK_KABI_AUX_SET_SIZE is for dynamically allocated base structs,
+ *   CK_KABI_AUX_INIT_SIZE is for statically allocated base structs.
+ *
+ *   These macros must be called from the allocation (CK_KABI_AUX_SET_SIZE)
+ *   or declaration (CK_KABI_AUX_INIT_SIZE) site, regardless of whether
+ *   that happens in the kernel or in a module. Without calling one of
+ *   these macros, the aux struct will appear to have no fields to the
+ *   kernel.
+ *
+ *   Note: since CK_KABI_AUX_SET_SIZE is intended to be invoked outside of
+ *   a struct definition, it does not add the semicolon and must be
+ *   terminated with a semicolon by the caller.
+ *
+ * CK_KABI_AUX
+ *   Verifies that the given field exists in the given auxiliary structure.
+ *   This MUST be called prior to accessing that field; failing to do that
+ *   may lead to invalid memory access.
+ *
+ *   The first argument is a pointer to the base struct, the second argument
+ *   is the name of the base struct (without the 'struct' keyword), the
+ *   third argument is the field name.
+ *
+ *   This macro works for structs extended by either of CK_KABI_AUX_EMBED,
+ *   CK_KABI_AUX_PTR and CK_KABI_USE_AUX_PTR.
+ *
+ * CK_KABI_FORCE_CHANGE
+ *   Force change of the symbol checksum. The argument of the macro is a
+ *   version for cases we need to do this more than once.
+ *
+ *   Unlike the macros above, which hide changes from the checksum, this
+ *   macro changes the symbol checksum without altering the exported symbol
+ *   itself. It is useful for symbols that are not whitelisted, when we're
+ *   changing them in an incompatible way and want to prevent 3rd party
+ *   modules from silently corrupting memory. By changing the symbol
+ *   checksum, such modules won't be loaded by the kernel. This macro should
+ *   only be used as a last resort when all other kABI workarounds have
+ *   failed.
+ *
+ * CK_KABI_EXCLUDE
+ *   !!! WARNING: DANGEROUS, DO NOT USE unless you are aware of all the !!!
+ *   !!! implications. This should be used ONLY EXCEPTIONALLY and only  !!!
+ *   !!! under specific circumstances. Very likely, this macro does not !!!
+ *   !!! do what you expect it to do. Note that any usage of this macro !!!
+ *   !!! MUST be paired with a CK_KABI_FORCE_CHANGE annotation of       !!!
+ *   !!! a suitable symbol (or an equivalent safeguard) and the commit  !!!
+ *   !!! log MUST explain why the chosen solution is appropriate.       !!!
+ *
+ *   Exclude the element from checksum generation. Any such element is
+ *   considered not to be part of the kABI whitelist and may be changed at
+ *   will. Note however that it's the responsibility of the developer
+ *   changing the element to ensure 3rd party drivers using this element
+ *   won't panic, for example by not allowing them to be loaded. That can
+ *   be achieved by changing another, non-whitelisted symbol they use,
+ *   either by nature of the change or by using CK_KABI_FORCE_CHANGE.
+ *
+ *   Also note that any change to the element must preserve its size. Change
+ *   of the size is not allowed and would constitute a silent kABI breakage.
+ *   Beware that the CK_KABI_EXCLUDE macro does not do any size checks.
+ *
+ * CK_KABI_BROKEN_INSERT
+ * CK_KABI_BROKEN_REMOVE
+ *   Insert a field into the middle of a struct / delete a field from a
+ *   struct. Note that this breaks kABI! It can be done only when it's
+ *   certain that no 3rd party driver can validly reach into the struct. A
+ *   typical example is a struct that is: both (a) referenced only through a
+ *   long chain of pointers from another struct that is part of a
+ *   whitelisted symbol and (b) kernel-internal only; it should never have
+ *   been visible to genksyms in the first place.
+ *
+ *   Other examples are structs that are explicitly exempt from the kABI
+ *   guarantee but we did not have enough foresight to use CK_KABI_EXCLUDE.
+ *   In this case, the warning for CK_KABI_EXCLUDE applies.
+ *
+ *   A detailed explanation of correctness of every CK_KABI_BROKEN_* macro
+ *   use is especially important.
+ *
+ * CK_KABI_BROKEN_INSERT_BLOCK
+ * CK_KABI_BROKEN_REMOVE_BLOCK
+ *   A version of CK_KABI_BROKEN_INSERT / REMOVE that allows multiple fields
+ *   to be inserted or removed together. All fields need to be terminated
+ *   by ';' inside(!) the macro parameter. The macro itself must not be
+ *   terminated by ';'.
+ *
+ * CK_KABI_BROKEN_REPLACE
+ *   Replace a field by a different one without doing any checking. This
+ *   allows replacing a field by another with a different size. Similarly
+ *   to other CK_KABI_BROKEN macros, use of this indicates a kABI breakage.
+ *
+ * CK_KABI_BROKEN_INSERT_ENUM
+ * CK_KABI_BROKEN_REMOVE_ENUM
+ *   Insert a field into the middle of an enumeration type / delete a field
+ *   from an enumeration type. Note that this can break kABI especially if
+ *   the number of enum fields is used in an array within a structure. It
+ *   can be done only when it is certain that no 3rd party driver will use
+ *   the enumeration type or a structure that embeds an array with size
+ *   determined by an enumeration type.
+ *
+ * CK_KABI_EXTEND_ENUM
+ *   Adds a new field to an enumeration type. This must always be added to
+ *   the end of the enum. Before using this macro, make sure this is
+ *   actually safe to do.
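+ *
+ * An illustrative sketch for CK_KABI_REPLACE, which has no example above
+ * (the field names are placeholders, not taken from real code):
+ *
+ *	struct foo {
+ *		CK_KABI_REPLACE(unsigned long unused_counter, void *new_ptr)
+ *	};
+ *
+ * On 64-bit targets a void pointer fits in the unsigned long it replaces,
+ * so the size and alignment checks pass, while genksyms continues to see
+ * 'unused_counter' and the old checksum is preserved.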
+ */ + +#ifdef __GENKSYMS__ + +# define CK_KABI_CONST +# define CK_KABI_ADD_MODIFIER(_new) +# define CK_KABI_EXTEND(_new) +# define CK_KABI_FILL_HOLE(_new) +# define CK_KABI_FORCE_CHANGE(ver) __attribute__((ck_kabi_change ## ver)) +# define CK_KABI_RENAME(_orig, _new) _orig +# define CK_KABI_HIDE_INCLUDE(_file) +# define CK_KABI_FAKE_INCLUDE(_file) _file +# define CK_KABI_BROKEN_INSERT(_new) +# define CK_KABI_BROKEN_REMOVE(_orig) _orig; +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) _orig +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _orig; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) _orig, +# define CK_KABI_EXTEND_ENUM(_new) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type _orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) _type (*_orig)(_args) +# define _CK_KABI_REPLACE(_orig, _new) _orig +# define _CK_KABI_EXCLUDE(_elem) + +#else + +# define CK_KABI_ALIGN_WARNING ". Disable CONFIG_CK_KABI_SIZE_ALIGN_CHECKS if debugging." + +# define CK_KABI_CONST const +# define CK_KABI_ADD_MODIFIER(_new) _new +# define CK_KABI_EXTEND(_new) _new; +# define CK_KABI_FILL_HOLE(_new) _new; +# define CK_KABI_FORCE_CHANGE(ver) +# define CK_KABI_RENAME(_orig, _new) _new +# define CK_KABI_HIDE_INCLUDE(_file) _file +# define CK_KABI_FAKE_INCLUDE(_file) +# define CK_KABI_BROKEN_INSERT(_new) _new; +# define CK_KABI_BROKEN_REMOVE(_orig) +# define CK_KABI_BROKEN_INSERT_BLOCK(_new) _new +# define CK_KABI_BROKEN_REMOVE_BLOCK(_orig) +# define CK_KABI_BROKEN_REPLACE(_orig, _new) _new; +# define CK_KABI_BROKEN_INSERT_ENUM(_new) _new, +# define CK_KABI_BROKEN_REMOVE_ENUM(_orig) +# define CK_KABI_EXTEND_ENUM(_new) _new, + +#if IS_BUILTIN(CONFIG_CK_KABI_SIZE_ALIGN_CHECKS) +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) \ + union { \ + _Static_assert(sizeof(struct{_new;}) <= sizeof(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_new) " is larger than " __stringify(_orig) CK_KABI_ALIGN_WARNING); \ + _Static_assert(__alignof__(struct{_new;}) <= __alignof__(struct{_orig;}), \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_orig) " is not aligned the same as " __stringify(_new) CK_KABI_ALIGN_WARNING); \ + } +# define __CK_KABI_CHECK_SIZE(_item, _size) \ + _Static_assert(sizeof(struct{_item;}) <= _size, \ + __FILE__ ":" __stringify(__LINE__) ": " __stringify(_item) " is larger than the reserved size (" __stringify(_size) " bytes)" CK_KABI_ALIGN_WARNING) +#else +# define __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new) +# define __CK_KABI_CHECK_SIZE(_item, _size) +#endif + +#define CK_KABI_UNIQUE_ID __PASTE(ck_kabi_hidden_, __LINE__) + +# define _CK_KABI_DEPRECATE(_type, _orig) _type ck_reserved_##_orig +# define _CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _type (* ck_reserved_##_orig)(_args) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_REPLACE(_orig, _new) \ + union { \ + _new; \ + struct { \ + _orig; \ + } CK_KABI_UNIQUE_ID; \ + __CK_KABI_CHECK_SIZE_ALIGN(_orig, _new); \ + } +#else +# define _CK_KABI_REPLACE(_orig, _new) CK_KABI_BROKEN_REPLACE(_orig, _new) +#endif + +# define _CK_KABI_EXCLUDE(_elem) _elem + +#endif /* __GENKSYMS__ */ + +# define CK_KABI_DEPRECATE(_type, _orig) _CK_KABI_DEPRECATE(_type, _orig); +# define CK_KABI_DEPRECATE_FN(_type, _orig, _args...) \ + _CK_KABI_DEPRECATE_FN(_type, _orig, _args); +# define CK_KABI_REPLACE(_orig, _new) _CK_KABI_REPLACE(_orig, _new); + +#define _CK_KABI_REPLACE1(_new) _new; +#define _CK_KABI_REPLACE2(_new, ...) 
_new; _CK_KABI_REPLACE1(__VA_ARGS__) +#define _CK_KABI_REPLACE3(_new, ...) _new; _CK_KABI_REPLACE2(__VA_ARGS__) +#define _CK_KABI_REPLACE4(_new, ...) _new; _CK_KABI_REPLACE3(__VA_ARGS__) +#define _CK_KABI_REPLACE5(_new, ...) _new; _CK_KABI_REPLACE4(__VA_ARGS__) +#define _CK_KABI_REPLACE6(_new, ...) _new; _CK_KABI_REPLACE5(__VA_ARGS__) +#define _CK_KABI_REPLACE7(_new, ...) _new; _CK_KABI_REPLACE6(__VA_ARGS__) +#define _CK_KABI_REPLACE8(_new, ...) _new; _CK_KABI_REPLACE7(__VA_ARGS__) +#define _CK_KABI_REPLACE9(_new, ...) _new; _CK_KABI_REPLACE8(__VA_ARGS__) +#define _CK_KABI_REPLACE10(_new, ...) _new; _CK_KABI_REPLACE9(__VA_ARGS__) +#define _CK_KABI_REPLACE11(_new, ...) _new; _CK_KABI_REPLACE10(__VA_ARGS__) +#define _CK_KABI_REPLACE12(_new, ...) _new; _CK_KABI_REPLACE11(__VA_ARGS__) + +#define CK_KABI_REPLACE_SPLIT(_orig, ...) _CK_KABI_REPLACE(_orig, \ + struct { __PASTE(_CK_KABI_REPLACE, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) }); + +# define CK_KABI_RESERVE(n) _CK_KABI_RESERVE(n); + +#define _CK_KABI_USE1(n, _new) _CK_KABI_RESERVE(n), _new +#define _CK_KABI_USE2(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE1(__VA_ARGS__) +#define _CK_KABI_USE3(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE2(__VA_ARGS__) +#define _CK_KABI_USE4(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE3(__VA_ARGS__) +#define _CK_KABI_USE5(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE4(__VA_ARGS__) +#define _CK_KABI_USE6(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE5(__VA_ARGS__) +#define _CK_KABI_USE7(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE6(__VA_ARGS__) +#define _CK_KABI_USE8(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE7(__VA_ARGS__) +#define _CK_KABI_USE9(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE8(__VA_ARGS__) +#define _CK_KABI_USE10(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE9(__VA_ARGS__) +#define _CK_KABI_USE11(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE10(__VA_ARGS__) +#define _CK_KABI_USE12(n, ...) _CK_KABI_RESERVE(n); _CK_KABI_USE11(__VA_ARGS__) + +#define _CK_KABI_USE(...) _CK_KABI_REPLACE(__VA_ARGS__) +#define CK_KABI_USE(n, ...) _CK_KABI_USE(__PASTE(_CK_KABI_USE, COUNT_ARGS(__VA_ARGS__))(n, __VA_ARGS__)); + +# define CK_KABI_USE_SPLIT(n, ...) CK_KABI_REPLACE_SPLIT(_CK_KABI_RESERVE(n), __VA_ARGS__) + +#ifdef CONFIG_CK_KABI_RESERVE +# define _CK_KABI_RESERVE(n) unsigned long ck_reserved##n +#else +# define _CK_KABI_RESERVE(n) +#endif + +#define CK_KABI_EXCLUDE(_elem) _CK_KABI_EXCLUDE(_elem); + +#define CK_KABI_EXTEND_WITH_SIZE(_new, _size) \ + CK_KABI_EXTEND(union { \ + _new; \ + unsigned long CK_KABI_UNIQUE_ID[_size]; \ + __CK_KABI_CHECK_SIZE(_new, 8 * (_size)); \ + }) + +#ifdef CONFIG_CK_KABI_RESERVE +#define _CK_KABI_AUX_PTR(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved *_ck_reserved) +#define CK_KABI_AUX_PTR(_struct) \ + _CK_KABI_AUX_PTR(_struct); + +#define _CK_KABI_AUX_EMBED(_struct) \ + size_t _struct##_size_ck_reserved; \ + _CK_KABI_EXCLUDE(struct _struct##_ck_reserved _ck_reserved) +#define CK_KABI_AUX_EMBED(_struct) \ + _CK_KABI_AUX_EMBED(_struct); + +#define CK_KABI_USE_AUX_PTR(n1, n2, _struct) \ + CK_KABI_USE(n1, n2, \ + struct { CK_KABI_AUX_PTR(_struct) }) + +#define CK_KABI_AUX_SET_SIZE(_name, _struct) ({ \ + (_name)->_struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved); \ +}) + +#define CK_KABI_AUX_INIT_SIZE(_struct) \ + ._struct##_size_ck_reserved = sizeof(struct _struct##_ck_reserved), + +#define CK_KABI_AUX(_ptr, _struct, _field) ({ \ + size_t __off = offsetof(struct _struct##_ck_reserved, _field); \ + (_ptr)->_struct##_size_ck_reserved > __off ? 
true : false;	\
+})
+#else
+#define CK_KABI_AUX_PTR(_struct)
+#define CK_KABI_AUX_EMBED(_struct)
+#define CK_KABI_USE_AUX_PTR(n1, n2, _struct)
+#define CK_KABI_AUX_SET_SIZE(_name, _struct)
+#define CK_KABI_AUX_INIT_SIZE(_struct)
+#define CK_KABI_AUX(_ptr, _struct, _field) (false)
+#endif /* CONFIG_CK_KABI_RESERVE */
+
+#endif /* _LINUX_CK_KABI_H */
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 63873b93deaa62634da9ed3fb19543ba0131abdf..326ec54b8efaabed425be610e681887f6ef1bbd0 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -56,4 +56,6 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long
 extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
 
 extern void cma_reserve_pages_on_error(struct cma *cma);
+extern int __init cma_alloc_areas(unsigned int max_cma_size);
+extern void cma_enable_concurrency(struct cma *cma);
 #endif
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6b351e009f5976db447e8da82e579713d02f20b5..344f41a3e052d0276c5f9b9aa1cd6f3faee48e1e 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -296,6 +296,7 @@ extern char *d_absolute_path(const struct path *, char *, int);
 extern char *d_path(const struct path *, char *, int);
 extern char *dentry_path_raw(const struct dentry *, char *, int);
 extern char *dentry_path(const struct dentry *, char *, int);
+extern char *d_absolute_path_locked(const struct path *, char *, int);
 
 /* Allocation counts.. */
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index f2fc203fb8a1a253d3abad99402d9e7da148d2ea..0ce2ae6c944d7c7b3035dea42f699e09cb4d178f 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -509,4 +509,21 @@ pci_p2pdma_map_segment(struct pci_p2pdma_map_state *state, struct device *dev,
 }
 #endif /* CONFIG_PCI_P2PDMA */
 
+#if defined CONFIG_PCI && defined CONFIG_X86
+
+extern bool is_zhaoxin_kh40000;
+extern const struct dma_map_ops kh40000_dma_direct_ops;
+void kh40000_set_iommu_dma_ops(struct device *dev);
+
+#else
+
+bool __weak is_zhaoxin_kh40000;
+static inline void kh40000_set_iommu_dma_ops(struct device *dev)
+{
+
+}
+
+
+#endif
+
 #endif /* _LINUX_DMA_MAP_OPS_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4a40823c3c6784f19bc98911cd1557736eafba7a..64eb9a3624634f565e4adc94d7f59f9b9bb16b9e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -482,10 +482,10 @@ struct address_space {
 	pgoff_t			writeback_index;
 	const struct address_space_operations *a_ops;
 	unsigned long		flags;
-	struct rw_semaphore	i_mmap_rwsem;
 	errseq_t		wb_err;
 	spinlock_t		private_lock;
 	struct list_head	private_list;
+	struct rw_semaphore	i_mmap_rwsem;
 	void			*private_data;
 } __attribute__((aligned(sizeof(long)))) __randomize_layout;
 	/*
diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h
index 6583a58670c571050ad410e0dcf0718b6477292b..7dbe56452544670e95b90262c658865f045380c1 100644
--- a/include/linux/gfp_types.h
+++ b/include/linux/gfp_types.h
@@ -58,6 +58,11 @@ typedef unsigned int __bitwise gfp_t;
 #else
 #define ___GFP_NOLOCKDEP	0
 #endif
+#ifdef CONFIG_KFENCE
+#define ___GFP_NOKFENCE		0x8000000u
+#else
+#define ___GFP_NOKFENCE		0
+#endif
 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
 
 /*
@@ -99,12 +104,15 @@ typedef unsigned int __bitwise gfp_t;
 * node with no fallbacks or placement policy enforcements.
 *
 * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ *
+ * %__GFP_NOKFENCE indicates the allocation must not come from the KFENCE pool.
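+ *
+ * An illustrative use (not taken from this patch): a caller that must not
+ * receive a KFENCE-backed page can pass the flag straight to the page
+ * allocator, e.g. page = alloc_pages(GFP_KERNEL | __GFP_NOKFENCE, order);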
*/ #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) #define __GFP_HARDWALL ((__force gfp_t)___GFP_HARDWALL) #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE) #define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT) +#define __GFP_NOKFENCE ((__force gfp_t)___GFP_NOKFENCE) /** * DOC: Watermark modifiers @@ -249,7 +257,7 @@ typedef unsigned int __bitwise gfp_t; #define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP) /* Room for N __GFP_FOO bits */ -#define __GFP_BITS_SHIFT (26 + IS_ENABLED(CONFIG_LOCKDEP)) +#define __GFP_BITS_SHIFT (28) #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1)) /** diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 75607d4ba26cb7b75802522445aba6cbb8d2b681..bf4ea9f2f45716ebba8c60b2a4b7c4933a30d954 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -371,19 +371,23 @@ static inline int copy_mc_highpage(struct page *to, struct page *from) return ret; } #else +#ifndef __HAVE_ARCH_COPY_MC_USER_HIGHPAGE static inline int copy_mc_user_highpage(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { copy_user_highpage(to, from, vaddr, vma); return 0; } +#endif +#ifndef __HAVE_ARCH_COPY_MC_HIGHPAGE static inline int copy_mc_highpage(struct page *to, struct page *from) { copy_highpage(to, from); return 0; } #endif +#endif static inline void memcpy_page(struct page *dst_page, size_t dst_off, struct page *src_page, size_t src_off, diff --git a/include/linux/irqchip/arm-gic-phytium-2500.h b/include/linux/irqchip/arm-gic-phytium-2500.h new file mode 100644 index 0000000000000000000000000000000000000000..f212a29390bf654941489686151e7b1e08ff2bf2 --- /dev/null +++ b/include/linux/irqchip/arm-gic-phytium-2500.h @@ -0,0 +1,661 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H +#define __LINUX_IRQCHIP_ARM_GIC_PHYTIUM_2500_H + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. + */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_TYPER2 0x000C +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_IGROUPR 0x0080 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IGRPMODR 0x0D00 +#define GICD_NSACR 0x0E00 +#define GICD_IGROUPRnE 0x1000 +#define GICD_ISENABLERnE 0x1200 +#define GICD_ICENABLERnE 0x1400 +#define GICD_ISPENDRnE 0x1600 +#define GICD_ICPENDRnE 0x1800 +#define GICD_ISACTIVERnE 0x1A00 +#define GICD_ICACTIVERnE 0x1C00 +#define GICD_IPRIORITYRnE 0x2000 +#define GICD_ICFGRnE 0x3000 +#define GICD_IROUTER 0x6000 +#define GICD_IROUTERnE 0x8000 +#define GICD_IDREGS 0xFFD0 +#define GICD_PIDR2 0xFFE8 + +#define ESPI_BASE_INTID 4096 + +/* + * Those registers are actually from GICv2, but the spec demands that they + * are implemented as RES0 if ARE is 1 (which we do in KVM's emulated GICv3). 
+ */ +#define GICD_ITARGETSR 0x0800 +#define GICD_SGIR 0x0F00 +#define GICD_CPENDSGIR 0x0F10 +#define GICD_SPENDSGIR 0x0F20 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_nASSGIreq (1U << 8) +#define GICD_CTLR_DS (1U << 6) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IIDR_IMPLEMENTER_SHIFT 0 +#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT) +#define GICD_IIDR_REVISION_SHIFT 12 +#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT) +#define GICD_IIDR_VARIANT_SHIFT 16 +#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT) +#define GICD_IIDR_PRODUCT_ID_SHIFT 24 +#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT) + + +/* + * In systems with a single security state (what we emulate in KVM) + * the meaning of the interrupt group enable bits is slightly different + */ +#define GICD_CTLR_ENABLE_SS_G1 (1U << 1) +#define GICD_CTLR_ENABLE_SS_G0 (1U << 0) + +#define GICD_TYPER_RSS (1U << 26) +#define GICD_TYPER_LPIS (1U << 17) +#define GICD_TYPER_MBIS (1U << 16) +#define GICD_TYPER_ESPI (1U << 8) + +#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) +#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1) +#define GICD_TYPER_SPIS(typer) ((((typer) & 0x1f) + 1) * 32) +#define GICD_TYPER_ESPIS(typer) \ + (((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0) + +#define GICD_TYPER2_nASSGIcap (1U << 8) +#define GICD_TYPER2_VIL (1U << 7) +#define GICD_TYPER2_VID GENMASK(4, 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +#define GIC_V3_DIST_SIZE 0x10000 + +#define GIC_PAGE_SIZE_4K 0ULL +#define GIC_PAGE_SIZE_16K 1ULL +#define GIC_PAGE_SIZE_64K 2ULL +#define GIC_PAGE_SIZE_MASK 3ULL + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_IDREGS GICD_IDREGS +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_CTLR_ENABLE_LPIS (1UL << 0) +#define GICR_CTLR_CES (1UL << 1) +#define GICR_CTLR_IR (1UL << 2) +#define GICR_CTLR_RWP (1UL << 3) + +#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) + +#define EPPI_BASE_INTID 1056 + +#define GICR_TYPER_NR_PPIS(r) \ + ({ \ + unsigned int __ppinum = ((r) >> 27) & 0x1f; \ + unsigned int __nr_ppis = 16; \ + if (__ppinum == 1 || __ppinum == 2) \ + __nr_ppis += __ppinum * 32; \ + \ + __nr_ppis; \ + }) + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +#define GIC_BASER_CACHE_nCnB 0ULL +#define GIC_BASER_CACHE_SameAsInner 0ULL +#define GIC_BASER_CACHE_nC 1ULL +#define GIC_BASER_CACHE_RaWt 2ULL +#define GIC_BASER_CACHE_RaWb 3ULL +#define GIC_BASER_CACHE_WaWt 4ULL +#define GIC_BASER_CACHE_WaWb 5ULL +#define GIC_BASER_CACHE_RaWaWt 6ULL +#define GIC_BASER_CACHE_RaWaWb 7ULL +#define GIC_BASER_CACHE_MASK 7ULL +#define GIC_BASER_NonShareable 0ULL +#define GIC_BASER_InnerShareable 1ULL +#define GIC_BASER_OuterShareable 2ULL +#define GIC_BASER_SHAREABILITY_MASK 3ULL + +#define GIC_BASER_CACHEABILITY(reg, inner_outer, type) \ + 
(GIC_BASER_CACHE_##type << reg##_##inner_outer##_CACHEABILITY_SHIFT) + +#define GIC_BASER_SHAREABILITY(reg, type) \ + (GIC_BASER_##type << reg##_SHAREABILITY_SHIFT) + +/* encode a size field of width @w containing @n - 1 units */ +#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0)) + +#define GICR_PROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, SHAREABILITY_MASK) +#define GICR_PROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, MASK) +#define GICR_PROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PROPBASER, OUTER, MASK) +#define GICR_PROPBASER_CACHEABILITY_MASK GICR_PROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_PROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PROPBASER, InnerShareable) + +#define GICR_PROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nCnB) +#define GICR_PROPBASER_nC GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, nC) +#define GICR_PROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWt) +#define GICR_PROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWb) +#define GICR_PROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWt) +#define GICR_PROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, WaWb) +#define GICR_PROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWt) +#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb) + +#define GICR_PROPBASER_IDBITS_MASK (0x1f) +#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12)) +#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16)) + +#define GICR_PENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_PENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_PENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, SHAREABILITY_MASK) +#define GICR_PENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, MASK) +#define GICR_PENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_PENDBASER, OUTER, MASK) +#define GICR_PENDBASER_CACHEABILITY_MASK GICR_PENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_PENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_PENDBASER, InnerShareable) + +#define GICR_PENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nCnB) +#define GICR_PENDBASER_nC GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, nC) +#define GICR_PENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWt) +#define GICR_PENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWb) +#define GICR_PENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWt) +#define GICR_PENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, WaWb) +#define GICR_PENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWt) +#define GICR_PENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PENDBASER, INNER, RaWaWb) + +#define GICR_PENDBASER_PTZ BIT_ULL(62) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_IGROUPR0 GICD_IGROUPR +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR +#define 
GICR_IGRPMODR0 GICD_IGRPMODR +#define GICR_NSACR GICD_NSACR + +#define GICR_TYPER_PLPIS (1U << 0) +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_DIRTY (1U << 2) +#define GICR_TYPER_DirectLPIS (1U << 3) +#define GICR_TYPER_LAST (1U << 4) +#define GICR_TYPER_RVPEID (1U << 7) +#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24) +#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32) + +#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0) +#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32) +#define GICR_INVLPIR_V GENMASK_ULL(63, 63) + +#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID +#define GICR_INVALLR_V GICR_INVLPIR_V + +#define GIC_V3_REDIST_SIZE 0x20000 + +#define LPI_PROP_GROUP1 (1 << 1) +#define LPI_PROP_ENABLED (1 << 0) + +/* + * Re-Distributor registers, offsets from VLPI_base + */ +#define GICR_VPROPBASER 0x0070 + +#define GICR_VPROPBASER_IDBITS_MASK 0x1f + +#define GICR_VPROPBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPROPBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_SHIFT (56) + +#define GICR_VPROPBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, SHAREABILITY_MASK) +#define GICR_VPROPBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, MASK) +#define GICR_VPROPBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPROPBASER, OUTER, MASK) +#define GICR_VPROPBASER_CACHEABILITY_MASK \ + GICR_VPROPBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPROPBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPROPBASER, InnerShareable) + +#define GICR_VPROPBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nCnB) +#define GICR_VPROPBASER_nC GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, nC) +#define GICR_VPROPBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWt) +#define GICR_VPROPBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWb) +#define GICR_VPROPBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWt) +#define GICR_VPROPBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, WaWb) +#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt) +#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb) + +/* + * GICv4.1 VPROPBASER reinvention. A subtle mix between the old + * VPROPBASER and ITS_BASER. Just not quite any of the two. 
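+ *
+ * As a hedged sketch of how these fields are meant to compose (npg and
+ * l1_pa are placeholders, and FIELD_PREP comes from linux/bitfield.h):
+ *
+ *	val  = GICR_VPROPBASER_4_1_VALID | GICR_VPROPBASER_4_1_INDIRECT;
+ *	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GIC_ENCODE_SZ(npg, 7));
+ *	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, l1_pa >> 12);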
+ */ +#define GICR_VPROPBASER_4_1_VALID (1ULL << 63) +#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59) +#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55) +#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53) +#define GICR_VPROPBASER_4_1_Z (1ULL << 52) +#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12) +#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0) + +#define GICR_VPENDBASER 0x0078 + +#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10) +#define GICR_VPENDBASER_INNER_CACHEABILITY_SHIFT (7) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_SHIFT (56) +#define GICR_VPENDBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, SHAREABILITY_MASK) +#define GICR_VPENDBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, MASK) +#define GICR_VPENDBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GICR_VPENDBASER, OUTER, MASK) +#define GICR_VPENDBASER_CACHEABILITY_MASK \ + GICR_VPENDBASER_INNER_CACHEABILITY_MASK + +#define GICR_VPENDBASER_NonShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable) + +#define GICR_VPENDBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable) + +#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB) +#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC) +#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt) +#define GICR_VPENDBASER_RaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWb) +#define GICR_VPENDBASER_WaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWt) +#define GICR_VPENDBASER_WaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, WaWb) +#define GICR_VPENDBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWt) +#define GICR_VPENDBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWaWb) + +#define GICR_VPENDBASER_Dirty (1ULL << 60) +#define GICR_VPENDBASER_PendingLast (1ULL << 61) +#define GICR_VPENDBASER_IDAI (1ULL << 62) +#define GICR_VPENDBASER_Valid (1ULL << 63) + +/* + * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields, + * also use the above Valid, PendingLast and Dirty. 
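+ *
+ * An illustrative residency write (vpe_id is a placeholder; the group
+ * enables depend on the guest's vGIC configuration):
+ *
+ *	val  = GICR_VPENDBASER_Valid;
+ *	val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe_id);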
+ */ +#define GICR_VPENDBASER_4_1_DB (1ULL << 62) +#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59) +#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58) +#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0) + +#define GICR_VSGIR 0x0080 + +#define GICR_VSGIR_VPEID GENMASK(15, 0) + +#define GICR_VSGIPENDR 0x0088 + +#define GICR_VSGIPENDR_BUSY (1U << 31) +#define GICR_VSGIPENDR_PENDING GENMASK(15, 0) + +/* + * ITS registers, offsets from ITS_base + */ +#define GITS_CTLR 0x0000 +#define GITS_IIDR 0x0004 +#define GITS_TYPER 0x0008 +#define GITS_MPIDR 0x0018 +#define GITS_CBASER 0x0080 +#define GITS_CWRITER 0x0088 +#define GITS_CREADR 0x0090 +#define GITS_BASER 0x0100 +#define GITS_IDREGS_BASE 0xffd0 +#define GITS_PIDR0 0xffe0 +#define GITS_PIDR1 0xffe4 +#define GITS_PIDR2 GICR_PIDR2 +#define GITS_PIDR4 0xffd0 +#define GITS_CIDR0 0xfff0 +#define GITS_CIDR1 0xfff4 +#define GITS_CIDR2 0xfff8 +#define GITS_CIDR3 0xfffc + +#define GITS_TRANSLATER 0x10040 + +#define GITS_SGIR 0x20020 + +#define GITS_SGIR_VPEID GENMASK_ULL(47, 32) +#define GITS_SGIR_VINTID GENMASK_ULL(3, 0) + +#define GITS_CTLR_ENABLE (1U << 0) +#define GITS_CTLR_ImDe (1U << 1) +#define GITS_CTLR_ITS_NUMBER_SHIFT 4 +#define GITS_CTLR_ITS_NUMBER (0xFU << GITS_CTLR_ITS_NUMBER_SHIFT) +#define GITS_CTLR_QUIESCENT (1U << 31) + +#define GITS_TYPER_PLPIS (1UL << 0) +#define GITS_TYPER_VLPIS (1UL << 1) +#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 +#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4) +#define GITS_TYPER_IDBITS_SHIFT 8 +#define GITS_TYPER_DEVBITS_SHIFT 13 +#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13) +#define GITS_TYPER_PTA (1UL << 19) +#define GITS_TYPER_HCC_SHIFT 24 +#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff) +#define GITS_TYPER_VMOVP (1ULL << 37) +#define GITS_TYPER_VMAPP (1ULL << 40) +#define GITS_TYPER_SVPET GENMASK_ULL(42, 41) + +#define GITS_IIDR_REV_SHIFT 12 +#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT) +#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf) +#define GITS_IIDR_PRODUCTID_SHIFT 24 + +#define GITS_CBASER_VALID (1ULL << 63) +#define GITS_CBASER_SHAREABILITY_SHIFT (10) +#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59) +#define GITS_CBASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_CBASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_CBASER, SHAREABILITY_MASK) +#define GITS_CBASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, MASK) +#define GITS_CBASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_CBASER, OUTER, MASK) +#define GITS_CBASER_CACHEABILITY_MASK GITS_CBASER_INNER_CACHEABILITY_MASK + +#define GITS_CBASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_CBASER, InnerShareable) + +#define GITS_CBASER_nCnB GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nCnB) +#define GITS_CBASER_nC GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, nC) +#define GITS_CBASER_RaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWt) +#define GITS_CBASER_RaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWb) +#define GITS_CBASER_WaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWt) +#define GITS_CBASER_WaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, WaWb) +#define GITS_CBASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWt) +#define GITS_CBASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_CBASER, INNER, RaWaWb) + +#define GITS_CBASER_ADDRESS(cbaser) ((cbaser) & GENMASK_ULL(51, 12)) + +#define GITS_BASER_NR_REGS 8 + +#define GITS_BASER_VALID (1ULL << 63) +#define GITS_BASER_INDIRECT (1ULL << 62) + +#define GITS_BASER_INNER_CACHEABILITY_SHIFT (59) +#define 
GITS_BASER_OUTER_CACHEABILITY_SHIFT (53) +#define GITS_BASER_INNER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, INNER, MASK) +#define GITS_BASER_CACHEABILITY_MASK GITS_BASER_INNER_CACHEABILITY_MASK +#define GITS_BASER_OUTER_CACHEABILITY_MASK \ + GIC_BASER_CACHEABILITY(GITS_BASER, OUTER, MASK) +#define GITS_BASER_SHAREABILITY_MASK \ + GIC_BASER_SHAREABILITY(GITS_BASER, SHAREABILITY_MASK) + +#define GITS_BASER_nCnB GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nCnB) +#define GITS_BASER_nC GIC_BASER_CACHEABILITY(GITS_BASER, INNER, nC) +#define GITS_BASER_RaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWt) +#define GITS_BASER_RaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWb) +#define GITS_BASER_WaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWt) +#define GITS_BASER_WaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, WaWb) +#define GITS_BASER_RaWaWt GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWt) +#define GITS_BASER_RaWaWb GIC_BASER_CACHEABILITY(GITS_BASER, INNER, RaWaWb) + +#define GITS_BASER_TYPE_SHIFT (56) +#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) +#define GITS_BASER_ENTRY_SIZE_SHIFT (48) +#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) +#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) +#define GITS_BASER_PHYS_52_to_48(phys) \ + (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) +#define GITS_BASER_ADDR_48_to_52(baser) \ + (((baser) & GENMASK_ULL(47, 16)) | (((baser) >> 12) & 0xf) << 48) + +#define GITS_BASER_SHAREABILITY_SHIFT (10) +#define GITS_BASER_InnerShareable \ + GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) +#define GITS_BASER_PAGE_SIZE_SHIFT (8) +#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT) +#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K) +#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K) +#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K) +#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK) +#define GITS_BASER_PAGES_MAX 256 +#define GITS_BASER_PAGES_SHIFT (0) +#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1) + +#define GITS_BASER_TYPE_NONE 0 +#define GITS_BASER_TYPE_DEVICE 1 +#define GITS_BASER_TYPE_VCPU 2 +#define GITS_BASER_TYPE_RESERVED3 3 +#define GITS_BASER_TYPE_COLLECTION 4 +#define GITS_BASER_TYPE_RESERVED5 5 +#define GITS_BASER_TYPE_RESERVED6 6 +#define GITS_BASER_TYPE_RESERVED7 7 + +#define GITS_LVL1_ENTRY_SIZE (8UL) + +/* + * ITS commands + */ +#define GITS_CMD_MAPD 0x08 +#define GITS_CMD_MAPC 0x09 +#define GITS_CMD_MAPTI 0x0a +#define GITS_CMD_MAPI 0x0b +#define GITS_CMD_MOVI 0x01 +#define GITS_CMD_DISCARD 0x0f +#define GITS_CMD_INV 0x0c +#define GITS_CMD_MOVALL 0x0e +#define GITS_CMD_INVALL 0x0d +#define GITS_CMD_INT 0x03 +#define GITS_CMD_CLEAR 0x04 +#define GITS_CMD_SYNC 0x05 + +/* + * GICv4 ITS specific commands + */ +#define GITS_CMD_GICv4(x) ((x) | 0x20) +#define GITS_CMD_VINVALL GITS_CMD_GICv4(GITS_CMD_INVALL) +#define GITS_CMD_VMAPP GITS_CMD_GICv4(GITS_CMD_MAPC) +#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI) +#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI) +#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC) +/* VMOVP, VSGI and INVDB are the odd ones, as they dont have a physical counterpart */ +#define GITS_CMD_VMOVP GITS_CMD_GICv4(2) +#define GITS_CMD_VSGI GITS_CMD_GICv4(3) +#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe) + +/* + * ITS error numbers + */ +#define E_ITS_MOVI_UNMAPPED_INTERRUPT 0x010107 +#define E_ITS_MOVI_UNMAPPED_COLLECTION 0x010109 +#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307 
+#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507 +#define E_ITS_MAPD_DEVICE_OOR 0x010801 +#define E_ITS_MAPD_ITTSIZE_OOR 0x010802 +#define E_ITS_MAPC_PROCNUM_OOR 0x010902 +#define E_ITS_MAPC_COLLECTION_OOR 0x010903 +#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04 +#define E_ITS_MAPTI_ID_OOR 0x010a05 +#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06 +#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07 +#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09 +#define E_ITS_MOVALL_PROCNUM_OOR 0x010e01 +#define E_ITS_DISCARD_UNMAPPED_INTERRUPT 0x010f07 + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_SHIFT (1) +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_drop (1U << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT) +#define ICC_CTLR_EL1_CBPR_SHIFT 0 +#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT) +#define ICC_CTLR_EL1_PMHE_SHIFT 6 +#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT) +#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8 +#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT) +#define ICC_CTLR_EL1_ID_BITS_SHIFT 11 +#define ICC_CTLR_EL1_ID_BITS_MASK (0x7 << ICC_CTLR_EL1_ID_BITS_SHIFT) +#define ICC_CTLR_EL1_SEIS_SHIFT 14 +#define ICC_CTLR_EL1_SEIS_MASK (0x1 << ICC_CTLR_EL1_SEIS_SHIFT) +#define ICC_CTLR_EL1_A3V_SHIFT 15 +#define ICC_CTLR_EL1_A3V_MASK (0x1 << ICC_CTLR_EL1_A3V_SHIFT) +#define ICC_CTLR_EL1_RSS (0x1 << 18) +#define ICC_CTLR_EL1_ExtRange (0x1 << 19) +#define ICC_PMR_EL1_SHIFT 0 +#define ICC_PMR_EL1_MASK (0xff << ICC_PMR_EL1_SHIFT) +#define ICC_BPR0_EL1_SHIFT 0 +#define ICC_BPR0_EL1_MASK (0x7 << ICC_BPR0_EL1_SHIFT) +#define ICC_BPR1_EL1_SHIFT 0 +#define ICC_BPR1_EL1_MASK (0x7 << ICC_BPR1_EL1_SHIFT) +#define ICC_IGRPEN0_EL1_SHIFT 0 +#define ICC_IGRPEN0_EL1_MASK (1 << ICC_IGRPEN0_EL1_SHIFT) +#define ICC_IGRPEN1_EL1_SHIFT 0 +#define ICC_IGRPEN1_EL1_MASK (1 << ICC_IGRPEN1_EL1_SHIFT) +#define ICC_SRE_EL1_DIB (1U << 2) +#define ICC_SRE_EL1_DFB (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* These are for GICv2 emulation only */ +#define GICH_LR_VIRTUALID (0x3ffUL << 0) +#define GICH_LR_PHYSID_CPUID_SHIFT (10) +#define GICH_LR_PHYSID_CPUID (7UL << GICH_LR_PHYSID_CPUID_SHIFT) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +#define ICC_SGI1R_TARGET_LIST_SHIFT 0 +#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) +#define ICC_SGI1R_AFFINITY_1_SHIFT 16 +#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) +#define ICC_SGI1R_SGI_ID_SHIFT 24 +#define ICC_SGI1R_SGI_ID_MASK (0xfULL << ICC_SGI1R_SGI_ID_SHIFT) +#define ICC_SGI1R_AFFINITY_2_SHIFT 32 +#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_2_SHIFT) +#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 +#define ICC_SGI1R_RS_SHIFT 44 +#define ICC_SGI1R_RS_MASK (0xfULL << ICC_SGI1R_RS_SHIFT) +#define ICC_SGI1R_AFFINITY_3_SHIFT 48 +#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_3_SHIFT) + +#include + +#ifndef __ASSEMBLY__ + +/* + * We need a value to serve as a irq-type for LPIs. Choose one that will + * hopefully pique the interest of the reviewer. 
+ */
+#define GIC_IRQ_TYPE_LPI	0xa110c8ed
+
+struct rdists {
+	struct {
+		raw_spinlock_t	rd_lock;
+		void __iomem	*rd_base;
+		struct page	*pend_page;
+		phys_addr_t	phys_base;
+		u64		flags;
+		cpumask_t	*vpe_table_mask;
+		void		*vpe_l1_base;
+	} __percpu		*rdist;
+	phys_addr_t		prop_table_pa;
+	void			*prop_table_va;
+	u64			flags;
+	u32			gicd_typer;
+	u32			gicd_typer2;
+	int			cpuhp_memreserve_state;
+	bool			has_vlpis;
+	bool			has_rvpeid;
+	bool			has_direct_lpi;
+	bool			has_vpend_valid_dirty;
+};
+
+struct irq_domain;
+struct fwnode_handle;
+int __init its_lpi_memreserve_init(void);
+int phytium_its_cpu_init(void);
+int phytium_its_init(struct fwnode_handle *handle, struct rdists *rdists,
+		     struct irq_domain *domain);
+int mbi_init(struct fwnode_handle *fwnode, struct irq_domain *parent);
+
+static inline bool gic_enable_sre(void)
+{
+	u32 val;
+
+	val = gic_read_sre();
+	if (val & ICC_SRE_EL1_SRE)
+		return true;
+
+	val |= ICC_SRE_EL1_SRE;
+	gic_write_sre(val);
+	val = gic_read_sre();
+
+	return !!(val & ICC_SRE_EL1_SRE);
+}
+
+#endif
+
+#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 52772c826c8682086a15d4dced31d34cc5ceeb04..88c59cd882923cf0a2d790a6a2738c324ed4f3f0 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -497,7 +497,10 @@ struct jbd2_journal_handle
 	unsigned int	h_type:		8;
 	unsigned int	h_line_no:	16;
 
+	unsigned long	h_pre_start_jiffies;
 	unsigned long	h_start_jiffies;
+	u64		h_sched_wait_sum;
+	u64		h_io_wait_sum;
 	unsigned int	h_requested_credits;
 
 	unsigned int	saved_alloc_context;
@@ -706,6 +709,9 @@ struct transaction_s
 	 * structures associated with the transaction
 	 */
 	struct list_head	t_private_list;
+
+	/* When this transaction is locked */
+	unsigned long		t_locked_time;
 };
 
 struct transaction_run_stats_s {
@@ -844,6 +850,16 @@ struct journal_s
 	 */
 	wait_queue_head_t	j_wait_commit;
 
+	/**
+	 * @j_wait_done_checkpoint: Wait queue for waiting for checkpoint to complete.
+	 */
+	wait_queue_head_t	j_wait_done_checkpoint;
+
+	/**
+	 * @j_wait_checkpoint: Wait queue to trigger checkpointing.
+	 */
+	wait_queue_head_t	j_wait_checkpoint;
+
 	/**
 	 * @j_wait_updates: Wait queue to wait for updates to complete.
 	 */
@@ -1200,6 +1216,13 @@ struct journal_s
 	int (*j_finish_inode_data_buffers)
 		(struct jbd2_inode *);
 
+	/**
+	 * @j_checkpoint_task:
+	 *
+	 * Pointer to the current checkpoint thread for this journal.
+	 */
+	struct task_struct	*j_checkpoint_task;
+
 	/*
 	 * Journal statistics
 	 */
@@ -1219,6 +1242,20 @@ struct journal_s
 	 */
 	struct transaction_stats_s j_stats;
 
+	/**
+	 * @j_force_copy: if nonzero, force buffer copy-out.
+	 */
+	unsigned int		j_force_copy;
+
+	/**
+	 * @j_stall_thresh: when a transaction is locked but still has
+	 * outstanding handles, those handles prevent the transaction from
+	 * committing; trace any handle that stalls the transaction for
+	 * longer than @j_stall_thresh milliseconds (default 100 ms).
+	 */
+#define JBD2_DEFAULT_TRANS_STALL_THRESH	100
+	unsigned long		j_stall_thresh;
+
 	/**
 	 * @j_failed_commit: Failed journal commit ID.
*/ diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h index 9935f7ecbfb9e31a68a6c6d0748aea49e0171344..9e86ee77d335490e7ab3021cdd0ec72b8065a5fd 100644 --- a/include/linux/kernel_stat.h +++ b/include/linux/kernel_stat.h @@ -30,6 +30,11 @@ enum cpu_usage_stat { CPUTIME_GUEST_NICE, #ifdef CONFIG_SCHED_CORE CPUTIME_FORCEIDLE, + CPUTIME_FORCEIDLE_TASK, +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + CPUTIME_SIBIDLE, + CPUTIME_SIBIDLE_TASK, #endif NR_STATS, }; @@ -130,8 +135,8 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_idle_ticks(unsigned long ticks); -#ifdef CONFIG_SCHED_CORE -extern void __account_forceidle_time(struct task_struct *tsk, u64 delta); +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) +extern void __account_sibidle_time(struct task_struct *tsk, u64 delta, u64 delta_task, bool fi); #endif #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 401af475751413187fe047f5a5cb69aae11b0be0..6771c6eea7206c0e9f56debb1d5097c9b34d1db6 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -16,19 +16,45 @@ #include #include +#include +#include -extern unsigned long kfence_sample_interval; +extern long kfence_sample_interval; -/* - * We allocate an even number of pages, as it simplifies calculations to map - * address to metadata indices; effectively, the very first page serves as an - * extended guard page, but otherwise has no special purpose. - */ -#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE) -extern char *__kfence_pool; +struct kfence_pool_area { + struct rb_node rb_node; /* binary tree linked to root */ + struct kfence_metadata *meta; /* metadata per area */ + char *addr; /* start kfence pool address */ + unsigned long pool_size; /* size of kfence pool of this area */ + unsigned long nr_objects; /* max object number of this area, 0 marked as zombie area */ + int node; /* the numa node (freelist) this area belongs to, likely from phy mem node */ + atomic_t _ref; /* count kpa ref, to protect kpa itself */ + struct list_head list; /* ready to be added to kfence_pool_root */ + struct percpu_ref refcnt; /* count in-use objects, to protect pool, meta, etc... */ + struct work_struct work; /* use workqueue to free unused area */ +}; DECLARE_STATIC_KEY_FALSE(kfence_allocation_key); +DECLARE_STATIC_KEY_FALSE(kfence_skip_interval); extern atomic_t kfence_allocation_gate; +extern unsigned long kfence_num_objects; +extern char *__kfence_pool_early_init; + +/** + * is_kfence_address_area() - check if an address belongs to KFENCE pool in given area + * @addr: address to check + * @kpa: area to check + * + * Return: true or false depending on whether the address is within the KFENCE + * object range in given area. + * + * This function is used when you already know the nearest leftside area. + */ +static __always_inline bool is_kfence_address_area(const void *addr, + const struct kfence_pool_area *kpa) +{ + return unlikely(kpa && (unsigned long)((char *)addr - kpa->addr) < kpa->pool_size); +} /** * is_kfence_address() - check if an address belongs to KFENCE pool @@ -50,12 +76,17 @@ extern atomic_t kfence_allocation_gate; */ static __always_inline bool is_kfence_address(const void *addr) { +#if defined(CONFIG_KASAN) || defined(CONFIG_DEBUG_KMEMLEAK) /* - * The __kfence_pool != NULL check is required to deal with the case - * where __kfence_pool == NULL && addr < KFENCE_POOL_SIZE. 
Keep it in - * the slow-path after the range-check! + * KASAN functions such as kasan_record_aux_stack(), + * kasan_poison_shadow(), or kasan_unpoison_shadow() + * may give an invalid kaddr (direct mapping kernel address). + * We must add a check here. */ - return unlikely((unsigned long)((char *)addr - __kfence_pool) < KFENCE_POOL_SIZE && __kfence_pool); + return virt_addr_valid(addr) && PageKfence(virt_to_page(addr)); +#else + return PageKfence(virt_to_page(addr)); +#endif } /** @@ -72,6 +103,17 @@ void __init kfence_alloc_pool_and_metadata(void); */ void __init kfence_init(void); +/** + * update_kfence_booting_max() - analyse the max num_objects from cmdline + * + * Read the config from boot cmdline and limit kfence pool size. + * This function is called by kfence itself (e.g., kfence_alloc_pool()), or, + * by specific arch alloc (e.g., arm64_kfence_alloc_pool()). + * + * Return: 1 if kfence_num_objects is changed, otherwise 0. + */ +int __init update_kfence_booting_max(void); + /** * kfence_shutdown_cache() - handle shutdown_cache() for KFENCE objects * @s: cache being shut down @@ -97,7 +139,8 @@ void kfence_shutdown_cache(struct kmem_cache *s); * Allocate a KFENCE object. Allocators must not call this function directly, * use kfence_alloc() instead. */ -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags); +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node); +struct page *__kfence_alloc_page(int node, gfp_t flags); /** * kfence_alloc() - allocate a KFENCE object with a low probability @@ -124,9 +167,73 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp if (!static_branch_likely(&kfence_allocation_key)) return NULL; #endif - if (likely(atomic_read(&kfence_allocation_gate))) + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) return NULL; - return __kfence_alloc(s, size, flags); + return __kfence_alloc(s, size, flags, NUMA_NO_NODE); +} + +/** + * kfence_alloc_node() - allocate a KFENCE object with a low probability + * @s: struct kmem_cache with object requirements + * @size: exact size of the object to allocate (can be less than @s->size + * e.g. for kmalloc caches) + * @flags: GFP flags + * @node: alloc from kfence pool on which node + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE object. + * + * kfence_alloc_node() should be inserted into the heap allocation fast path, + * allowing it to transparently return KFENCE-allocated objects with a low + * probability using a static branch (the probability is controlled by the + * kfence.sample_interval boot parameter). + */ +static __always_inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, + int node) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + return __kfence_alloc(s, size, flags, node); +} + +/** + * kfence_alloc_page() - allocate a KFENCE page with a low probability + * @node: preferred nid + * @flags: GFP flags + * + * Return: + * * NULL - must proceed with allocating as usual, + * * non-NULL - pointer to a KFENCE page. + * + * the order-0 page version of kfence_alloc(). 
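
For context, the intended call-site shape (an assumed slab fast path, not code from this header; object_size is the slab-internal field name)::

    static void *slab_try_kfence(struct kmem_cache *s, gfp_t flags, int node)
    {
            /* NULL means: fall back to the regular slab allocation */
            void *obj = kfence_alloc_node(s, s->object_size, flags, node);

            return obj ? obj : kmem_cache_alloc_node(s, flags, node);
    }
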
+ */ +static __always_inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ +#if defined(CONFIG_KFENCE_STATIC_KEYS) || CONFIG_KFENCE_SAMPLE_INTERVAL == 0 + if (!static_branch_unlikely(&kfence_allocation_key)) + return NULL; +#else + if (!static_branch_likely(&kfence_allocation_key)) + return NULL; +#endif + if (order) + return NULL; + + if (!static_branch_likely(&kfence_skip_interval) && + likely(atomic_read(&kfence_allocation_gate))) + return NULL; + + return __kfence_alloc_page(node, flags); } /** @@ -166,6 +273,7 @@ void *kfence_object_start(const void *addr); * Release a KFENCE object and mark it as freed. */ void __kfence_free(void *addr); +void __kfence_free_page(struct page *page, void *addr); /** * kfence_free() - try to release an arbitrary heap object to KFENCE pool @@ -188,6 +296,30 @@ static __always_inline __must_check bool kfence_free(void *addr) return true; } +/** + * kfence_free_page() - try to release a page to KFENCE pool + * @page: page to be freed + * + * Return: + * * false - page doesn't belong to KFENCE pool and was ignored, + * * true - page was released to KFENCE pool. + * + * Release a KFENCE page and mark it as freed. May be called on any page, + * even non-KFENCE page. The allocator must check the return value to + * determine if it was a KFENCE object or not. + */ +static __always_inline __must_check bool kfence_free_page(struct page *page) +{ + void *addr; + + if (!PageKfence(page)) + return false; + + addr = page_to_virt(page); + __kfence_free_page(page, addr); + return true; +} + /** * kfence_handle_page_fault() - perform page fault handling for KFENCE pages * @addr: faulting address @@ -228,10 +360,19 @@ static inline void kfence_alloc_pool_and_metadata(void) { } static inline void kfence_init(void) { } static inline void kfence_shutdown_cache(struct kmem_cache *s) { } static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; } +static inline void *kfence_alloc_node(struct kmem_cache *s, size_t size, gfp_t flags, int node) +{ + return NULL; +} +static inline struct page *kfence_alloc_page(unsigned int order, int node, gfp_t flags) +{ + return NULL; +} static inline size_t kfence_ksize(const void *addr) { return 0; } static inline void *kfence_object_start(const void *addr) { return NULL; } static inline void __kfence_free(void *addr) { } static inline bool __must_check kfence_free(void *addr) { return false; } +static inline bool __must_check kfence_free_page(struct page *page) { return false; } static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fdcad69f81cd38edf52dce7dc3d7a4e9..d027f8fd23bfb36f8f09104c9b15869c1f995e79 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -1766,6 +1766,9 @@ static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU, +#ifdef CONFIG_SW64 + KVM_STAT_DFX_SW64, /* Detail For vcpu stat EXtension */ +#endif }; struct kvm_stat_data { @@ -1895,6 +1898,21 @@ struct _kvm_stats_desc { HALT_POLL_HIST_COUNT), \ STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking) +#ifdef CONFIG_SW64 +enum dfx_sw64_stat_kind { + DFX_SW64_STAT_U64, + DFX_SW64_STAT_CPUTIME, +}; + +/* Detail For vcpu stat EXtension debugfs item */ +struct dfx_sw64_kvm_stats_debugfs_item { + const char *name; + int offset; + enum dfx_sw64_stat_kind dfx_kind; + struct dentry *dentry; +}; +extern struct 
dfx_sw64_kvm_stats_debugfs_item dfx_sw64_debugfs_entries[]; +#endif extern struct dentry *kvm_debugfs_dir; ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header, diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e4e24da16d2c398493e197cfb97f25be9940e3d0..e8498d088bb25a37fccdd95f82a8aa3729da5e48 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1159,6 +1159,18 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, gfp_t gfp_mask, unsigned long *total_scanned); +void memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext); + +#ifdef CONFIG_RICH_CONTAINER +struct mem_cgroup *rich_container_get_memcg(void); +#else +static inline struct mem_cgroup *rich_container_get_memcg(void) +{ + return NULL; +} +#endif + #else /* CONFIG_MEMCG */ #define MEM_CGROUP_ID_SHIFT 0 @@ -1580,6 +1592,12 @@ void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { } +static inline void +memcg_meminfo(struct mem_cgroup *memcg, + struct sysinfo *info, struct sysinfo_ext *ext) +{ +} + static inline void split_page_memcg(struct page *head, unsigned int nr) { } diff --git a/include/linux/mm.h b/include/linux/mm.h index bf5d0b1b16f4341e8851d9bdfef30df631f62226..6193311fae5b22fcbdaa900052b21cde5f2f84fb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3461,6 +3461,10 @@ int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr, int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *); int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr, struct page **pages, unsigned long *num); +int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page *page); +int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr, + struct page **pages, unsigned long *num); int vm_map_pages(struct vm_area_struct *vma, struct page **pages, unsigned long num); int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index 962cd41a2cb5aff1dbf16a90f2573d81f064444c..c401b4e975cc05659cddc535af9738e662673a3b 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -276,7 +276,7 @@ struct kparam_array read-only sections (which is part of respective UNIX ABI on these platforms). So 'const' makes no sense and even causes compile failures with some compilers. 
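
Returning to the memcontrol.h hunk above: a sketch of how the rich-container helpers are meant to combine (assumed /proc-style caller; struct sysinfo_ext comes from elsewhere in this series, and memcg reference handling is elided)::

    static void fill_meminfo(struct sysinfo *si, struct sysinfo_ext *ext)
    {
            struct mem_cgroup *memcg = rich_container_get_memcg();

            /* inside a rich container report the memcg view, else global */
            if (memcg)
                    memcg_meminfo(memcg, si, ext);
            else
                    si_meminfo(si);
    }
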
*/ -#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) +#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64) || defined(CONFIG_SW64) #define __moduleparam_const #else #define __moduleparam_const const diff --git a/include/linux/nmi.h b/include/linux/nmi.h index e92e378df000fb1eca1e082ebc889fe7849a19d2..43dd3a79fdf21ec7143614c30c4bb49a2183273e 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -235,4 +235,18 @@ static inline void nmi_backtrace_stall_snap(const struct cpumask *btp) {} static inline void nmi_backtrace_stall_check(const struct cpumask *btp) {} #endif +#ifdef CONFIG_SDEI_WATCHDOG +void sdei_watchdog_hardlockup_enable(unsigned int cpu); +void sdei_watchdog_hardlockup_disable(unsigned int cpu); +void sdei_watchdog_clear_eoi(void); +int sdei_watchdog_hardlockup_probe(void); +extern bool disable_sdei_nmi_watchdog; +#else +static inline void sdei_watchdog_hardlockup_enable(unsigned int cpu) { } +static inline void sdei_watchdog_hardlockup_disable(unsigned int cpu) { } +static inline void sdei_watchdog_clear_eoi(void) { } +static inline int sdei_watchdog_hardlockup_probe(void) { return -ENODEV; } +#define disable_sdei_nmi_watchdog 1 +#endif + #endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5c02720c53a5848e37f03beeadb2ac787a6a91f4..5f4b2f18d8d5fdbdc8ef868e13fd031dc717a563 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -135,6 +135,9 @@ enum pageflags { #ifdef CONFIG_ARCH_USES_PG_ARCH_X PG_arch_2, PG_arch_3, +#endif +#ifdef CONFIG_KFENCE + PG_kfence, /* Page in kfence pool */ #endif __NR_PAGEFLAGS, @@ -603,6 +606,10 @@ PAGEFLAG(VmemmapSelfHosted, vmemmap_self_hosted, PF_ANY) PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) #endif +#ifdef CONFIG_KFENCE +__PAGEFLAG(Kfence, kfence, PF_ANY) +#endif + /* * On an anonymous page mapped into a user virtual memory area, * page->mapping points to its anon_vma, not to a struct address_space; @@ -1050,6 +1057,12 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) #define __PG_MLOCKED 0 #endif +#ifdef CONFIG_KFENCE +#define __PG_KFENCE (1UL << PG_kfence) +#else +#define __PG_KFENCE 0 +#endif + /* * Flags checked when a page is freed. Pages being freed should not have * these flags set. If they are, there is a problem. 
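
With the PG_kfence flag in place, identifying a KFENCE page no longer needs the pool bounds. A minimal sketch (hypothetical pool-setup loop, not part of this patch) of tagging every backing page so that is_kfence_address() can classify them via PageKfence()::

    static void kfence_mark_pool_pages(struct kfence_pool_area *kpa)
    {
            unsigned long i;

            /* __SetPageKfence() is generated by __PAGEFLAG(Kfence, ...) */
            for (i = 0; i < kpa->pool_size / PAGE_SIZE; i++)
                    __SetPageKfence(virt_to_page(kpa->addr + i * PAGE_SIZE));
    }
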
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h index abeba356bc3f5794275e14d83036d7020031c40f..f03f0c0735de8d395372d19b14d6d9f1e110d7cc 100644 --- a/include/linux/part_stat.h +++ b/include/linux/part_stat.h @@ -7,6 +7,7 @@ struct disk_stats { u64 nsecs[NR_STAT_GROUPS]; + u64 d2c_nsecs[NR_STAT_GROUPS]; unsigned long sectors[NR_STAT_GROUPS]; unsigned long ios[NR_STAT_GROUPS]; unsigned long merges[NR_STAT_GROUPS]; diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 6b1301e2498e9e163c6e9a0525e49afa226bffea..863e572202e2d590fbe55cef76dc5ef12193cb22 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h @@ -88,6 +88,7 @@ extern const struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x extern const struct pci_ecam_ops al_pcie_ops; /* Amazon Annapurna Labs PCIe */ extern const struct pci_ecam_ops tegra194_pcie_ops; /* Tegra194 PCIe */ extern const struct pci_ecam_ops loongson_pci_ecam_ops; /* Loongson PCIe */ +extern const struct pci_ecam_ops sw64_pci_ecam_ops; /* SW64 PCIe */ #endif #if IS_ENABLED(CONFIG_PCI_HOST_COMMON) diff --git a/include/linux/pci.h b/include/linux/pci.h index b56417276042dc9dd5707e24f9d5c6cc09bd6bfd..9319923570677dc20a5032a1f4856b26f1820d87 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -312,6 +312,16 @@ struct pci_vpd { u8 cap; }; +/* The structure describes the regs to be saved for yitian710 SoC. */ +struct pci_saved_regs { + u16 dev_ctrl; + u16 dev_ctrl2; + u32 acs_cap_ctrl; + u32 root_err_cmd; + u16 root_ctrl; + u16 slot_ctrl; /* should be the last register to restore */ +}; + struct irq_affinity; struct pcie_link_state; struct pci_sriov; @@ -465,6 +475,7 @@ struct pci_dev { unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */ unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? 
*/ + unsigned int broken_bus_reset:1; /* Abnormal bus reset */ pci_dev_flags_t dev_flags; atomic_t enable_cnt; /* pci_enable_device has been called */ @@ -1213,6 +1224,8 @@ int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val); int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val); int pci_write_config_word(const struct pci_dev *dev, int where, u16 val); int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val); +void pci_clear_and_set_config_dword(const struct pci_dev *dev, int pos, + u32 clear, u32 set); int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index fe4a3589bb3fdae60fafb782a427fcb5c49e9346..df84a7666ef7119f4e0382ab2439ee1fd7597231 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2596,6 +2596,8 @@ #define PCI_VENDOR_ID_ZHAOXIN 0x1d17 #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_DEVICE_ID_HYGON_18H_M05H_HDA 0x14a9 +#define PCI_DEVICE_ID_HYGON_18H_M05H_DF_F3 0x14b3 #define PCI_VENDOR_ID_FUNGIBLE 0x1dad @@ -2604,6 +2606,8 @@ #define PCI_VENDOR_ID_TEKRAM 0x1de1 #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 +#define PCI_VENDOR_ID_ALIBABA 0x1ded + #define PCI_VENDOR_ID_TEHUTI 0x1fc9 #define PCI_DEVICE_ID_TEHUTI_3009 0x3009 #define PCI_DEVICE_ID_TEHUTI_3010 0x3010 @@ -3212,4 +3216,6 @@ #define PCI_VENDOR_ID_NCUBE 0x10ff +#define PCI_VENDOR_ID_PHYTIUM 0x1db7 + #endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e846f87e2d0991b4aea4bf0bdebe858e1de47218..b22a10fe04fda8ff594acfb2463fc0401ee992dc 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1143,6 +1143,15 @@ static inline bool branch_sample_priv(const struct perf_event *event) return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; } +static inline bool branch_sample_counters(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; +} + +static inline bool branch_sample_call_stack(const struct perf_event *event) +{ + return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; +} struct perf_sample_data { /* @@ -1177,6 +1186,7 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + u64 *br_stack_cntr; union perf_sample_weight weight; union perf_mem_data_src data_src; u64 txn; @@ -1254,7 +1264,8 @@ static inline void perf_sample_save_raw_data(struct perf_sample_data *data, static inline void perf_sample_save_brstack(struct perf_sample_data *data, struct perf_event *event, - struct perf_branch_stack *brs) + struct perf_branch_stack *brs, + u64 *brs_cntr) { int size = sizeof(u64); /* nr */ @@ -1262,7 +1273,16 @@ static inline void perf_sample_save_brstack(struct perf_sample_data *data, size += sizeof(u64); size += brs->nr * sizeof(struct perf_branch_entry); + /* + * The extension space for counters is appended after the + * struct perf_branch_stack. It is used to store the occurrences + * of events of each branch. 
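+	 * For example, with brs->nr == 3 the sampled record carries nr (and,
+	 * when recorded, hw_idx), then entries[0..2], then cntr[0..2] as
+	 * three trailing u64 values, one counter sum per branch entry.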
+ */ + if (brs_cntr) + size += brs->nr * sizeof(u64); + data->br_stack = brs; + data->br_stack_cntr = brs_cntr; data->dyn_size += size; data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; } diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h index f9f9931e02d6ad51970be627edcc58fbda879f0f..9da7d0da722c19720098b2704ed93939aee729c9 100644 --- a/include/linux/pid_namespace.h +++ b/include/linux/pid_namespace.h @@ -123,4 +123,43 @@ static inline bool task_is_in_init_pid_ns(struct task_struct *tsk) return task_active_pid_ns(tsk) == &init_pid_ns; } +#ifdef CONFIG_RICH_CONTAINER +extern int sysctl_rich_container_enable; +extern int sysctl_rich_container_source; +extern int sysctl_rich_container_cpuinfo_source; +extern unsigned int sysctl_rich_container_cpuinfo_sharesbase; + +static inline struct task_struct *rich_container_get_scenario(void) +{ + if (sysctl_rich_container_source == 1) + return task_active_pid_ns(current)->child_reaper; + + return current; +} + +static inline bool in_rich_container(struct task_struct *tsk) +{ + if (sysctl_rich_container_enable == 0) + return false; + + return !task_is_in_init_pid_ns(tsk) && child_cpuacct(tsk); +} + +void rich_container_get_cpuset_cpus(struct cpumask *pmask); +#else +static inline bool in_rich_container(struct task_struct *tsk) +{ + return false; +} + +static inline void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ +} + +static inline struct task_struct *rich_container_get_scenario(void) +{ + return NULL; +} +#endif + #endif /* _LINUX_PID_NS_H */ diff --git a/include/linux/platform_data/gpio-sunway.h b/include/linux/platform_data/gpio-sunway.h new file mode 100644 index 0000000000000000000000000000000000000000..58b1bddeb409ff46751b829f21f99ef05d330828 --- /dev/null +++ b/include/linux/platform_data/gpio-sunway.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef GPIO_SUNWAY_H +#define GPIO_SUNWAY_H + +struct sunway_port_property { + struct fwnode_handle *fwnode; + unsigned int idx; + unsigned int ngpio; + unsigned int gpio_base; + int irq[32]; + bool has_irq; + bool irq_shared; +}; + +struct sunway_platform_data { + struct sunway_port_property *properties; + unsigned int nports; +}; + +#endif diff --git a/include/linux/psp-csv.h b/include/linux/psp-csv.h new file mode 100644 index 0000000000000000000000000000000000000000..2da1adea8d3396d1a3b930b5c582ab2e080e1dad --- /dev/null +++ b/include/linux/psp-csv.h @@ -0,0 +1,288 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hygon Secure Virtualization feature CSV driver interface + * + * Copyright (C) Hygon Info Technologies Ltd. 
+ */ + +#ifndef __PSP_CSV_H__ +#define __PSP_CSV_H__ + +#include + +/** + * Guest/platform management commands for CSV3 + */ +enum csv3_cmd { + /* Guest launch commands */ + CSV3_CMD_SET_GUEST_PRIVATE_MEMORY = 0x200, + CSV3_CMD_LAUNCH_ENCRYPT_DATA = 0x201, + CSV3_CMD_LAUNCH_ENCRYPT_VMCB = 0x202, + /* Guest NPT(Nested Page Table) management commands */ + CSV3_CMD_UPDATE_NPT = 0x203, + + /* Guest migration commands */ + CSV3_CMD_SEND_ENCRYPT_DATA = 0x210, + CSV3_CMD_SEND_ENCRYPT_CONTEXT = 0x211, + CSV3_CMD_RECEIVE_ENCRYPT_DATA = 0x212, + CSV3_CMD_RECEIVE_ENCRYPT_CONTEXT = 0x213, + + /* Guest debug commands */ + CSV3_CMD_DBG_READ_VMSA = 0x220, + CSV3_CMD_DBG_READ_MEM = 0x221, + + /* Platform secure memory management commands */ + CSV3_CMD_SET_SMR = 0x230, + CSV3_CMD_SET_SMCR = 0x231, + + CSV3_CMD_MAX, +}; + +/** + * struct csv3_data_launch_encrypt_data - CSV3_CMD_LAUNCH_ENCRYPT_DATA command + * + * @handle: handle of the VM to update + * @gpa: guest address where data is copied + * @length: len of memory to be encrypted + * @data_blocks: memory regions to hold data page address + */ +struct csv3_data_launch_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 gpa; /* In */ + u32 length; /* In */ + u32 reserved1; /* In */ + u64 data_blocks[8]; /* In */ +} __packed; + +/** + * struct csv3_data_launch_encrypt_vmcb - CSV3_CMD_LAUNCH_ENCRYPT_VMCB command + * + * @handle: handle of the VM + * @vcpu_id: id of vcpu per vmsa/vmcb + * @vmsa_addr: memory address of initial vmsa data + * @vmsa_len: len of initial vmsa data + * @shadow_vmcb_addr: memory address of shadow vmcb data + * @shadow_vmcb_len: len of shadow vmcb data + * @secure_vmcb_addr: memory address of secure vmcb data + * @secure_vmcb_len: len of secure vmcb data + */ +struct csv3_data_launch_encrypt_vmcb { + u32 handle; /* In */ + u32 reserved; /* In */ + u32 vcpu_id; /* In */ + u32 reserved1; /* In */ + u64 vmsa_addr; /* In */ + u32 vmsa_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_addr; /* In */ + u32 shadow_vmcb_len; /* In */ + u32 reserved3; /* In */ + u64 secure_vmcb_addr; /* Out */ + u32 secure_vmcb_len; /* Out */ +} __packed; + +/** + * struct csv3_data_update_npt - CSV3_CMD_UPDATE_NPT command + * + * @handle: handle assigned to the VM + * @error_code: nested page fault error code + * @gpa: guest page address where npf happens + * @spa: physical address which maps to gpa in host page table + * @level: page level which can be mapped in nested page table + * @page_attr: page attribute for gpa + * @page_attr_mask: which page attribute bit should be set + * @npages: number of pages from gpa is handled. 
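
A usage aside before the remaining structures (hypothetical caller sketch: real code would place @region in firmware-visible memory rather than on the stack, and psp_do_cmd() is the helper declared later in this series)::

    static int csv3_set_private_memory(u32 handle, u64 base, u64 size)
    {
            struct csv3_data_memory_region region = {
                    .base_address = base,
                    .size = size,
            };
            struct csv3_data_set_guest_private_memory cmd = {
                    .handle = handle,
                    .nregions = 1,
                    .regions_paddr = __psp_pa(&region),
            };
            int psp_ret;

            return psp_do_cmd(CSV3_CMD_SET_GUEST_PRIVATE_MEMORY, &cmd,
                              &psp_ret);
    }
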
+ */
+struct csv3_data_update_npt {
+	u32 handle;			/* In */
+	u32 reserved;			/* In */
+	u32 error_code;			/* In */
+	u32 reserved1;			/* In */
+	u64 gpa;			/* In */
+	u64 spa;			/* In */
+	u64 level;			/* In */
+	u64 page_attr;			/* In */
+	u64 page_attr_mask;		/* In */
+	u32 npages;			/* In/Out */
+} __packed;
+
+/**
+ * struct csv3_data_memory_region - define a memory region
+ *
+ * @base_address: base address of a memory region
+ * @size: size of memory region
+ */
+struct csv3_data_memory_region {
+	u64 base_address;		/* In */
+	u64 size;			/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_guest_private_memory - CSV3_CMD_SET_GUEST_PRIVATE_MEMORY
+ * command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @nregions: number of memory regions
+ * @regions_paddr: address of memory containing multiple memory regions
+ */
+struct csv3_data_set_guest_private_memory {
+	u32 handle;			/* In */
+	u32 nregions;			/* In */
+	u64 regions_paddr;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_smr - CSV3_CMD_SET_SMR command parameters
+ *
+ * @smr_entry_size: size of SMR entry
+ * @nregions: number of memory regions
+ * @regions_paddr: address of memory containing multiple memory regions
+ */
+struct csv3_data_set_smr {
+	u32 smr_entry_size;		/* In */
+	u32 nregions;			/* In */
+	u64 regions_paddr;		/* In */
+} __packed;
+
+/**
+ * struct csv3_data_set_smcr - CSV3_CMD_SET_SMCR command parameters
+ *
+ * @base_address: start address of SMCR memory
+ * @size: size of SMCR memory
+ */
+struct csv3_data_set_smcr {
+	u64 base_address;		/* In */
+	u64 size;			/* In */
+} __packed;
+
+/**
+ * struct csv3_data_dbg_read_vmsa - CSV3_CMD_DBG_READ_VMSA command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @spa: system physical address of memory to get vmsa of the specific vcpu
+ * @size: size of the host memory
+ * @vcpu_id: the specific vcpu
+ */
+struct csv3_data_dbg_read_vmsa {
+	u32 handle;			/* In */
+	u32 reserved;			/* In */
+	u64 spa;			/* In */
+	u32 size;			/* In */
+	u32 vcpu_id;			/* In */
+} __packed;
+
+/**
+ * struct csv3_data_dbg_read_mem - CSV3_CMD_DBG_READ_MEM command parameters
+ *
+ * @handle: handle assigned to the VM
+ * @gpa: guest physical address of the memory to access
+ * @spa: system physical address of memory to get data from gpa
+ * @size: size of guest memory to access
+ */
+struct csv3_data_dbg_read_mem {
+	u32 handle;			/* In */
+	u32 reserved;			/* In */
+	u64 gpa;			/* In */
+	u64 spa;			/* In */
+	u32 size;			/* In */
+} __packed;
+
+/**
+ * struct csv3_data_send_encrypt_data - SEND_ENCRYPT_DATA command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address: physical address containing packet header
+ * @hdr_len: len of packet header
+ * @guest_block: physical address containing multiple guest address
+ * @guest_len: len of guest block
+ * @flag: flag of send encrypt data
+ *	0x00000000: migrate pages in guest block
+ *	0x00000001: set readonly of pages in guest block
+ *	others: invalid
+ * @trans_block: physical address of a page containing multiple host memory pages
+ * @trans_len: len of host memory region
+ */
+struct csv3_data_send_encrypt_data {
+	u32 handle;			/* In */
+	u32 reserved;			/* In */
+	u64 hdr_address;		/* In */
+	u32 hdr_len;			/* In/Out */
+	u32 reserved1;			/* In */
+	u64 guest_block;		/* In */
+	u32 guest_len;			/* In */
+	u32 flag;			/* In */
+	u64 trans_block;		/* In */
+	u32 trans_len;			/* In/Out */
+} __packed;
+
+/**
+ * struct csv3_data_send_encrypt_context - SEND_ENCRYPT_CONTEXT command parameters
+ *
+ * @handle: handle of the VM to process
+ * @hdr_address:
physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_send_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In/Out */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In/Out */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_data - RECEIVE_ENCRYPT_DATA command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header blob + * @hdr_len: len of packet header + * @guest_block: system physical address containing multiple guest address + * @guest_len: len of guest block memory region + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + */ +struct csv3_data_receive_encrypt_data { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 guest_block; /* In */ + u32 guest_len; /* In */ + u32 reserved2; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ +} __packed; + +/** + * struct csv3_data_receive_encrypt_context - RECEIVE_ENCRYPT_CONTEXT command parameters + * + * @handle: handle of the VM to process + * @hdr_address: physical address containing packet header + * @hdr_len: len of packet header + * @trans_block: physical address of a page containing multiple host memory pages + * @trans_len: len of host memory region + * @shadow_vmcb_block: physical address of a page containing multiple shadow vmcb address + * @secure_vmcb_block: physical address of a page containing multiple secure vmcb address + * @vmcb_block_len: len of shadow/secure vmcb block + */ +struct csv3_data_receive_encrypt_context { + u32 handle; /* In */ + u32 reserved; /* In */ + u64 hdr_address; /* In */ + u32 hdr_len; /* In */ + u32 reserved1; /* In */ + u64 trans_block; /* In */ + u32 trans_len; /* In */ + u32 reserved2; /* In */ + u64 shadow_vmcb_block; /* In */ + u64 secure_vmcb_block; /* In */ + u32 vmcb_block_len; /* In */ +} __packed; + +#endif diff --git a/include/linux/psp-sev.h b/include/linux/psp-sev.h index 7fd17e82bab43ff4409cf9e4c8827ff35c9df4d9..74086c1141841f1f407d5cbf6b37ef66e9496b94 100644 --- a/include/linux/psp-sev.h +++ b/include/linux/psp-sev.h @@ -16,6 +16,8 @@ #define SEV_FW_BLOB_MAX_SIZE 0x4000 /* 16KB */ +#define CSV_FW_MAX_SIZE 0x80000 /* 512KB */ + /** * SEV platform state */ @@ -81,6 +83,34 @@ enum sev_cmd { SEV_CMD_MAX, }; +/** + * CSV communication state + */ +enum csv_comm_state { + CSV_COMM_MAILBOX_ON = 0x0, + CSV_COMM_RINGBUFFER_ON = 0x1, + + CSV_COMM_MAX +}; + +enum csv_cmd { + CSV_CMD_RING_BUFFER = 0x00F, + CSV_CMD_HGSC_CERT_IMPORT = 0x300, + CSV_CMD_MAX, +}; + +/** + * Ring Buffer Mode regions: + * There are 4 regions and every region is a 4K area that must be 4K aligned. + * To accomplish this allocate an amount that is the size of area and the + * required alignment. + * The aligned address will be calculated from the returned address. 
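+ *
+ * For example (illustrative sketch only):
+ *
+ *	void *raw  = kzalloc(CSV_RING_BUFFER_LEN, GFP_KERNEL);
+ *	void *base = PTR_ALIGN(raw, CSV_RING_BUFFER_ALIGN);
+ *
+ * where @raw must be kept around for kfree() and @base is the 4K-aligned
+ * address handed to the firmware.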
+ */
+#define CSV_RING_BUFFER_SIZE		(32 * 1024)
+#define CSV_RING_BUFFER_ALIGN		(4 * 1024)
+#define CSV_RING_BUFFER_LEN		(CSV_RING_BUFFER_SIZE + CSV_RING_BUFFER_ALIGN)
+#define CSV_RING_BUFFER_ESIZE		16
+
 /**
  * struct sev_data_init - INIT command parameters
  *
@@ -387,6 +417,7 @@ struct sev_data_send_update_data {
  */
 struct sev_data_send_update_vmsa {
 	u32 handle;			/* In */
+	u32 reserved1;
 	u64 hdr_address;		/* In */
 	u32 hdr_len;			/* In/Out */
 	u32 reserved2;
@@ -523,8 +554,143 @@ struct sev_data_attestation_report {
 	u32 len;			/* In/Out */
 } __packed;
 
+/**
+ * struct csv_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters
+ *
+ * @hgscsk_cert_address: HGSCSK certificate chain
+ * @hgscsk_cert_len: len of HGSCSK certificate
+ * @hgsc_cert_address: HGSC certificate chain
+ * @hgsc_cert_len: len of HGSC certificate
+ */
+struct csv_data_hgsc_cert_import {
+	u64 hgscsk_cert_address;	/* In */
+	u32 hgscsk_cert_len;		/* In */
+	u32 reserved;			/* In */
+	u64 hgsc_cert_address;		/* In */
+	u32 hgsc_cert_len;		/* In */
+} __packed;
+
+#define CSV_COMMAND_PRIORITY_HIGH	0
+#define CSV_COMMAND_PRIORITY_LOW	1
+#define CSV_COMMAND_PRIORITY_NUM	2
+
+struct csv_cmdptr_entry {
+	u16 cmd_id;
+	u16 cmd_flags;
+	u32 sw_data;
+	u64 cmd_buf_ptr;
+} __packed;
+
+struct csv_statval_entry {
+	u16 status;
+	u16 reserved0;
+	u32 reserved1;
+	u64 reserved2;
+} __packed;
+
+struct csv_queue {
+	u32 head;
+	u32 tail;
+	u32 mask;	/* mask = (size - 1), indicates the max element count */
+	u32 esize;	/* size of an element */
+	u64 data;
+	u64 data_align;
+} __packed;
+
+struct csv_ringbuffer_queue {
+	struct csv_queue cmd_ptr;
+	struct csv_queue stat_val;
+} __packed;
+
+/**
+ * struct csv_data_ring_buffer - RING_BUFFER command parameters
+ *
+ * @queue_lo_cmdptr_address: physical address of the region to be used for
+ *	low priority queue's CmdPtr ring buffer
+ * @queue_lo_statval_address: physical address of the region to be used for
+ *	low priority queue's StatVal ring buffer
+ * @queue_hi_cmdptr_address: physical address of the region to be used for
+ *	high priority queue's CmdPtr ring buffer
+ * @queue_hi_statval_address: physical address of the region to be used for
+ *	high priority queue's StatVal ring buffer
+ * @queue_lo_size: size of the low priority queue in 4K pages. Must be 1
+ * @queue_hi_size: size of the high priority queue in 4K pages.
Must be 1 + * @queue_lo_threshold: queue(low) size, below which an interrupt may be generated + * @queue_hi_threshold: queue(high) size, below which an interrupt may be generated + * @int_on_empty: unconditionally interrupt when both queues are found empty + */ +struct csv_data_ring_buffer { + u64 queue_lo_cmdptr_address; /* In */ + u64 queue_lo_statval_address; /* In */ + u64 queue_hi_cmdptr_address; /* In */ + u64 queue_hi_statval_address; /* In */ + u8 queue_lo_size; /* In */ + u8 queue_hi_size; /* In */ + u16 queue_lo_threshold; /* In */ + u16 queue_hi_threshold; /* In */ + u16 int_on_empty; /* In */ +} __packed; + +/** + * enum VPSP_CMD_STATUS - virtual psp command status + * + * @VPSP_INIT: the initial command from guest + * @VPSP_RUNNING: the middle command to check and run ringbuffer command + * @VPSP_FINISH: inform the guest that the command ran successfully + */ +enum VPSP_CMD_STATUS { + VPSP_INIT = 0, + VPSP_RUNNING, + VPSP_FINISH, + VPSP_MAX +}; + +/** + * struct vpsp_cmd - virtual psp command + * + * @cmd_id: the command id is used to distinguish different commands + * @is_high_rb: indicates the ringbuffer level in which the command is placed + */ +struct vpsp_cmd { + u32 cmd_id : 31; + u32 is_high_rb : 1; +}; + +/** + * struct vpsp_ret - virtual psp return result + * + * @pret: the return code from device + * @resv: reserved bits + * @index: used to distinguish the position of command in the ringbuffer + * @status: indicates the current status of the related command + */ +struct vpsp_ret { + u32 pret : 16; + u32 resv : 2; + u32 index : 12; + u32 status : 2; +}; + +#define PSP_VID_MASK 0xff +#define PSP_VID_SHIFT 56 +#define PUT_PSP_VID(hpa, vid) ((__u64)(hpa) | ((__u64)(PSP_VID_MASK & vid) << PSP_VID_SHIFT)) +#define GET_PSP_VID(hpa) ((__u16)((__u64)(hpa) >> PSP_VID_SHIFT) & PSP_VID_MASK) +#define CLEAR_PSP_VID(hpa) ((__u64)(hpa) & ~((__u64)PSP_VID_MASK << PSP_VID_SHIFT)) + +#ifdef CONFIG_HYGON_PSP2CPU_CMD + +typedef int (*p2c_notifier_t)(uint32_t id, uint64_t data); + +int psp_register_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +int psp_unregister_cmd_notifier(uint32_t cmd_id, int (*notifier)(uint32_t id, uint64_t data)); + +#endif + #ifdef CONFIG_CRYPTO_DEV_SP_PSP +int psp_do_cmd(int cmd, void *data, int *psp_ret); + /** * sev_platform_init - perform SEV INIT command * @@ -639,8 +805,33 @@ int sev_guest_decommission(struct sev_data_decommission *data, int *error); void *psp_copy_user_blob(u64 uaddr, u32 len); +int csv_ring_buffer_queue_init(void); + +int csv_ring_buffer_queue_free(void); + +int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags); + +int csv_check_stat_queue_status(int *psp_ret); + +/** + * csv_issue_ringbuf_cmds_external_user - issue CSV commands into a ring + * buffer. 
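
One more illustration for this hunk: the PSP_VID helpers above stash a virtual PSP id in the top byte of a command buffer's physical address. A small sketch (hpa and vid are placeholder values)::

    u64 tagged  = PUT_PSP_VID(hpa, vid);    /* vid into bits 63:56 */
    u16 vid_out = GET_PSP_VID(tagged);      /* recover the vid */
    u64 plain   = CLEAR_PSP_VID(tagged);    /* strip it again */
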
+ */
+int csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret);
+
+int vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index,
+			void *data, struct vpsp_ret *psp_ret);
+
+int vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret);
+
+int vpsp_get_vid(uint32_t *vid, pid_t pid);
+
+int vpsp_get_default_vid_permission(void);
 #else	/* !CONFIG_CRYPTO_DEV_SP_PSP */
 
+static inline int
+psp_do_cmd(int cmd, void *data, int *psp_ret) { return -ENODEV; }
+
 static inline int
 sev_platform_status(struct sev_user_data_status *status, int *error) { return -ENODEV; }
 
@@ -662,6 +853,27 @@ sev_issue_cmd_external_user(struct file *filep, unsigned int id, void *data, int
 
 static inline void *psp_copy_user_blob(u64 __user uaddr, u32 len) { return ERR_PTR(-EINVAL); }
 
+static inline int csv_ring_buffer_queue_init(void) { return -ENODEV; }
+
+static inline int csv_ring_buffer_queue_free(void) { return -ENODEV; }
+
+static inline
+int csv_fill_cmd_queue(int prio, int cmd, void *data, uint16_t flags) { return -ENODEV; }
+
+static inline int csv_check_stat_queue_status(int *psp_ret) { return -ENODEV; }
+
+static inline int
+csv_issue_ringbuf_cmds_external_user(struct file *filep, int *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_try_get_result(uint32_t vid, uint8_t prio, uint32_t index,
+		    void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_try_do_cmd(uint32_t vid, int cmd, void *data, struct vpsp_ret *psp_ret) { return -ENODEV; }
+
+static inline int
+vpsp_get_vid(uint32_t *vid, pid_t pid) { return -ENODEV; }
+
+static inline int
+vpsp_get_default_vid_permission(void) { return -ENODEV; }
 #endif	/* CONFIG_CRYPTO_DEV_SP_PSP */
 
 #endif	/* __PSP_SEV_H__ */
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 8334eeacfec5257628eca1bcca143796feed496f..dc80ddab26cf2857e02fd60ba48f97b01b19a9f8 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -2,9 +2,21 @@
 #ifndef _RESCTRL_H
 #define _RESCTRL_H
 
+#include
 #include
 #include
 #include
+#include
+
+#ifdef CONFIG_ARCH_HAS_CPU_RESCTRL
+#include
+#endif
+
+/* CLOSID, RMID value used by the default control group */
+#define RESCTRL_RESERVED_CLOSID		0
+#define RESCTRL_RESERVED_RMID		0
+
+#define RESCTRL_PICK_ANY_CPU		-1
 
 #ifdef CONFIG_PROC_CPU_RESCTRL
 
@@ -18,28 +30,52 @@ int proc_resctrl_show(struct seq_file *m,
 /* max value for struct rdt_domain's mbps_val */
 #define MBA_MAX_MBPS	U32_MAX
 
-/**
- * enum resctrl_conf_type - The type of configuration.
- * @CDP_NONE:	No prioritisation, both code and data are controlled or monitored.
- * @CDP_CODE:	Configuration applies to instruction fetches.
- * @CDP_DATA:	Configuration applies to reads and writes.
+/*
+ * Resctrl uses u32 to hold the user-space config. The maximum bitmap size is
+ * 32.
  */
-enum resctrl_conf_type {
-	CDP_NONE,
-	CDP_CODE,
-	CDP_DATA,
-};
+#define RESCTRL_MAX_CBM			32
 
-#define CDP_NUM_TYPES	(CDP_DATA + 1)
+extern unsigned int resctrl_rmid_realloc_limit;
+extern unsigned int resctrl_rmid_realloc_threshold;
 
-/*
- * Event IDs, the values match those used to program IA32_QM_EVTSEL before
- * reading IA32_QM_CTR on RDT systems.
+/** + * struct pseudo_lock_region - pseudo-lock region information + * @s: Resctrl schema for the resource to which this + * pseudo-locked region belongs + * @closid: The closid that this pseudo-locked region uses + * @d: RDT domain to which this pseudo-locked region + * belongs + * @cbm: bitmask of the pseudo-locked region + * @lock_thread_wq: waitqueue used to wait on the pseudo-locking thread + * completion + * @thread_done: variable used by waitqueue to test if pseudo-locking + * thread completed + * @cpu: core associated with the cache on which the setup code + * will be run + * @line_size: size of the cache lines + * @size: size of pseudo-locked region in bytes + * @kmem: the kernel memory associated with pseudo-locked region + * @minor: minor number of character device associated with this + * region + * @debugfs_dir: pointer to this region's directory in the debugfs + * filesystem + * @pm_reqs: Power management QoS requests related to this region */ -enum resctrl_event_id { - QOS_L3_OCCUP_EVENT_ID = 0x01, - QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, - QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, +struct pseudo_lock_region { + struct resctrl_schema *s; + u32 closid; + struct rdt_domain *d; + u32 cbm; + wait_queue_head_t lock_thread_wq; + int thread_done; + int cpu; + unsigned int line_size; + unsigned int size; + void *kmem; + unsigned int minor; + struct dentry *debugfs_dir; + struct list_head pm_reqs; }; /** @@ -94,7 +130,7 @@ struct rdt_domain { * zero CBM. * @shareable_bits: Bitmask of shareable resource with other * executing entities - * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid. + * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid. * @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache * level has CPU scope. */ @@ -102,7 +138,7 @@ struct resctrl_cache { unsigned int cbm_len; unsigned int min_cbm_bits; unsigned int shareable_bits; - bool arch_has_sparse_bitmaps; + bool arch_has_sparse_bitmasks; bool arch_has_per_cpu_cfg; }; @@ -141,9 +177,6 @@ struct resctrl_membw { u32 *mb_map; }; -struct rdt_parse_data; -struct resctrl_schema; - /** * struct rdt_resource - attributes of a resctrl resource * @rid: The index of the resource @@ -153,14 +186,15 @@ struct resctrl_schema; * @cache_level: Which cache level defines scope of this resource * @cache: Cache allocation related data * @membw: If the component has bandwidth controls, their properties. - * @domains: All domains for this resource + * @domains: RCU list of all domains for this resource * @name: Name to use in "schemata" file. * @data_width: Character width of data when displaying * @default_ctrl: Specifies default cache cbm or memory B/W percent. * @format_str: Per resource format string to show domain value - * @parse_ctrlval: Per resource function pointer to parse control values * @evt_list: List of monitoring events * @fflags: flags to choose base and info files + * @mbm_cfg_mask: Bandwidth sources that can be tracked when Bandwidth + * Monitoring Event Configuration (BMEC) is supported. * @cdp_capable: Is the CDP feature available on this resource */ struct rdt_resource { @@ -176,14 +210,19 @@ struct rdt_resource { int data_width; u32 default_ctrl; const char *format_str; - int (*parse_ctrlval)(struct rdt_parse_data *data, - struct resctrl_schema *s, - struct rdt_domain *d); struct list_head evt_list; unsigned long fflags; + unsigned int mbm_cfg_mask; bool cdp_capable; }; +/* + * Get the resource that exists at this level. 
If the level is not supported + * a dummy/not-capable resource can be returned. Levels >= RDT_NUM_RESOURCES + * will return NULL. + */ +struct rdt_resource *resctrl_arch_get_resource(enum resctrl_res_level l); + /** * struct resctrl_schema - configuration abilities of a resource presented to * user-space @@ -204,10 +243,68 @@ struct resctrl_schema { u32 num_closid; }; +struct resctrl_cpu_sync { + u32 closid; + u32 rmid; +}; + +struct resctrl_mon_config_info { + struct rdt_resource *r; + struct rdt_domain *d; + u32 evtid; + u32 mon_config; + int err; +}; + +/* + * Update and re-load this CPUs defaults. Called via IPI, takes a pointer to + * struct resctrl_cpu_sync, or NULL. + */ +void resctrl_arch_sync_cpu_defaults(void *info); + /* The number of closid supported by this resource regardless of CDP */ u32 resctrl_arch_get_num_closid(struct rdt_resource *r); + +struct rdt_domain *resctrl_arch_find_domain(struct rdt_resource *r, int id); int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid); +bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt); +void resctrl_arch_mon_event_config_write(void *info); +void resctrl_arch_mon_event_config_read(void *info); + +/* For use by arch code to remap resctrl's smaller CDP CLOSID range */ +static inline u32 resctrl_get_config_index(u32 closid, + enum resctrl_conf_type type) +{ + switch (type) { + default: + case CDP_NONE: + return closid; + case CDP_CODE: + return (closid * 2) + 1; + case CDP_DATA: + return (closid * 2); + } +} + +/* + * Caller must hold the cpuhp read lock to prevent the struct rdt_domain being + * freed. + */ +static inline struct rdt_domain * +resctrl_get_domain_from_cpu(int cpu, struct rdt_resource *r) +{ + struct rdt_domain *d; + + list_for_each_entry_rcu(d, &r->domains, list) { + /* Find the domain that contains this CPU */ + if (cpumask_test_cpu(cpu, &d->cpu_mask)) + return d; + } + + return NULL; +} + /* * Update the ctrl_val and apply this config right now. * Must be called on one of the domain's CPUs. @@ -219,36 +316,70 @@ u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d, u32 closid, enum resctrl_conf_type type); int resctrl_online_domain(struct rdt_resource *r, struct rdt_domain *d); void resctrl_offline_domain(struct rdt_resource *r, struct rdt_domain *d); +void resctrl_online_cpu(unsigned int cpu); +void resctrl_offline_cpu(unsigned int cpu); /** * resctrl_arch_rmid_read() - Read the eventid counter corresponding to rmid * for this resource and domain. * @r: resource that the counter should be read from. * @d: domain that the counter should be read from. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid + * only. * @rmid: rmid of the counter to read. * @eventid: eventid to read, e.g. L3 occupancy. * @val: result of the counter read in bytes. + * @arch_mon_ctx: An architecture specific value from + * resctrl_arch_mon_ctx_alloc(), for MPAM this identifies + * the hardware monitor allocated for this read request. * - * Call from process context on a CPU that belongs to domain @d. + * Some architectures need to sleep when first programming some of the counters. + * (specifically: arm64's MPAM cache occupancy counters can return 'not ready' + * for a short period of time). Call from a non-migrateable process context on + * a CPU that belongs to domain @d. e.g. use smp_call_on_cpu() or + * schedule_work_on(). This function can be called with interrupts masked, + * e.g. 
using smp_call_function_any(), but may consistently return an error. * * Return: * 0 on success, or -EIO, -EINVAL etc on error. */ int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid, u64 *val); + u32 closid, u32 rmid, enum resctrl_event_id eventid, + u64 *val, void *arch_mon_ctx); + +/** + * resctrl_arch_rmid_read_context_check() - warn about invalid contexts + * + * When built with CONFIG_DEBUG_ATOMIC_SLEEP generate a warning when + * resctrl_arch_rmid_read() is called with preemption disabled. + * + * The contract with resctrl_arch_rmid_read() is that if interrupts + * are unmasked, it can sleep. This allows NOHZ_FULL systems to use an + * IPI, (and fail if the call needed to sleep), while most of the time + * the work is scheduled, allowing the call to sleep. + */ +static inline void resctrl_arch_rmid_read_context_check(void) +{ + if (!irqs_disabled()) + might_sleep(); +} /** * resctrl_arch_reset_rmid() - Reset any private state associated with rmid * and eventid. * @r: The domain's resource. * @d: The rmid's domain. + * @closid: closid that matches the rmid. Depending on the architecture, the + * counter may match traffic of both @closid and @rmid, or @rmid only. * @rmid: The rmid whose counter values should be reset. * @eventid: The eventid whose counter values should be reset. * * This can be called from any CPU. */ void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d, - u32 rmid, enum resctrl_event_id eventid); + u32 closid, u32 rmid, + enum resctrl_event_id eventid); /** * resctrl_arch_reset_rmid_all() - Reset all private state associated with @@ -264,4 +395,7 @@ void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d); extern unsigned int resctrl_rmid_realloc_threshold; extern unsigned int resctrl_rmid_realloc_limit; +int resctrl_init(void); +void resctrl_exit(void); + #endif /* _RESCTRL_H */ diff --git a/include/linux/resctrl_types.h b/include/linux/resctrl_types.h new file mode 100644 index 0000000000000000000000000000000000000000..a0d8694be783366a48dfab68acbac1fc56ca58ba --- /dev/null +++ b/include/linux/resctrl_types.h @@ -0,0 +1,101 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Arm Ltd. + * Based on arch/x86/kernel/cpu/resctrl/internal.h + */ + +#ifndef __LINUX_RESCTRL_TYPES_H +#define __LINUX_RESCTRL_TYPES_H + +#define CQM_LIMBOCHECK_INTERVAL 1000 + +#define MBM_CNTR_WIDTH_BASE 24 +#define MBM_OVERFLOW_INTERVAL 1000 +#define MAX_MBA_BW 100u +#define MBA_IS_LINEAR 0x4 + +/* rdtgroup.flags */ +#define RDT_DELETED 1 + +/* rftype.flags */ +#define RFTYPE_FLAGS_CPUS_LIST 1 + +/* + * Define the file type flags for base and info directories. 
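
A sketch of the calling convention documented for resctrl_arch_rmid_read() above (assumed filesystem-side caller; the resctrl_arch_mon_ctx_alloc()/_free() signatures are taken on trust from this series)::

    static int read_occupancy(struct rdt_resource *r, struct rdt_domain *d,
                              u32 closid, u32 rmid, u64 *val)
    {
            void *ctx = resctrl_arch_mon_ctx_alloc(r, QOS_L3_OCCUP_EVENT_ID);
            int err;

            /* may sleep, e.g. while an MPAM monitor becomes ready */
            err = resctrl_arch_rmid_read(r, d, closid, rmid,
                                         QOS_L3_OCCUP_EVENT_ID, val, ctx);
            resctrl_arch_mon_ctx_free(r, QOS_L3_OCCUP_EVENT_ID, ctx);
            return err;
    }
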
+ */ +#define RFTYPE_INFO BIT(0) +#define RFTYPE_BASE BIT(1) +#define RFTYPE_CTRL BIT(4) +#define RFTYPE_MON BIT(5) +#define RFTYPE_TOP BIT(6) +#define RFTYPE_RES_CACHE BIT(8) +#define RFTYPE_RES_MB BIT(9) +#define RFTYPE_DEBUG BIT(10) +#define RFTYPE_CTRL_INFO (RFTYPE_INFO | RFTYPE_CTRL) +#define RFTYPE_MON_INFO (RFTYPE_INFO | RFTYPE_MON) +#define RFTYPE_TOP_INFO (RFTYPE_INFO | RFTYPE_TOP) +#define RFTYPE_CTRL_BASE (RFTYPE_BASE | RFTYPE_CTRL) +#define RFTYPE_MON_BASE (RFTYPE_BASE | RFTYPE_MON) + +/* Reads to Local DRAM Memory */ +#define READS_TO_LOCAL_MEM BIT(0) + +/* Reads to Remote DRAM Memory */ +#define READS_TO_REMOTE_MEM BIT(1) + +/* Non-Temporal Writes to Local Memory */ +#define NON_TEMP_WRITE_TO_LOCAL_MEM BIT(2) + +/* Non-Temporal Writes to Remote Memory */ +#define NON_TEMP_WRITE_TO_REMOTE_MEM BIT(3) + +/* Reads to Local Memory the system identifies as "Slow Memory" */ +#define READS_TO_LOCAL_S_MEM BIT(4) + +/* Reads to Remote Memory the system identifies as "Slow Memory" */ +#define READS_TO_REMOTE_S_MEM BIT(5) + +/* Dirty Victims to All Types of Memory */ +#define DIRTY_VICTIMS_TO_ALL_MEM BIT(6) + +/* Max event bits supported */ +#define MAX_EVT_CONFIG_BITS GENMASK(6, 0) + +/** + * enum resctrl_conf_type - The type of configuration. + * @CDP_NONE: No prioritisation, both code and data are controlled or monitored. + * @CDP_CODE: Configuration applies to instruction fetches. + * @CDP_DATA: Configuration applies to reads and writes. + */ +enum resctrl_conf_type { + CDP_NONE, + CDP_CODE, + CDP_DATA, +}; + +enum resctrl_res_level { + RDT_RESOURCE_L3, + RDT_RESOURCE_L2, + RDT_RESOURCE_MBA, + RDT_RESOURCE_SMBA, + + /* Must be the last */ + RDT_NUM_RESOURCES, +}; + +#define CDP_NUM_TYPES (CDP_DATA + 1) + +/* + * Event IDs, the values match those used to program IA32_QM_EVTSEL before + * reading IA32_QM_CTR on RDT systems. + */ +enum resctrl_event_id { + QOS_L3_OCCUP_EVENT_ID = 0x01, + QOS_L3_MBM_TOTAL_EVENT_ID = 0x02, + QOS_L3_MBM_LOCAL_EVENT_ID = 0x03, + QOS_MC_MBM_BPS_EVENT_ID = 0x04, +}; + +#define RESCTRL_MAX_EVENT_NUM 4 + +#endif /* __LINUX_RESCTRL_TYPES_H */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 77f01ac385f7a57695c2e9af5b826cb4f9b39f6a..5ce57c47806922c77ea9a4795e9448a34b19b875 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -133,6 +133,10 @@ struct user_event_mm; #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) +#define task_contributes_to_load(task) \ + ((READ_ONCE((task)->__state) & TASK_UNINTERRUPTIBLE) != 0 && \ + (READ_ONCE((task)->__state) & TASK_FROZEN) == 0 && \ + (READ_ONCE((task)->__state) & TASK_NOLOAD) == 0) /* * Special states are those that do not use the normal wait-loop pattern. 
See @@ -509,6 +513,8 @@ struct sched_statistics { u64 wait_max; u64 wait_count; u64 wait_sum; + u64 parent_wait_sum_base; + u64 parent_wait_contrib; u64 iowait_count; u64 iowait_sum; @@ -541,7 +547,13 @@ struct sched_statistics { #ifdef CONFIG_SCHED_CORE u64 core_forceidle_sum; + u64 core_forceidle_task_sum; #endif +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) + u64 core_sibidle_sum; + u64 core_sibidle_task_sum; +#endif + #endif /* CONFIG_SCHEDSTATS */ } ____cacheline_aligned; @@ -562,6 +574,20 @@ struct sched_entity { s64 vlag; u64 slice; + /* irq time is included */ + u64 exec_start_raw; + u64 sum_exec_raw; + u64 cg_idle_start; + u64 cg_idle_sum; + u64 cg_init_time; + u64 cg_nr_iowait; + u64 cg_iowait_sum; + u64 cg_iowait_start; + u64 cg_ineffective_sum; + u64 cg_ineffective_start; + seqlock_t idle_seqlock; + spinlock_t iowait_lock; + u64 nr_migrations; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -584,6 +610,9 @@ struct sched_entity { */ struct sched_avg avg; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif }; struct sched_rt_entity { @@ -1536,6 +1565,13 @@ struct task_struct { struct user_event_mm *user_event_mm; #endif + int wait_res_type; + union { + struct folio *wait_folio; + struct bio *wait_bio; + }; + unsigned long wait_moment; + /* * New fields for task_struct should be added above here, so that * they are included in the randomized portion of task_struct. @@ -1553,6 +1589,36 @@ struct task_struct { */ }; +enum { + TASK_WAIT_FOLIO = 1, + TASK_WAIT_BIO, +}; + +static inline void task_set_wait_res(int type, void *res) +{ + switch (type) { + case TASK_WAIT_FOLIO: + current->wait_folio = (struct folio *)res; + break; + case TASK_WAIT_BIO: + current->wait_bio = (struct bio *)res; + break; + default: + current->wait_folio = NULL; + break; + } + + current->wait_res_type = type; + current->wait_moment = jiffies; +} + +static inline void task_clear_wait_res(void) +{ + current->wait_folio = NULL; + current->wait_res_type = 0; + current->wait_moment = 0; +} + static inline struct pid *task_pid(struct task_struct *task) { return task->thread_pid; @@ -2457,4 +2523,74 @@ static inline int sched_core_idle_cpu(int cpu) { return idle_cpu(cpu); } extern void sched_set_stop_task(int cpu, struct task_struct *stop); +struct cpuacct_usage_result { + u64 user, nice, system, irq, softirq; + u64 steal, iowait, idle, guest, guest_nice; +}; + +enum rich_container_source { + RICH_CONTAINER_REAPER, + RICH_CONTAINER_CURRENT, +}; + +#ifdef CONFIG_RICH_CONTAINER +void rich_container_source(enum rich_container_source *from); +bool child_cpuacct(struct task_struct *tsk); +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res); +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu); +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running); +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total); + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask); + +#else /* CONFIG_RICH_CONTAINER */ +static inline void +rich_container_source(enum rich_container_source *from) +{ +} + +static inline void +rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ +} 
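
Back to the wait-resource annotations added to task_struct above, a minimal usage sketch (assumed call site in a folio wait path; folio is a placeholder)::

    /* record what we are about to block on, for stall diagnostics */
    task_set_wait_res(TASK_WAIT_FOLIO, folio);
    io_schedule();
    task_clear_wait_res();
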
+ +static inline unsigned long +rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + return 0; +} + +static inline void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ +} + +static inline bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + return false; +} + +static inline +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ +} +#endif + +#ifdef CONFIG_SCHED_SLI +void create_rich_container_reaper(struct task_struct *tsk); +#else +static inline void create_rich_container_reaper(struct task_struct *tsk) { } +#endif + #endif diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h index fe1a46f30d2409ff0fe0906bd12af567ae225f0e..bf538a280c829cfc195c4275fb42bca870426b20 100644 --- a/include/linux/sched/isolation.h +++ b/include/linux/sched/isolation.h @@ -55,6 +55,13 @@ static inline bool housekeeping_test_cpu(int cpu, enum hk_type type) static inline void housekeeping_init(void) { } #endif /* CONFIG_CPU_ISOLATION */ +#if defined(CONFIG_CPU_ISOLATION) && defined(CONFIG_CGROUP_SCHED) +DECLARE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +extern void wilds_cpus_allowed(struct cpumask *pmask); +#else +static inline void wilds_cpus_allowed(struct cpumask *pmask) {} +#endif + static inline bool housekeeping_cpu(int cpu, enum hk_type type) { #ifdef CONFIG_CPU_ISOLATION diff --git a/include/linux/sched/loadavg.h b/include/linux/sched/loadavg.h index 83ec54b65e792f13e87121918dad334af5481578..20165894027d10ebf11d13c76b168f57b9cd9707 100644 --- a/include/linux/sched/loadavg.h +++ b/include/linux/sched/loadavg.h @@ -13,8 +13,18 @@ * 11 bit fractions. */ extern unsigned long avenrun[]; /* Load averages */ +extern unsigned long avenrun_r[]; /* R load averages */ + extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift); +#ifdef CONFIG_SCHED_SLI +extern void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift); +#else +static inline void get_avenrun_r(unsigned long *loads, unsigned long offset, + int shift) { } +#endif + #define FSHIFT 11 /* nr of bits of precision */ #define FIXED_1 (1<<FSHIFT) [...] +static int __name ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __name ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __name ## _open, \ + .read = seq_read, \ + .write = __name ## _write, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + #define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \ static int __name ## _open(struct inode *inode, struct file *file) \ { \ diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 95ac8398ee72d97015a61c4b1bdf5063d20a9e15..e77a3345d20b250a6a77007b074dbe91c91516e2 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -12,6 +12,7 @@ static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_np(unsigned long addr, int numpages) { return 0; } #endif #ifndef set_memory_rox @@ -38,18 +39,6 @@ static inline bool kernel_page_present(struct page *page) { return true; } -#else /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ -/* - * Some architectures, e.g.
ARM64 can disable direct map modifications at - * boot time. Let them overrive this query. - */ -#ifndef can_set_direct_map -static inline bool can_set_direct_map(void) -{ - return true; -} -#define can_set_direct_map can_set_direct_map -#endif #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */ #ifdef CONFIG_X86_64 diff --git a/include/linux/tick.h b/include/linux/tick.h index 9459fef5b85736d0c895a5643ae0d87feabbb41d..65af90ca409ae722d499b25c2114f1ac6dfe7f19 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -174,9 +174,16 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } static inline void tick_nohz_idle_stop_tick_protected(void) { } #endif /* !CONFIG_NO_HZ_COMMON */ +/* + * Mask of CPUs that are nohz_full. + * + * Users should be guarded by CONFIG_NO_HZ_FULL or a tick_nohz_full_cpu() + * check. + */ +extern cpumask_var_t tick_nohz_full_mask; + #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; -extern cpumask_var_t tick_nohz_full_mask; static inline bool tick_nohz_full_enabled(void) { diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 3064314f48329cac729b9b4e29fc62072ec2df3b..550287c929906a402b03c65ee81479c2650799c8 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -205,6 +205,15 @@ copy_mc_to_kernel(void *dst, const void *src, size_t cnt) } #endif +#ifndef copy_mc_to_user +static inline unsigned long __must_check +copy_mc_to_user(void *dst, const void *src, size_t cnt) +{ + check_object_size(src, cnt, true); + return raw_copy_to_user(dst, src, cnt); +} +#endif + static __always_inline void pagefault_disabled_inc(void) { current->pagefault_disabled++; diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 47c5962b876b027e2ea935c4003b37f7a1bfdca3..46e2710985e6b906d8ef5b8fc180c7e1dbf30b13 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h @@ -16,6 +16,7 @@ #include #include #include +#include struct module; struct uio_map; @@ -74,9 +75,11 @@ struct uio_device { struct fasync_struct *async_queue; wait_queue_head_t wait; struct uio_info *info; - struct mutex info_lock; struct kobject *map_dir; struct kobject *portio_dir; + struct percpu_ref info_ref; + struct completion confirm_done; + struct completion free_done; }; /** @@ -109,6 +112,7 @@ struct uio_info { int (*open)(struct uio_info *info, struct inode *inode); int (*release)(struct uio_info *info, struct inode *inode); int (*irqcontrol)(struct uio_info *info, s32 irq_on); + long (*ioctl)(struct uio_info *info, unsigned int cmd, unsigned long arg); }; extern int __must_check diff --git a/include/linux/units.h b/include/linux/units.h index 2793a41e73a2b6c36a14bd9a245e582e5c4ae560..45110daaf8d3260ced995b66ba62669e8b29ddfa 100644 --- a/include/linux/units.h +++ b/include/linux/units.h @@ -2,6 +2,7 @@ #ifndef _LINUX_UNITS_H #define _LINUX_UNITS_H +#include #include /* Metric prefixes in accordance with Système international (d'unités) */ @@ -31,6 +32,10 @@ #define MICROWATT_PER_MILLIWATT 1000UL #define MICROWATT_PER_WATT 1000000UL +#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE) +#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE) +#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE) + #define ABSOLUTE_ZERO_MILLICELSIUS -273150 static inline long milli_kelvin_to_millicelsius(long t) diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index fed855bae6d8e88956064f85f1260bca522bb5fb..a3e76af9bf07fc3c808a9348b122a388756495a3 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -34,6 +34,23 @@ struct reclaim_stat 
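
The BYTES_PER_*BIT constants above make link-rate conversions explicit: 1 kbit/s, Mbit/s or Gbit/s is KILO/MEGA/GIGA bits divided by BITS_PER_BYTE. A hedged sketch; the helper name is ours, not from the patch::

    #include <linux/types.h>
    #include <linux/units.h>

    /* Illustrative only: convert a rate in Mbit/s to bytes per second. */
    static inline u64 example_mbps_to_bytes_per_sec(u64 mbps)
    {
    	return mbps * BYTES_PER_MBIT;	/* 1 Mbit/s = 125000 B/s */
    }
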
{ unsigned nr_lazyfree_fail; }; +struct sysinfo_ext { + unsigned long lrupages[NR_LRU_LISTS]; + unsigned long cached; + unsigned long available; + unsigned long file_dirty; + unsigned long writeback; + unsigned long anon_mapped; + unsigned long file_mapped; + unsigned long slab_reclaimable; + unsigned long slab_unreclaimable; + unsigned long kernel_stack_kb; + unsigned long writeback_temp; + unsigned long anon_thps; + unsigned long shmem_thps; + unsigned long shmem_pmd_mapped; +}; + enum writeback_stat_item { NR_DIRTY_THRESHOLD, NR_DIRTY_BG_THRESHOLD, diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h index 32c59053b48edca72dcf57cda55b674c3a9dd5e3..101183b8d3bcd897191fcdae216bbc624248d488 100644 --- a/include/sound/hdaudio.h +++ b/include/sound/hdaudio.h @@ -350,6 +350,7 @@ struct hdac_bus { bool needs_damn_long_delay:1; bool not_use_interrupts:1; /* prohibiting the RIRB IRQ */ bool access_sdnctl_in_dword:1; /* accessing the sdnctl register by dword */ + bool hygon_dword_access:1; int poll_count; diff --git a/include/trace/events/intel_ifs.h b/include/trace/events/intel_ifs.h index d7353024016cceaeeef9c7bf37c4291fa9a433f4..af0af3f1d9b7c081fb61ab582482302094409ff3 100644 --- a/include/trace/events/intel_ifs.h +++ b/include/trace/events/intel_ifs.h @@ -10,25 +10,25 @@ TRACE_EVENT(ifs_status, - TP_PROTO(int cpu, union ifs_scan activate, union ifs_status status), + TP_PROTO(int cpu, int start, int stop, u64 status), - TP_ARGS(cpu, activate, status), + TP_ARGS(cpu, start, stop, status), TP_STRUCT__entry( __field( u64, status ) __field( int, cpu ) - __field( u8, start ) - __field( u8, stop ) + __field( u16, start ) + __field( u16, stop ) ), TP_fast_assign( __entry->cpu = cpu; - __entry->start = activate.start; - __entry->stop = activate.stop; - __entry->status = status.data; + __entry->start = start; + __entry->stop = stop; + __entry->status = status; ), - TP_printk("cpu: %d, start: %.2x, stop: %.2x, status: %llx", + TP_printk("cpu: %d, start: %.4x, stop: %.4x, status: %.16llx", __entry->cpu, __entry->start, __entry->stop, diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h index 5646ae15a957ad90804255c21a486323e8fddbb7..5779ac0df0392e12deda56e3ae98235df2964009 100644 --- a/include/trace/events/jbd2.h +++ b/include/trace/events/jbd2.h @@ -245,6 +245,56 @@ TRACE_EVENT(jbd2_handle_stats, __entry->dirtied_blocks) ); +TRACE_EVENT(jbd2_slow_handle_stats, + TP_PROTO(dev_t dev, unsigned long tid, unsigned int type, + unsigned int line_no, int interval, int sync, + int requested_blocks, int dirtied_blocks, + unsigned long trans_wait, unsigned long space_wait, + u64 sched_wait, u64 io_wait), + + TP_ARGS(dev, tid, type, line_no, interval, sync, + requested_blocks, dirtied_blocks, trans_wait, space_wait, + sched_wait, io_wait), + + TP_STRUCT__entry( + __field(dev_t, dev) + __field(unsigned long, tid) + __field(unsigned int, type) + __field(unsigned int, line_no) + __field(int, interval) + __field(int, sync) + __field(int, requested_blocks) + __field(int, dirtied_blocks) + __field(unsigned long, trans_wait) + __field(unsigned long, space_wait) + __field(u64, sched_wait) + __field(u64, io_wait) + ), + + TP_fast_assign( + __entry->dev = dev; + __entry->tid = tid; + __entry->type = type; + __entry->line_no = line_no; + __entry->interval = interval; + __entry->sync = sync; + __entry->requested_blocks = requested_blocks; + __entry->dirtied_blocks = dirtied_blocks; + __entry->trans_wait = trans_wait; + __entry->space_wait = space_wait; + __entry->sched_wait = sched_wait; + 
__entry->io_wait = io_wait; + ), + + TP_printk("dev %d,%d tid %lu type %u line_no %u interval %d " + "sync %d requested_blocks %d dirtied_blocks %d " + "trans_wait %lu space_wait %lu sched_wait %llu io_wait %llu", + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->tid, + __entry->type, __entry->line_no, __entry->interval, + __entry->sync, __entry->requested_blocks, + __entry->dirtied_blocks, __entry->trans_wait, + __entry->space_wait, __entry->sched_wait, __entry->io_wait) +); TRACE_EVENT(jbd2_run_stats, TP_PROTO(dev_t dev, tid_t tid, struct transaction_run_stats_s *stats), diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index 1478b9dd05fae42e2c41eeee83e05d583a966b87..a548a7c240f8409ff58e5a374436e96875946d6e 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -60,9 +60,16 @@ #define __def_gfpflag_names_kasan #endif -#define show_gfp_flags(flags) \ - (flags) ? __print_flags(flags, "|", \ - __def_gfpflag_names __def_gfpflag_names_kasan \ +#ifdef CONFIG_KFENCE +#define __def_gfpflag_names_kfence , \ + gfpflag_string(__GFP_NOKFENCE) +#else +#define __def_gfpflag_names_kfence +#endif + +#define show_gfp_flags(flags) \ + (flags) ? __print_flags(flags, "|", \ + __def_gfpflag_names __def_gfpflag_names_kasan __def_gfpflag_names_kfence \ ) : "none" #ifdef CONFIG_MMU @@ -95,6 +102,12 @@ #define IF_HAVE_PG_ARCH_X(_name) #endif +#ifdef CONFIG_KFENCE +#define IF_HAVE_PG_KFENCE(_name) ,{1UL << PG_##_name, __stringify(_name)} +#else +#define IF_HAVE_PG_KFENCE(_name) +#endif + #define DEF_PAGEFLAG_NAME(_name) { 1UL << PG_##_name, __stringify(_name) } #define __def_pageflag_names \ @@ -125,7 +138,8 @@ IF_HAVE_PG_HWPOISON(hwpoison) \ IF_HAVE_PG_IDLE(idle) \ IF_HAVE_PG_IDLE(young) \ IF_HAVE_PG_ARCH_X(arch_2) \ -IF_HAVE_PG_ARCH_X(arch_3) +IF_HAVE_PG_ARCH_X(arch_3) \ +IF_HAVE_PG_KFENCE(kfence) #define show_page_flags(flags) \ (flags) ? __print_flags(flags, "|", \ diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 54e353c9f919fcfdc70c1cc5c53532f6accd4f29..0e190e112dc4a44be72d24d1bd9db1cea8b9a277 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -441,6 +441,29 @@ TRACE_EVENT(writeback_bdi_register, ) ); +#ifdef CONFIG_CGROUP_WRITEBACK +TRACE_EVENT(insert_memcg_blkcg_link, + TP_PROTO(struct cgroup_subsys_state *memcg_css, + struct cgroup_subsys_state *blkcg_css, + struct cgroup_subsys_state *old_blkcg_css), + TP_ARGS(memcg_css, blkcg_css, old_blkcg_css), + TP_STRUCT__entry( + __field(unsigned int, memcg_ino) + __field(unsigned int, blkcg_ino) + __field(unsigned int, old_blkcg_ino) + ), + TP_fast_assign( + __entry->memcg_ino = kernfs_ino(memcg_css->cgroup->kn); + __entry->blkcg_ino = kernfs_ino(blkcg_css->cgroup->kn); + __entry->old_blkcg_ino = old_blkcg_css ? 
+ kernfs_ino(old_blkcg_css->cgroup->kn) : 0; + ), + TP_printk("memcg_ino=%u blkcg_ino=%u old_blkcg_ino=%u", + __entry->memcg_ino, __entry->blkcg_ino, __entry->old_blkcg_ino + ) +); +#endif + DECLARE_EVENT_CLASS(wbc_class, TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), TP_ARGS(wbc, bdi), diff --git a/include/uapi/linux/arm_sdei.h b/include/uapi/linux/arm_sdei.h index af0630ba5437d4f3f15f4ee0b4ca94843a813a9f..a5375679dd503dbef5acc176442ecd073773aef3 100644 --- a/include/uapi/linux/arm_sdei.h +++ b/include/uapi/linux/arm_sdei.h @@ -24,6 +24,8 @@ #define SDEI_1_0_FN_SDEI_INTERRUPT_RELEASE SDEI_1_0_FN(0x0E) #define SDEI_1_0_FN_SDEI_PRIVATE_RESET SDEI_1_0_FN(0x11) #define SDEI_1_0_FN_SDEI_SHARED_RESET SDEI_1_0_FN(0x12) +#define SDEI_1_0_FN_SDEI_CLEAR_EOI SDEI_1_0_FN(0x18) +#define SDEI_1_0_FN_SET_SECURE_TIMER_PERIOD SDEI_1_0_FN(0x19) #define SDEI_VERSION_MAJOR_SHIFT 48 #define SDEI_VERSION_MAJOR_MASK 0x7fff diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h index d676ed2b246ec269a23f005e4b89c46dfb4504b4..f428015e85de916a65c7e150814eb61f7cbcb33a 100644 --- a/include/uapi/linux/audit.h +++ b/include/uapi/linux/audit.h @@ -441,6 +441,7 @@ enum { #define AUDIT_ARCH_XTENSA (EM_XTENSA) #define AUDIT_ARCH_LOONGARCH32 (EM_LOONGARCH|__AUDIT_ARCH_LE) #define AUDIT_ARCH_LOONGARCH64 (EM_LOONGARCH|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) +#define AUDIT_ARCH_SW64 (EM_SW64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE) #define AUDIT_PERM_EXEC 1 #define AUDIT_PERM_WRITE 2 diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h index ef38c2bc5ab7a58ba03d309654051b25a2ac2933..32458706a403414cc6cb63880b6ee52ee598b63f 100644 --- a/include/uapi/linux/elf-em.h +++ b/include/uapi/linux/elf-em.h @@ -59,6 +59,7 @@ * up with a final number. */ #define EM_ALPHA 0x9026 +#define EM_SW64 0x9916 /* Bogus old m32r magic number, used by old tools. */ #define EM_CYGNUS_M32R 0x9041 diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h index db92a7202b342b3a1df3f7583c86b79f3ad4e3df..730e620286a7ebea94025f89f98640079195908f 100644 --- a/include/uapi/linux/fuse.h +++ b/include/uapi/linux/fuse.h @@ -209,8 +209,12 @@ * - add FUSE_HAS_EXPIRE_ONLY * * 7.39 - * - add FUSE_DIRECT_IO_RELAX + * - add FUSE_DIRECT_IO_ALLOW_MMAP * - add FUSE_STATX and related structures + * + * 7.40 + * - add FUSE_NO_EXPORT_SUPPORT init flag + * - add FUSE_NOTIFY_RESEND, add FUSE_HAS_RESEND init flag */ #ifndef _LINUX_FUSE_H @@ -409,8 +413,10 @@ struct fuse_file_lock { * FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir, * symlink and mknod (single group that matches parent) * FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation - * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now - * allow shared mmap + * FUSE_DIRECT_IO_ALLOW_MMAP: allow shared mmap in FOPEN_DIRECT_IO mode. 
+ * FUSE_NO_EXPORT_SUPPORT: explicitly disable export support + * FUSE_HAS_RESEND: kernel supports resending pending requests, and the high bit + * of the request ID indicates resend requests */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -449,7 +455,15 @@ struct fuse_file_lock { #define FUSE_HAS_INODE_DAX (1ULL << 33) #define FUSE_CREATE_SUPP_GROUP (1ULL << 34) #define FUSE_HAS_EXPIRE_ONLY (1ULL << 35) -#define FUSE_DIRECT_IO_RELAX (1ULL << 36) +#define FUSE_DIRECT_IO_ALLOW_MMAP (1ULL << 36) +#define FUSE_NO_EXPORT_SUPPORT (1ULL << 38) +#define FUSE_HAS_RESEND (1ULL << 39) + +/* Obsolete alias for FUSE_DIRECT_IO_ALLOW_MMAP */ +#define FUSE_DIRECT_IO_RELAX FUSE_DIRECT_IO_ALLOW_MMAP + +#define FUSE_DELETE_STALE (1ULL << 58) +/* The 59th bit is left to FUSE_DIO_SHARED_MMAP */ /** * CUSE INIT request/reply flags @@ -633,6 +647,7 @@ enum fuse_notify_code { FUSE_NOTIFY_STORE = 4, FUSE_NOTIFY_RETRIEVE = 5, FUSE_NOTIFY_DELETE = 6, + FUSE_NOTIFY_RESEND = 7, FUSE_NOTIFY_CODE_MAX, }; @@ -958,6 +973,14 @@ struct fuse_fallocate_in { uint32_t padding; }; +/** + * FUSE request unique ID flag + * + * Indicates whether this is a resend request. The receiver should handle this + * request accordingly. + */ +#define FUSE_UNIQUE_RESEND (1ULL << 63) + struct fuse_in_header { uint32_t len; uint32_t opcode; diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index 8e61f8b7c2ced1221179e3ada854f4bbba73f36f..fdea50f53da39958f8e9f63d18864e297722d9c7 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -165,6 +165,8 @@ enum { * Only one task is allowed to submit requests */ #define IORING_SETUP_SINGLE_ISSUER (1U << 12) +#define IORING_SETUP_IDLE_US (1U << 30) /* thread_idle is in microseconds */ +#define IORING_SETUP_SQPOLL_PERCPU (1U << 31) /* percpu SQ poll thread */ /* * Defer running task work to get events. @@ -466,6 +468,7 @@ struct io_cqring_offsets { #define IORING_ENTER_SQ_WAIT (1U << 2) #define IORING_ENTER_EXT_ARG (1U << 3) #define IORING_ENTER_REGISTERED_RING (1U << 4) +#define IORING_ENTER_SQ_SUBMIT_ON_IDLE (1U << 31) /* * Passed in for io_uring_setup(2). Copied back with updated info on success diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 01766dd839b076029b07d906a01bbf732e94dd5b..3be3e81c67ae26d7d75d2128908b676b85227e98 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -45,6 +45,7 @@ #define KEXEC_ARCH_AARCH64 (183 << 16) #define KEXEC_ARCH_RISCV (243 << 16) #define KEXEC_ARCH_LOONGARCH (258 << 16) +#define KEXEC_ARCH_SW64 (0x9916UL << 16) /* The artificial cap on the number of segments passed to kexec_load. */ #define KEXEC_SEGMENT_MAX 16 diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132da65beb99f9455659c9b75ed109d..def7a0ee97170e3be1e792c3cfc5ab3e7d99c4d0 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -264,6 +264,7 @@ struct kvm_xen_exit { #define KVM_EXIT_RISCV_SBI 35 #define KVM_EXIT_RISCV_CSR 36 #define KVM_EXIT_NOTIFY 37 +#define KVM_EXIT_LOONGARCH_IOCSR 38 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed.
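
Since FUSE_UNIQUE_RESEND occupies the top bit of the request ID, a userspace server that negotiated FUSE_HAS_RESEND can detect replayed requests with a simple mask test. An illustrative sketch; the helper name is ours, not part of the ABI::

    #include <stdbool.h>
    #include <linux/fuse.h>

    /* Illustrative only: true if the kernel resent this request. */
    static bool example_req_is_resend(const struct fuse_in_header *hdr)
    {
    	return hdr->unique & FUSE_UNIQUE_RESEND;
    }
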
*/ @@ -336,6 +337,13 @@ struct kvm_run { __u32 len; __u8 is_write; } mmio; + /* KVM_EXIT_LOONGARCH_IOCSR */ + struct { + __u64 phys_addr; + __u8 data[8]; + __u32 len; + __u8 is_write; + } iocsr_io; /* KVM_EXIT_HYPERCALL */ struct { __u64 nr; @@ -1193,6 +1201,8 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_SEV_ES_GHCB 500 + #ifdef KVM_CAP_IRQ_ROUTING struct kvm_irq_routing_irqchip { @@ -1362,6 +1372,7 @@ struct kvm_dirty_tlb { #define KVM_REG_ARM64 0x6000000000000000ULL #define KVM_REG_MIPS 0x7000000000000000ULL #define KVM_REG_RISCV 0x8000000000000000ULL +#define KVM_REG_LOONGARCH 0x9000000000000000ULL #define KVM_REG_SIZE_SHIFT 52 #define KVM_REG_SIZE_MASK 0x00f0000000000000ULL @@ -1563,6 +1574,11 @@ struct kvm_s390_ucas_mapping { /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +/* ioctls for SW64 vCPU init */ +#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) +#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) +#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) + /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) @@ -1571,6 +1587,10 @@ struct kvm_s390_ucas_mapping { #define KVM_GET_DEVICE_ATTR _IOW(KVMIO, 0xe2, struct kvm_device_attr) #define KVM_HAS_DEVICE_ATTR _IOW(KVMIO, 0xe3, struct kvm_device_attr) +/* ioctls for control vm during system reset */ +#define KVM_CONTROL_PRE_SYSTEM_RESET _IO(KVMIO, 0xe8) +#define KVM_CONTROL_POST_SYSTEM_RESET _IO(KVMIO, 0xe9) + /* * ioctls for vcpu fds */ @@ -1918,6 +1938,9 @@ enum sev_cmd_id { /* Guest Migration Extension */ KVM_SEV_SEND_CANCEL, + /* Hygon CSV batch command */ + KVM_CSV_COMMAND_BATCH = 0x18, + KVM_SEV_NR_MAX, }; @@ -1996,6 +2019,14 @@ struct kvm_sev_send_update_data { __u32 trans_len; }; +struct kvm_sev_send_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + struct kvm_sev_receive_start { __u32 handle; __u32 policy; @@ -2014,6 +2045,30 @@ struct kvm_sev_receive_update_data { __u32 trans_len; }; +struct kvm_sev_receive_update_vmsa { + __u32 vcpu_id; + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv_batch_list_node { + __u64 cmd_data_addr; + __u64 addr; + __u64 next_cmd_addr; +}; + +struct kvm_csv_command_batch { + __u32 command_id; + __u64 csv_batch_list_uaddr; +}; + +struct kvm_csv_init { + __u64 userid_addr; + __u32 len; +}; + #define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) #define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) #define KVM_DEV_ASSIGN_MASK_INTX (1 << 2) @@ -2256,4 +2311,61 @@ struct kvm_s390_zpci_op { /* flags for kvm_s390_zpci_op->u.reg_aen.flags */ #define KVM_S390_ZPCIOP_REGAEN_HOST (1 << 0) +/* CSV3 command */ +enum csv3_cmd_id { + KVM_CSV3_NR_MIN = 0xc0, + + KVM_CSV3_INIT = KVM_CSV3_NR_MIN, + KVM_CSV3_LAUNCH_ENCRYPT_DATA, + KVM_CSV3_LAUNCH_ENCRYPT_VMCB, + KVM_CSV3_SEND_ENCRYPT_DATA, + KVM_CSV3_SEND_ENCRYPT_CONTEXT, + KVM_CSV3_RECEIVE_ENCRYPT_DATA, + KVM_CSV3_RECEIVE_ENCRYPT_CONTEXT, + + KVM_CSV3_NR_MAX, +}; + +struct kvm_csv3_init_data { + __u64 nodemask; +}; + +struct kvm_csv3_launch_encrypt_data { + __u64 gpa; + __u64 uaddr; + __u32 len; +}; + +struct kvm_csv3_send_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_send_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct
kvm_csv3_receive_encrypt_data { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 guest_addr_data; + __u32 guest_addr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + +struct kvm_csv3_receive_encrypt_context { + __u64 hdr_uaddr; + __u32 hdr_len; + __u64 trans_uaddr; + __u32 trans_len; +}; + #endif /* __LINUX_KVM_H */ diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h index 960c7e93d1a98a363df4c6e7eeaffee065512f7c..86369b7a573397a66e4b371c5e21a32c7fac5ccc 100644 --- a/include/uapi/linux/kvm_para.h +++ b/include/uapi/linux/kvm_para.h @@ -30,6 +30,8 @@ #define KVM_HC_SEND_IPI 10 #define KVM_HC_SCHED_YIELD 11 #define KVM_HC_MAP_GPA_RANGE 12 +#define KVM_HC_VM_ATTESTATION 100 /* Specific to Hygon CPU */ +#define KVM_HC_PSP_OP 101 /* Specific to Hygon platform */ /* * hypercalls use architecture specific diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 39c6a250dd1b92af18e3b4a72a047d2784f89382..3a64499b0f5d63734d632ab03cd1966211473d8c 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -204,6 +204,8 @@ enum perf_branch_sample_type_shift { PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT = 18, /* save privilege mode */ + PERF_SAMPLE_BRANCH_COUNTERS_SHIFT = 19, /* save occurrences of events on a branch */ + PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */ }; @@ -235,6 +237,8 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_PRIV_SAVE = 1U << PERF_SAMPLE_BRANCH_PRIV_SAVE_SHIFT, + PERF_SAMPLE_BRANCH_COUNTERS = 1U << PERF_SAMPLE_BRANCH_COUNTERS_SHIFT, + PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT, }; @@ -982,6 +986,12 @@ enum perf_event_type { * { u64 nr; * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX * { u64 from, to, flags } lbr[nr]; + * # + * # The format of the counters is decided by the + * # "branch_counter_nr" and "branch_counter_width", + * # which are defined in the ABI. 
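
Per the sample layout being documented here, the cntr[nr] words trail the lbr[nr] entries in a PERF_SAMPLE_BRANCH_STACK payload, so a consumer recovers them with pointer arithmetic. An illustrative userspace sketch, assuming the entries and their count have already been parsed out of the sample::

    #include <stdio.h>
    #include <linux/types.h>
    #include <linux/perf_event.h>

    /* Illustrative only: dump branch entries plus appended counters. */
    static void example_dump_branches(const struct perf_branch_entry *ent,
    				      __u64 nr)
    {
    	const __u64 *cntr = (const __u64 *)(ent + nr);	/* cntr[nr] follows lbr[nr] */
    	__u64 i;

    	for (i = 0; i < nr; i++)
    		printf("from=%llx to=%llx counter=%llx\n",
    		       (unsigned long long)ent[i].from,
    		       (unsigned long long)ent[i].to,
    		       (unsigned long long)cntr[i]);
    }
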
+ * # + * { u64 counters; } cntr[nr] && PERF_SAMPLE_BRANCH_COUNTERS * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi @@ -1427,6 +1437,9 @@ struct perf_branch_entry { reserved:31; }; +/* Size of used info bits in struct perf_branch_entry */ +#define PERF_BRANCH_ENTRY_INFO_BITS_MAX 33 + union perf_sample_weight { __u64 full; #if defined(__LITTLE_ENDIAN_BITFIELD) diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h index 1c9da485318f93fe50e11a47f284d8fe81106066..07db804852a2b1cbb7ce69e8f9bd604b17649645 100644 --- a/include/uapi/linux/psp-sev.h +++ b/include/uapi/linux/psp-sev.h @@ -32,6 +32,18 @@ enum { SEV_MAX, }; +/** + * CSV platform commands + */ +enum { + CSV_PLATFORM_INIT = 101, + CSV_PLATFORM_SHUTDOWN = 102, + CSV_DOWNLOAD_FIRMWARE = 128, + CSV_HGSC_CERT_IMPORT = 201, + + CSV_MAX, +}; + /** * SEV Firmware status code */ @@ -154,6 +166,32 @@ struct sev_user_data_get_id2 { __u32 length; /* In/Out */ } __packed; +/** + * struct csv_user_data_hgsc_cert_import - HGSC_CERT_IMPORT command parameters + * + * @hgscsk_cert_address: HGSCSK certificate chain + * @hgscsk_cert_len: length of HGSCSK certificate + * @hgsc_cert_address: HGSC certificate chain + * @hgsc_cert_len: length of HGSC certificate + */ +struct csv_user_data_hgsc_cert_import { + __u64 hgscsk_cert_address; /* In */ + __u32 hgscsk_cert_len; /* In */ + __u64 hgsc_cert_address; /* In */ + __u32 hgsc_cert_len; /* In */ +} __packed; + +/** + * struct csv_user_data_download_firmware - DOWNLOAD_FIRMWARE command parameters + * + * @address: physical address of CSV firmware image + * @length: length of the CSV firmware image + */ +struct csv_user_data_download_firmware { + __u64 address; /* In */ + __u32 length; /* In */ +} __packed; + /** * struct sev_issue_cmd - SEV ioctl parameters * diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h index f925a77f19ed1b42ab0626b1b897aefe1328b25d..8931c2bb0afe34263f4438c4dc7385cf45fd3c73 100644 --- a/include/uapi/linux/target_core_user.h +++ b/include/uapi/linux/target_core_user.h @@ -73,6 +73,8 @@ enum tcmu_opcode { struct tcmu_cmd_entry_hdr { __u32 len_op; __u16 cmd_id; +#define TCMU_KFLAG_ZERO_COPY 0x1 +#define TCMU_KFLAG_BYPASS_DATA_AREA 0x2 __u8 kflags; #define TCMU_UFLAG_UNKNOWN_OP 0x1 #define TCMU_UFLAG_READ_LEN 0x2 @@ -185,4 +187,21 @@ enum tcmu_genl_attr { }; #define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1) +struct tcmu_data_xfer { + __u16 cmd_id; + __u16 __pad1; + __u32 iov_cnt; + struct iovec __user *iovec; +}; + +#define TCMU_IOCTL_CMD_COPY_TO_SGL _IOW('T', 0xe0, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_COPY_FROM_SGL _IOR('T', 0xe1, struct tcmu_data_xfer) +#define TCMU_IOCTL_CMD_ZEROCOPY _IOW('T', 0xe2, struct tcmu_cmd_zerocopy) + +struct tcmu_cmd_zerocopy { + struct iovec __user *iov; + __u32 iov_cnt; + __u16 cmd_id; +}; + #endif diff --git a/include/uapi/linux/virtfuse.h b/include/uapi/linux/virtfuse.h new file mode 100644 index 0000000000000000000000000000000000000000..93b7ab200b3293be5c49b33653e37f976d35c515 --- /dev/null +++ b/include/uapi/linux/virtfuse.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _LINUX_VIRTFUSE_H +#define _LINUX_VIRTFUSE_H + +#include +#include + +/* Maximum number of devices supported. */ +#define VIRT_FUSE_MAX_DEVICES 1024 + +/* + * Clone a fuse device sharing the fuse connection bound to the specified + * virtual device. 
+ */ +#define VIRTFUSE_IOC_CLONE _IO(0x99, 1) + +/* Reset the specified virtual device */ +#define VIRTFUSE_IOC_RESET _IO(0x99, 2) + +/* Print all mountinfo of the specified virtual device. */ +#define VIRTFUSE_IOC_GET_MOUNTS _IO(0x99, 3) + +/* + * @len indicates the size of the buffer indicated by @buf + * @buf indicates a buffer to contain the output mountinfo of the specified + * virtual device. + */ +struct virtfuse_mounts_buf { + __u32 len; + __u8 buf[]; +}; + +#endif diff --git a/init/Kconfig b/init/Kconfig index 6d35728b94b2b3b12bfb47c6fcea047dbf145be6..659caf1da85302223470760ab1a784a4ec2c4ece 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1115,6 +1115,36 @@ config CGROUP_DEVICE Provides a cgroup controller implementing whitelists for devices which a process in the cgroup can mknod or open. +config SCHED_SLI + bool "cgroup CPU usage and additional scheduler statistics" + depends on CGROUP_CPUACCT + depends on FAIR_GROUP_SCHED + default y + help + This accounts CPU time spent by tasks in a cgroup as "usr%", "sys%", + "idle%", "steal%", "irq%", "softirq%" and "guest%", and exports + nr_migrations, nr_running and nr_uninterruptible of a cgroup. + + The corresponding interface is cpuacct.proc_stat. + + Note that CPU usage accounting requires the cpuacct and cpu cgroup + subsystems to be mounted together to take effect. + +config RICH_CONTAINER + bool "Alibaba rich container" + depends on CGROUP_CPUACCT + depends on CFS_BANDWIDTH + depends on CPUSETS + select SCHED_SLI + default n + help + Make containers feel like VMs. Change the following interfaces + to reflect the information of the container, like: + "/proc/cpuinfo", "/proc/meminfo", "/sys/devices/system/cpu/online". + Then tools (e.g. top, free) can work in containers as in VMs. + Note that it requires the "cpu,cpuacct,cpuset" controllers to share + a v1 hierarchy to work properly. + config CGROUP_CPUACCT bool "Simple CPU accounting controller" help @@ -1984,3 +2014,21 @@ config ARCH_HAS_SYNC_CORE_BEFORE_USERMODE # . config ARCH_HAS_SYSCALL_WRAPPER def_bool n + +config CK_KABI_RESERVE + bool "Enables KABI and hotfix RESERVE" + default y + help + This option enables KABI and hotfix reserve. + For Anolis Cloud Kernel, the KABI reserve macros and hotfix reserve + macros are the same. + For some embedded systems, KABI and hotfix reserve may not be + necessary. Disable it on demand. + +config CK_KABI_SIZE_ALIGN_CHECKS + bool "Enables more stringent kabi checks in the macros" + default y + depends on CK_KABI_RESERVE + help + This option enables more stringent kabi checks. Those must be disabled + in case of a debug build, because debug builds are allowed to change + struct sizes.
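
A hedged userspace sketch of the VIRTFUSE_IOC_GET_MOUNTS flow above; the device path and the 4 KiB buffer size are assumptions, not part of the ABI::

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/virtfuse.h>

    int main(void)
    {
    	struct virtfuse_mounts_buf *vbuf;
    	int fd = open("/dev/virtfuse0", O_RDWR);	/* path is a guess */

    	if (fd < 0)
    		return 1;
    	vbuf = calloc(1, sizeof(*vbuf) + 4096);
    	if (vbuf) {
    		vbuf->len = 4096;
    		if (ioctl(fd, VIRTFUSE_IOC_GET_MOUNTS, vbuf) == 0)
    			printf("%s", (char *)vbuf->buf);
    		free(vbuf);
    	}
    	close(fd);
    	return 0;
    }
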
\ No newline at end of file diff --git a/init/main.c b/init/main.c index e24b0780fdff7a807bd027ab26e61fc303c624ef..7bbce78cdccf370b3f210ac990fc425075231547 100644 --- a/init/main.c +++ b/init/main.c @@ -1539,7 +1539,8 @@ static noinline void __init kernel_init_freeable(void) rcu_init_tasks_generic(); do_pre_smp_initcalls(); - lockup_detector_init(); + if (disable_sdei_nmi_watchdog) + lockup_detector_init(); smp_init(); sched_init_smp(); @@ -1550,6 +1551,10 @@ static noinline void __init kernel_init_freeable(void) do_basic_setup(); + /* sdei_watchdog needs to be initialized after sdei_init */ + if (!disable_sdei_nmi_watchdog) + lockup_detector_init(); + kunit_run_all_tests(); wait_for_initramfs(); diff --git a/io_uring/io-wq.c b/io_uring/io-wq.c index 522196dfb0ff5a2c8054842569d5b1a8d1d43d2d..fb7aca49d0974b6fe6e5d848ebdc2363f92a1cbe 100644 --- a/io_uring/io-wq.c +++ b/io_uring/io-wq.c @@ -771,7 +771,7 @@ static void create_worker_cont(struct callback_head *cb) worker = container_of(cb, struct io_worker, create_work); clear_bit_unlock(0, &worker->create_state); wq = worker->wq; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); io_worker_release(worker); @@ -840,7 +840,7 @@ static bool create_io_worker(struct io_wq *wq, int index) if (index == IO_WQ_ACCT_BOUND) worker->flags |= IO_WORKER_F_BOUND; - tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE); + tsk = create_io_thread(io_wq_worker, worker, NUMA_NO_NODE, false); if (!IS_ERR(tsk)) { io_init_new_worker(wq, worker, tsk); } else if (!io_should_retry_thread(PTR_ERR(tsk))) { diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index bb8880d1e084c5048af67e53ca45a64d410f9d88..825c7d4acd8206fb8e0f14a01cd213ccfecd92c4 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -138,6 +138,8 @@ struct io_defer_entry { u32 seq; }; +extern struct io_sq_data __percpu **percpu_sqd; + /* requests with any of those set should undergo io_disarm_next() */ #define IO_DISARM_MASK (REQ_F_ARM_LTIMEOUT | REQ_F_LINK_TIMEOUT | REQ_F_FAIL) #define IO_REQ_LINK_FLAGS (REQ_F_LINK | REQ_F_HARDLINK) @@ -1744,11 +1746,12 @@ static void io_iopoll_req_issued(struct io_kiocb *req, unsigned int issue_flags) if (unlikely(needs_lock)) { /* * If IORING_SETUP_SQPOLL is enabled, sqes are either handle - * in sq thread task context or in io worker task context. If - * current task context is sq thread, we don't need to check - * whether should wake up sq thread. + * in sq thread task context, in io worker task context or + * in the original task context. If the current task is the sq + * thread, we don't need to check whether it should be woken up.
*/ if ((ctx->flags & IORING_SETUP_SQPOLL) && + (current != ctx->sq_data->thread) && wq_has_sleeper(&ctx->sq_data->wait)) wake_up(&ctx->sq_data->wait); @@ -3612,6 +3615,7 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, if (unlikely(flags & ~(IORING_ENTER_GETEVENTS | IORING_ENTER_SQ_WAKEUP | IORING_ENTER_SQ_WAIT | IORING_ENTER_EXT_ARG | + IORING_ENTER_SQ_SUBMIT_ON_IDLE | IORING_ENTER_REGISTERED_RING))) return -EINVAL; @@ -3656,8 +3660,18 @@ SYSCALL_DEFINE6(io_uring_enter, unsigned int, fd, u32, to_submit, ret = -EOWNERDEAD; goto out; } - if (flags & IORING_ENTER_SQ_WAKEUP) + if (flags & IORING_ENTER_SQ_WAKEUP) { wake_up(&ctx->sq_data->wait); + if (flags & IORING_ENTER_SQ_SUBMIT_ON_IDLE) { + bool has_lock; + + has_lock = mutex_trylock(&ctx->uring_lock); + if (has_lock) { + io_submit_sqes(ctx, min(to_submit, 8U)); + mutex_unlock(&ctx->uring_lock); + } + } + } if (flags & IORING_ENTER_SQ_WAIT) io_sqpoll_wait_sq(ctx); @@ -4083,7 +4097,8 @@ static long io_uring_setup(u32 entries, struct io_uring_params __user *params) IORING_SETUP_SQE128 | IORING_SETUP_CQE32 | IORING_SETUP_SINGLE_ISSUER | IORING_SETUP_DEFER_TASKRUN | IORING_SETUP_NO_MMAP | IORING_SETUP_REGISTERED_FD_ONLY | - IORING_SETUP_NO_SQARRAY)) + IORING_SETUP_NO_SQARRAY | IORING_SETUP_SQPOLL_PERCPU | + IORING_SETUP_IDLE_US)) return -EINVAL; return io_uring_create(entries, &p, params); @@ -4371,7 +4386,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, if (sqd) { mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); } if (copy_to_user(arg, new_count, sizeof(new_count))) @@ -4397,7 +4412,7 @@ static __cold int io_register_iowq_max_workers(struct io_ring_ctx *ctx, err: if (sqd) { mutex_unlock(&sqd->lock); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); } return ret; } @@ -4618,6 +4633,8 @@ SYSCALL_DEFINE4(io_uring_register, unsigned int, fd, unsigned int, opcode, static int __init io_uring_init(void) { + int cpu; + #define __BUILD_BUG_VERIFY_OFFSET_SIZE(stype, eoffset, esize, ename) do { \ BUILD_BUG_ON(offsetof(stype, ename) != eoffset); \ BUILD_BUG_ON(sizeof_field(stype, ename) != esize); \ @@ -4706,6 +4723,10 @@ static int __init io_uring_init(void) offsetof(struct io_kiocb, cmd.data), sizeof_field(struct io_kiocb, cmd.data), NULL); + percpu_sqd = alloc_percpu(struct io_sq_data *); + for_each_possible_cpu(cpu) + *per_cpu_ptr(percpu_sqd, cpu) = NULL; + #ifdef CONFIG_SYSCTL register_sysctl_init("kernel", kernel_io_uring_disabled_table); #endif diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 65b5dbe3c850ed564432c76f17e64739d430f2fe..de494bebc84cb3be88f068dd5035cfd40ec60d1e 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -24,6 +24,9 @@ enum { IO_SQ_THREAD_SHOULD_PARK, }; +DEFINE_MUTEX(percpu_sqd_lock); +struct io_sq_data __percpu **percpu_sqd; + void io_sq_thread_unpark(struct io_sq_data *sqd) __releases(&sqd->lock) { @@ -64,14 +67,28 @@ void io_sq_thread_stop(struct io_sq_data *sqd) wait_for_completion(&sqd->exited); } -void io_put_sq_data(struct io_sq_data *sqd) +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd) { + int percpu = 0; + + if ((ctx->flags & IORING_SETUP_SQ_AFF) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) + percpu = 1; + + if (percpu) + mutex_lock(&percpu_sqd_lock); + if (refcount_dec_and_test(&sqd->refs)) { WARN_ON_ONCE(atomic_read(&sqd->park_pending)); io_sq_thread_stop(sqd); + if (percpu) + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = NULL; kfree(sqd); } + + if (percpu) + mutex_unlock(&percpu_sqd_lock); } static __cold 
void io_sqd_update_thread_idle(struct io_sq_data *sqd) @@ -79,11 +96,36 @@ static __cold void io_sqd_update_thread_idle(struct io_sq_data *sqd) struct io_ring_ctx *ctx; unsigned sq_thread_idle = 0; - list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) - sq_thread_idle = max(sq_thread_idle, ctx->sq_thread_idle); + sqd->idle_mode_us = false; + list_for_each_entry(ctx, &sqd->ctx_list, sqd_list) { + bool idle_mode_us = ctx->flags & IORING_SETUP_IDLE_US; + unsigned int tmp_idle = idle_mode_us ? ctx->sq_thread_idle : + jiffies_to_usecs(ctx->sq_thread_idle); + + if (idle_mode_us && !sqd->idle_mode_us) + sqd->idle_mode_us = true; + + if (sq_thread_idle < tmp_idle) + sq_thread_idle = tmp_idle; + } + + if (!sqd->idle_mode_us) + sq_thread_idle = usecs_to_jiffies(sq_thread_idle); sqd->sq_thread_idle = sq_thread_idle; } +static inline u64 io_current_time(bool idle_mode_us) +{ + return idle_mode_us ? (ktime_get_ns() >> 10) : get_jiffies_64(); +} + +static inline bool io_time_after(bool idle_mode_us, u64 timeout) +{ + u64 now = io_current_time(idle_mode_us); + + return time_after64(now, timeout); +} + void io_sq_thread_finish(struct io_ring_ctx *ctx) { struct io_sq_data *sqd = ctx->sq_data; @@ -94,7 +136,7 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx) io_sqd_update_thread_idle(sqd); io_sq_thread_unpark(sqd); - io_put_sq_data(sqd); + io_put_sq_data(ctx, sqd); ctx->sq_data = NULL; } } @@ -130,11 +172,11 @@ static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) } static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, - bool *attached) + bool *attached, bool *percpu_found) { struct io_sq_data *sqd; - *attached = false; + *attached = *percpu_found = false; if (p->flags & IORING_SETUP_ATTACH_WQ) { sqd = io_attach_sq_data(p); if (!IS_ERR(sqd)) { @@ -146,6 +188,19 @@ static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, return sqd; } + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + sqd = *per_cpu_ptr(percpu_sqd, p->sq_thread_cpu); + if (sqd) { + refcount_inc(&sqd->refs); + mutex_unlock(&percpu_sqd_lock); + *percpu_found = true; + return sqd; + } + mutex_unlock(&percpu_sqd_lock); + } + sqd = kzalloc(sizeof(*sqd), GFP_KERNEL); if (!sqd) return ERR_PTR(-ENOMEM); @@ -210,8 +265,16 @@ static bool io_sqd_handle_event(struct io_sq_data *sqd) if (test_bit(IO_SQ_THREAD_SHOULD_PARK, &sqd->state) || signal_pending(current)) { mutex_unlock(&sqd->lock); - if (signal_pending(current)) + if (signal_pending(current)) { did_sig = get_signal(&ksig); + if (did_sig && sqd->sq_cpu != -1 && + refcount_read(&sqd->refs) != 0) { + mutex_lock(&percpu_sqd_lock); + if (*per_cpu_ptr(percpu_sqd, sqd->sq_cpu) == sqd) + did_sig = false; + mutex_unlock(&percpu_sqd_lock); + } + } cond_resched(); mutex_lock(&sqd->lock); sqd->sq_cpu = raw_smp_processor_id(); @@ -223,7 +286,7 @@ static int io_sq_thread(void *data) { struct io_sq_data *sqd = data; struct io_ring_ctx *ctx; - unsigned long timeout = 0; + u64 timeout = 0; char buf[TASK_COMM_LEN]; DEFINE_WAIT(wait); @@ -247,7 +310,7 @@ static int io_sq_thread(void *data) if (io_sqd_events_pending(sqd) || signal_pending(current)) { if (io_sqd_handle_event(sqd)) break; - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } cap_entries = !list_is_singular(&sqd->ctx_list); @@ -260,9 +323,9 @@ static int io_sq_thread(void *data) if (io_run_task_work()) sqt_spin = true; - if (sqt_spin || !time_after(jiffies, timeout)) { + if 
(sqt_spin || !io_time_after(sqd->idle_mode_us, timeout)) { if (sqt_spin) - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; if (unlikely(need_resched())) { mutex_unlock(&sqd->lock); cond_resched(); @@ -309,7 +372,7 @@ static int io_sq_thread(void *data) } finish_wait(&sqd->wait, &wait); - timeout = jiffies + sqd->sq_thread_idle; + timeout = io_current_time(sqd->idle_mode_us) + sqd->sq_thread_idle; } io_uring_cancel_generic(true, sqd); @@ -340,6 +403,8 @@ void io_sqpoll_wait_sq(struct io_ring_ctx *ctx) finish_wait(&ctx->sqo_sq_wait, &wait); } +#define DEFAULT_SQ_IDLE_US 10 + __cold int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p) { @@ -362,13 +427,26 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ctx->flags & IORING_SETUP_SQPOLL) { struct task_struct *tsk; struct io_sq_data *sqd; - bool attached; + bool attached, percpu_found; ret = security_uring_sqpoll(); if (ret) return ret; - sqd = io_get_sq_data(p, &attached); + if ((ctx->flags & IORING_SETUP_ATTACH_WQ) && + (ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + /* ATTACH_WQ and SQPOLL_PERCPU are mutually exclusive */ + ret = -EINVAL; + goto err; + } + if ((ctx->flags & IORING_SETUP_SQPOLL_PERCPU) && + !(ctx->flags & IORING_SETUP_SQ_AFF)) { + /* SQPOLL_PERCPU requires SQ_AFF */ + ret = -EINVAL; + goto err; + } + + sqd = io_get_sq_data(p, &attached, &percpu_found); if (IS_ERR(sqd)) { ret = PTR_ERR(sqd); goto err; @@ -376,9 +454,22 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, ctx->sq_creds = get_current_cred(); ctx->sq_data = sqd; - ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); - if (!ctx->sq_thread_idle) - ctx->sq_thread_idle = HZ; + if ((ctx->flags & IORING_SETUP_IDLE_US) && + !(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)) { + ret = -EINVAL; + goto err; + } + + /* + * for ms mode: ctx->sq_thread_idle is in jiffies + * for us mode: ctx->sq_thread_idle is in microseconds + */ + if (ctx->flags & IORING_SETUP_IDLE_US) + ctx->sq_thread_idle = p->sq_thread_idle ? + p->sq_thread_idle : DEFAULT_SQ_IDLE_US; + else + ctx->sq_thread_idle = p->sq_thread_idle ?
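
The validation above encodes a userspace contract: IORING_SETUP_IDLE_US requires IORING_SETUP_SQPOLL_PERCPU, which in turn requires IORING_SETUP_SQ_AFF. A hedged setup sketch using the raw syscall and this patch's headers; queue depth, CPU and idle time are arbitrary::

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/io_uring.h>

    /* Illustrative only: shared per-CPU SQPOLL thread with a 50us idle. */
    static int example_setup_percpu_sqpoll(void)
    {
    	struct io_uring_params p;

    	memset(&p, 0, sizeof(p));
    	p.flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF |
    		  IORING_SETUP_SQPOLL_PERCPU | IORING_SETUP_IDLE_US;
    	p.sq_thread_cpu = 3;	/* share the poller pinned on CPU 3 */
    	p.sq_thread_idle = 50;	/* microseconds in IDLE_US mode */
    	return syscall(__NR_io_uring_setup, 256, &p);
    }
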
+ msecs_to_jiffies(p->sq_thread_idle) : HZ; io_sq_thread_park(sqd); list_add(&ctx->sqd_list, &sqd->ctx_list); @@ -389,7 +480,7 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, if (ret < 0) goto err; - if (attached) + if (attached || percpu_found) return 0; if (p->flags & IORING_SETUP_SQ_AFF) { @@ -405,19 +496,27 @@ __cold int io_sq_offload_create(struct io_ring_ctx *ctx, sqd->task_pid = current->pid; sqd->task_tgid = current->tgid; - tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE); + tsk = create_io_thread(io_sq_thread, sqd, NUMA_NO_NODE, + !!(ctx->flags & IORING_SETUP_SQPOLL_PERCPU)); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); goto err_sqpoll; } sqd->thread = tsk; + if ((p->flags & IORING_SETUP_SQ_AFF) && + (p->flags & IORING_SETUP_SQPOLL_PERCPU)) { + mutex_lock(&percpu_sqd_lock); + *per_cpu_ptr(percpu_sqd, sqd->sq_cpu) = sqd; + mutex_unlock(&percpu_sqd_lock); + } ret = io_uring_alloc_task_context(tsk, ctx); wake_up_new_task(tsk); if (ret) goto err; - } else if (p->flags & IORING_SETUP_SQ_AFF) { - /* Can't have SQ_AFF without SQPOLL */ + } else if (p->flags & (IORING_SETUP_SQ_AFF | IORING_SETUP_IDLE_US | + IORING_SETUP_SQPOLL_PERCPU)) { + /* Can't have SQ_AFF or IDLE_US or SQPOLL_PERCPU without SQPOLL */ ret = -EINVAL; goto err; } diff --git a/io_uring/sqpoll.h b/io_uring/sqpoll.h index 8df37e8c914936d777b9d0495796c236f41e5189..60eec0c97864f84e9cec956833a739936b3223d3 100644 --- a/io_uring/sqpoll.h +++ b/io_uring/sqpoll.h @@ -18,6 +18,8 @@ struct io_sq_data { unsigned long state; struct completion exited; + + bool idle_mode_us; }; int io_sq_offload_create(struct io_ring_ctx *ctx, struct io_uring_params *p); @@ -25,6 +27,6 @@ void io_sq_thread_finish(struct io_ring_ctx *ctx); void io_sq_thread_stop(struct io_sq_data *sqd); void io_sq_thread_park(struct io_sq_data *sqd); void io_sq_thread_unpark(struct io_sq_data *sqd); -void io_put_sq_data(struct io_sq_data *sqd); +void io_put_sq_data(struct io_ring_ctx *ctx, struct io_sq_data *sqd); void io_sqpoll_wait_sq(struct io_ring_ctx *ctx); int io_sqpoll_wq_cpu_affinity(struct io_ring_ctx *ctx, cpumask_var_t mask); diff --git a/kernel/cgroup/cgroup-internal.h b/kernel/cgroup/cgroup-internal.h index c56071f150f2ae74362f6a397c22687409372736..82159299c35b3f0fbd612c96b3e2aa2039cbf9af 100644 --- a/kernel/cgroup/cgroup-internal.h +++ b/kernel/cgroup/cgroup-internal.h @@ -108,6 +108,7 @@ struct cgroup_taskset { /* the src and dst cset list running through cset->mg_node */ struct list_head src_csets; struct list_head dst_csets; + int dst_count; /* the number of tasks in the set */ int nr_tasks; @@ -152,6 +153,7 @@ struct cgroup_mgctx { .src_csets = LIST_HEAD_INIT(tset.src_csets), \ .dst_csets = LIST_HEAD_INIT(tset.dst_csets), \ .csets = &tset.src_csets, \ + .dst_count = 0, \ } #define CGROUP_MGCTX_INIT(name) \ diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c index 518725b57200c2fe6d1d01a2280bd5156073ae0c..255c400953a9ed544b1494f1a8cd1e663417b643 100644 --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c @@ -59,6 +59,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -492,6 +493,12 @@ static struct cgroup_subsys_state *cgroup_css(struct cgroup *cgrp, return &cgrp->self; } +struct cgroup_subsys_state *global_cgroup_css(struct cgroup *cgrp, + int ssid) +{ + return cgroup_css(cgrp, cgroup_subsys[(ssid)]); +} + /** * cgroup_e_css_by_mask - obtain a cgroup's effective css for the specified ss * @cgrp: the cgroup of interest @@ -2429,9 +2436,11 @@ static void cgroup_migrate_add_task(struct 
task_struct *task, if (list_empty(&cset->mg_node)) list_add_tail(&cset->mg_node, &mgctx->tset.src_csets); - if (list_empty(&cset->mg_dst_cset->mg_node)) + if (list_empty(&cset->mg_dst_cset->mg_node)) { list_add_tail(&cset->mg_dst_cset->mg_node, &mgctx->tset.dst_csets); + mgctx->tset.dst_count++; + } } /** @@ -2512,9 +2521,14 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) struct task_struct *task, *tmp_task; struct css_set *cset, *tmp_cset; int ssid, failed_ssid, ret; + LIST_HEAD(tmp_links); /* check that we can legitimately attach to the cgroup */ if (tset->nr_tasks) { + ret = allocate_memcg_blkcg_links(tset->dst_count*2, &tmp_links); + if (ret) + goto out_release_tset; + do_each_subsys_mask(ss, ssid, mgctx->ss_mask) { if (ss->can_attach) { tset->ssid = ssid; @@ -2567,6 +2581,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) tset->ssid = ssid; ss->attach(tset); } + list_for_each_entry(cset, &tset->dst_csets, mg_node) + insert_memcg_blkcg_link(ss, &tmp_links, cset); } while_each_subsys_mask(); } @@ -2593,6 +2609,8 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx) } spin_unlock_irq(&css_set_lock); + free_memcg_blkcg_links(&tmp_links); + /* * Re-initialize the cgroup_taskset structure in case it is reused * again in another cgroup_migrate_add_task()/cgroup_migrate_execute() @@ -5343,6 +5361,8 @@ static void css_free_rwork_fn(struct work_struct *work) struct cgroup_subsys_state *parent = css->parent; int id = css->id; + delete_memcg_blkcg_link(ss, css); + ss->css_free(css); cgroup_idr_remove(&ss->css_idr, id); cgroup_put(cgrp); diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index 4749e0c86c62c317a1a3bc6dc992ff97d0f2b4f2..f7973bf74aadfbf5efc5e4927ec83ed3f82f9338 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -43,6 +43,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_FALSE(cpusets_pre_enable_key); DEFINE_STATIC_KEY_FALSE(cpusets_enabled_key); @@ -1600,6 +1601,8 @@ static int update_parent_subparts_cpumask(struct cpuset *cs, int cmd, return 0; } +static struct cpumask added, deleted, old_cpus; + /* * update_cpumasks_hier() flags */ @@ -1656,6 +1659,11 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, parent->child_ecpus_count--; } + if (cpumask_empty(cp->effective_cpus)) + cpumask_copy(&old_cpus, parent->effective_cpus); + else + cpumask_copy(&old_cpus, cp->effective_cpus); + /* * Skip the whole subtree if * 1) the cpumask remains the same, @@ -1747,8 +1755,16 @@ static void update_cpumasks_hier(struct cpuset *cs, struct tmpmasks *tmp, WARN_ON(!is_in_v2_mode() && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); + /* add = new - old = new & (~old) */ + cpumask_andnot(&added, tmp->new_cpus, &old_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, &added); + update_tasks_cpumask(cp, tmp->new_cpus); + /* deleted = old - new = old & (~new) */ + cpumask_andnot(&deleted, &old_cpus, tmp->new_cpus); + cpuacct_cpuset_changed(cs->css.cgroup, &deleted, NULL); + /* * On default hierarchy, inherit the CS_SCHED_LOAD_BALANCE * from parent if current cpuset isn't a valid partition root @@ -2645,9 +2661,10 @@ static void cpuset_attach_task(struct cpuset *cs, struct task_struct *task) { lockdep_assert_held(&cpuset_mutex); - if (cs != &top_cpuset) + if (cs != &top_cpuset) { guarantee_online_cpus(task, cpus_attach); - else + wilds_cpus_allowed(cpus_attach); + } else cpumask_andnot(cpus_attach, task_cpu_possible_mask(task), cs->subparts_cpus); /* @@ -3327,6 +3344,7 @@ static int 
cpuset_css_online(struct cgroup_subsys_state *css) cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); + cpuacct_cpuset_changed(cs->css.cgroup, NULL, cs->effective_cpus); spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); @@ -3476,8 +3494,24 @@ static void cpuset_fork(struct task_struct *task) rcu_read_unlock(); if (same_cs) { - if (cs == &top_cpuset) + if (cs == &top_cpuset) { + /* + * This is necessary since update_wilds_cpumask() + * could have missed the 'task' if its parent is + * the last one on the iteration list, like: + * + * 1. 'task' dups old dyn_allowed from parent + * 2. update_wilds_cpumask() begins + * 3. new dyn_allowed applied to parent + * 4. update_wilds_cpumask() ends + * 5. 'task' added to the iteration list + * + * Fix this by re-copying current's allowed mask + * here if it changed. + */ + if (!cpumask_equal(task->cpus_ptr, current->cpus_ptr)) + set_cpus_allowed_ptr(task, current->cpus_ptr); return; + } set_cpus_allowed_ptr(task, current->cpus_ptr); task->mems_allowed = current->mems_allowed; @@ -3969,6 +4003,29 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) spin_unlock_irqrestore(&callback_lock, flags); } +#ifdef CONFIG_RICH_CONTAINER +void rich_container_get_cpuset_cpus(struct cpumask *pmask) +{ + unsigned long flags; + struct task_struct *p; + + rcu_read_lock(); + if (sysctl_rich_container_source == 1) { + read_lock(&tasklist_lock); + p = task_active_pid_ns(current)->child_reaper; + read_unlock(&tasklist_lock); + + } else { + p = current; + } + + spin_lock_irqsave(&callback_lock, flags); + guarantee_online_cpus(p, pmask); + spin_unlock_irqrestore(&callback_lock, flags); + rcu_read_unlock(); +} +#endif + /** * cpuset_cpus_allowed_fallback - final fallback before complete catastrophe.
* @tsk: pointer to task_struct with which the scheduler is struggling diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c index d80d7a6081412994582e2a3686442226b582cd50..2ac57d3760cf3426a210f0fac99ac4740d80b66f 100644 --- a/kernel/cgroup/rstat.c +++ b/kernel/cgroup/rstat.c @@ -326,6 +326,11 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum += src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum += src_bstat->forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum += src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum += src_bstat->sibidle_task_sum; #endif } @@ -337,6 +342,11 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat, dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime; #ifdef CONFIG_SCHED_CORE dst_bstat->forceidle_sum -= src_bstat->forceidle_sum; + dst_bstat->forceidle_task_sum -= src_bstat->forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + dst_bstat->sibidle_sum -= src_bstat->sibidle_sum; + dst_bstat->sibidle_task_sum -= src_bstat->sibidle_task_sum; #endif } @@ -430,6 +440,17 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp, case CPUTIME_FORCEIDLE: rstatc->bstat.forceidle_sum += delta_exec; break; + case CPUTIME_FORCEIDLE_TASK: + rstatc->bstat.forceidle_task_sum += delta_exec; + break; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + case CPUTIME_SIBIDLE: + rstatc->bstat.sibidle_sum += delta_exec; + break; + case CPUTIME_SIBIDLE_TASK: + rstatc->bstat.sibidle_task_sum += delta_exec; + break; #endif default: break; @@ -473,6 +494,11 @@ static void root_cgroup_cputime(struct cgroup_base_stat *bstat) #ifdef CONFIG_SCHED_CORE bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE]; + bstat->forceidle_task_sum += cpustat[CPUTIME_FORCEIDLE_TASK]; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + bstat->sibidle_sum += cpustat[CPUTIME_SIBIDLE]; + bstat->sibidle_task_sum += cpustat[CPUTIME_SIBIDLE_TASK]; #endif } } @@ -484,6 +510,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) struct cgroup_base_stat bstat; #ifdef CONFIG_SCHED_CORE u64 forceidle_time; + u64 forceidle_task_time; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + u64 sibidle_time; + u64 sibidle_task_time; #endif if (cgroup_parent(cgrp)) { @@ -493,6 +524,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) &utime, &stime); #ifdef CONFIG_SCHED_CORE forceidle_time = cgrp->bstat.forceidle_sum; + forceidle_task_time = cgrp->bstat.forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = cgrp->bstat.sibidle_sum; + sibidle_task_time = cgrp->bstat.sibidle_task_sum; #endif cgroup_rstat_flush_release(); } else { @@ -502,6 +538,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) stime = bstat.cputime.stime; #ifdef CONFIG_SCHED_CORE forceidle_time = bstat.forceidle_sum; + forceidle_task_time = bstat.forceidle_task_sum; +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + sibidle_time = bstat.sibidle_sum; + sibidle_task_time = bstat.sibidle_task_sum; #endif } @@ -510,6 +551,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) do_div(stime, NSEC_PER_USEC); #ifdef CONFIG_SCHED_CORE do_div(forceidle_time, NSEC_PER_USEC); + do_div(forceidle_task_time, 
NSEC_PER_USEC); +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + do_div(sibidle_time, NSEC_PER_USEC); + do_div(sibidle_task_time, NSEC_PER_USEC); #endif seq_printf(seq, "usage_usec %llu\n" @@ -519,6 +565,11 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq) #ifdef CONFIG_SCHED_CORE seq_printf(seq, "core_sched.force_idle_usec %llu\n", forceidle_time); + seq_printf(seq, "core_sched.force_idle_task_usec %llu\n", forceidle_task_time); +#endif +#if defined(CONFIG_SCHED_ACPU) || defined(CONFIG_SCHED_CORE) + seq_printf(seq, "sibidle_usec %llu\n", sibidle_time); + seq_printf(seq, "sibidle_task_usec %llu\n", sibidle_task_time); #endif } diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index f005c66f378c32e9bae2d84b0ca80d1c126f0c15..8b860c7ecabc54edbaaea2bedf8802cd0840bc64 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -224,6 +224,9 @@ void __init dma_contiguous_reserve(phys_addr_t limit) dma_numa_cma_reserve(); + if (is_zhaoxin_kh40000) + return; + pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit); if (size_cmdline != -1) { diff --git a/kernel/events/core.c b/kernel/events/core.c index 252387b6ac8d691833493e936dec0a1ad6b59739..4995c6ba32e657f088ea2098c1132d2b495ae8df 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7375,6 +7375,14 @@ void perf_output_sample(struct perf_output_handle *handle, if (branch_sample_hw_index(event)) perf_output_put(handle, data->br_stack->hw_idx); perf_output_copy(handle, data->br_stack->entries, size); + /* + * Add the extension space which is appended + * right after the struct perf_branch_stack. + */ + if (data->br_stack_cntr) { + size = data->br_stack->nr * sizeof(u64); + perf_output_copy(handle, data->br_stack_cntr, size); + } } else { /* * we always store at least the value of nr diff --git a/kernel/fork.c b/kernel/fork.c index 177ce7438db6b1ca4f309351c7364e15ca4e6ab6..4474fbb6aa8b7a67e0b66f33253bdf3cb147506f 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -110,6 +110,11 @@ #define CREATE_TRACE_POINTS #include +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +#else +#define unprivileged_userns_clone 0 +#endif /* * Minimum number of threads to boot the kernel @@ -2260,6 +2265,10 @@ __latent_entropy struct task_struct *copy_process( if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) return ERR_PTR(-EINVAL); + if ((clone_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) + if (!capable(CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + /* * Thread groups must share signals as well, and detached threads * can only be started up within the thread group. @@ -2724,6 +2733,8 @@ __latent_entropy struct task_struct *copy_process( proc_fork_connector(p); sched_post_fork(p); cgroup_post_fork(p, args); + if (likely(p->pid) && is_child_reaper(pid)) + create_rich_container_reaper(p); perf_event_fork(p); trace_task_newtask(p, clone_flags); @@ -2840,13 +2851,22 @@ struct task_struct * __init fork_idle(int cpu) * The returned task is inactive, and the caller must fire it up through * wake_up_new_task(p). All signals are blocked in the created task. */ -struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node) -{ - unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD| - CLONE_IO; +struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node, + bool unshare) +{ + unsigned long flags = unshare ? 
0 : (CLONE_FS|CLONE_FILES| + CLONE_SIGHAND|CLONE_THREAD| + CLONE_IO|CLONE_VM); + /* The 'unshare' flag controls whether the child shares the parent's mm + * directly (taking an extra reference on it) or copies mm/files in + * copy_process(). With 'unshare' set, the io_thread does not share the + * parent's mm directly, yet its mm can still be shared among different + * tasks, which is more reasonable. + */ struct kernel_clone_args args = { - .flags = ((lower_32_bits(flags) | CLONE_VM | - CLONE_UNTRACED) & ~CSIGNAL), + .flags = ((lower_32_bits(flags) | CLONE_UNTRACED) + & ~CSIGNAL), .exit_signal = (lower_32_bits(flags) & CSIGNAL), .fn = fn, .fn_arg = arg, @@ -3413,6 +3433,12 @@ int ksys_unshare(unsigned long unshare_flags) if (unshare_flags & CLONE_NEWNS) unshare_flags |= CLONE_FS; + if ((unshare_flags & CLONE_NEWUSER) && !unprivileged_userns_clone) { + err = -EPERM; + if (!capable(CAP_SYS_ADMIN)) + goto bad_unshare_out; + } + err = check_unshare_flags(unshare_flags); if (err) goto bad_unshare_out; diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 2531f3496ab6d73a7570c91ad47198bc9622e1fc..8a65b0c34b5ac096794894c12b5168d7066fcbec 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -148,5 +148,5 @@ config GENERIC_IRQ_MULTI_HANDLER # Do not even think of enabling this on any new platform config DEPRECATED_IRQ_CPU_ONOFFLINE bool - depends on CAVIUM_OCTEON_SOC - default CAVIUM_OCTEON_SOC + depends on CAVIUM_OCTEON_SOC || LOONGARCH + default CAVIUM_OCTEON_SOC || LOONGARCH diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c index 619972c78774f7d2e0ae2ad6fe5aa930ffde9d06..bd99e88d74eb5ccfe996640041286e7cbfbbe0f0 100644 --- a/kernel/pid_namespace.c +++ b/kernel/pid_namespace.c @@ -25,6 +25,11 @@ #include #include "pid_sysctl.h" +#ifdef CONFIG_RICH_CONTAINER +int sysctl_rich_container_enable; +int sysctl_rich_container_source; /* 0 - current; 1 - child_reaper */ +#endif + static DEFINE_MUTEX(pid_caches_mutex); static struct kmem_cache *pid_ns_cachep; /* Write once array, filled from the beginning. */ diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 0b3af1529778c0ffe58675af216faeba434aba5e..8bccea05df1b74bb27ee2911598e337acf51f5cb 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -2167,10 +2167,17 @@ int vprintk_store(int facility, int level, u16 text_len; int ret = 0; u64 ts_nsec; +#ifdef CONFIG_SW64_RRK + extern int sw64_printk(const char *fmt, va_list args); +#endif if (!printk_enter_irqsave(recursion_ptr, irqflags)) return 0; +#ifdef CONFIG_SW64_RRK + sw64_printk(fmt, args); +#endif + /* * Since the duration of printk() can vary depending on the message * and state of the ringbuffer, grab the timestamp now so that it is diff --git a/kernel/sched/core.c b/kernel/sched/core.c index a854b71836dd5b8a811d098a0cc24a2381e1e727..e93a1f11b52dacb19ec7c46e4518e960e296bf45 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -151,6 +151,19 @@ const_debug unsigned int sysctl_sched_nr_migrate = SCHED_NR_MIGRATE_BREAK; __read_mostly int scheduler_running; +#ifdef CONFIG_SCHED_ACPU +DEFINE_STATIC_KEY_FALSE(acpu_enabled); +unsigned int sysctl_sched_acpu_enabled; +#endif + +#ifdef CONFIG_CFS_BANDWIDTH +/* + * Percent of burst assigned to cfs_b->runtime on tg_set_cfs_bandwidth, + * 0 by default.
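+ * When nonzero, tg_set_cfs_bandwidth() pre-charges this percentage of + * the configured burst into cfs_b->runtime, capped at max_cfs_runtime.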
+ */ +unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + #ifdef CONFIG_SCHED_CORE DEFINE_STATIC_KEY_FALSE(__sched_core_enabled); @@ -368,7 +381,8 @@ static void __sched_core_flip(bool enabled) for_each_cpu(t, smt_mask) cpu_rq(t)->core_enabled = enabled; - cpu_rq(cpu)->core->core_forceidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start = 0; + cpu_rq(cpu)->core->core_sibidle_start_task = 0; sched_core_unlock(cpu, &flags); @@ -2122,6 +2136,12 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) p->sched_class->dequeue_task(rq, p, flags); } +static void update_nr_uninterruptible(struct task_struct *tsk, long inc) +{ + if (tsk->sched_class->update_nr_uninterruptible) + tsk->sched_class->update_nr_uninterruptible(tsk, inc); +} + void activate_task(struct rq *rq, struct task_struct *p, int flags) { if (task_on_rq_migrating(p)) @@ -3391,6 +3411,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) p->se.nr_migrations++; rseq_migrate(p); sched_mm_cid_migrate_from(p); + task_ca_increase_nr_migrations(p); perf_event_task_migrate(p); } @@ -3775,8 +3796,10 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, lockdep_assert_rq_held(rq); - if (p->sched_contributes_to_load) + if (p->sched_contributes_to_load) { + update_nr_uninterruptible(p, -1); rq->nr_uninterruptible--; + } #ifdef CONFIG_SMP if (wake_flags & WF_MIGRATED) @@ -3786,6 +3809,7 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } activate_task(rq, p, en_flags); @@ -3960,6 +3984,18 @@ bool cpus_share_cache(int this_cpu, int that_cpu) return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); } +/* + * Whether CPUs share cache resources: the LLC on non-cluster machines, + * the LLC tag or L2 on machines with clusters. + */ +bool cpus_share_resources(int this_cpu, int that_cpu) +{ + if (this_cpu == that_cpu) + return true; + + return per_cpu(sd_share_id, this_cpu) == per_cpu(sd_share_id, that_cpu); +} + static inline bool ttwu_queue_cond(struct task_struct *p, int cpu) { /* @@ -4337,6 +4373,7 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) if (p->in_iowait) { delayacct_blkio_end(p); atomic_dec(&task_rq(p)->nr_iowait); + update_nr_iowait(p, -1); } wake_flags |= WF_MIGRATED; @@ -4977,6 +5014,137 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr, #endif /* CONFIG_PREEMPT_NOTIFIERS */ +#ifdef CONFIG_SCHED_ACPU +static void acpu_enable(void) +{ + int i; + + for_each_possible_cpu(i) { + struct rq *rq = cpu_rq(i); + + /* It may not be that accurate, but useful enough.
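+ * The rq clock is sampled without holding the rq lock here, so the + * starting timestamp can be slightly stale.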
*/ + rq->last_acpu_update_time = rq->clock; + } + static_branch_enable(&acpu_enabled); +} + +static void acpu_disable(void) +{ + static_branch_disable(&acpu_enabled); +} + +int sched_acpu_enable_handler(struct ctl_table *table, int write, void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + int ret; + unsigned int old, new; + + if (!write) { + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + return ret; + } + + old = sysctl_sched_acpu_enabled; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + new = sysctl_sched_acpu_enabled; + if (!ret && write && (old != new)) { + if (new) + acpu_enable(); + else + acpu_disable(); + } + + return ret; +} + +static void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ + const int cpu = cpu_of(rq); + const struct cpumask *smt_mask = cpu_smt_mask(cpu); + u64 now = rq_clock(rq); + u64 now_task = rq_clock_task(rq); + u64 sibidle_sum, sibidle_task_sum, last_update_time, last_update_time_task; + s64 delta, delta_task, last, last_task; + int i; + + if (!static_branch_likely(&acpu_enabled) || !schedstat_enabled()) + return; + + /* + * If core sched is enabled and core_sibidle_count is not zero, we update sibidle + * time in function __sched_core_account_sibidle(). + */ +#ifdef CONFIG_SCHED_CORE + if (rq->core->core_sibidle_count) + goto out; +#endif + + /* Update idle sum and busy sum for current rq. */ + delta = now - rq->last_acpu_update_time; + if (prev == rq->idle) + rq->acpu_idle_sum += delta; + + /* + * Be careful, smt_mask may be NULL. + * At this stage we only handle the case of two SMT siblings. + */ + if (unlikely(!smt_mask) || unlikely(cpumask_weight(smt_mask) != 2)) + goto out; + + for_each_cpu(i, smt_mask) { + if (i != cpu) { + struct rq *rq_i = cpu_rq(i); + struct task_struct *curr_i = rq_i->curr; + + last = (s64)(rq->last_acpu_update_time - + rq_i->last_acpu_update_time); + last_update_time = last >= 0 ? rq->last_acpu_update_time : + rq_i->last_acpu_update_time; + last_task = (s64)(rq->last_acpu_update_time_task - + rq_i->last_acpu_update_time_task); + last_update_time_task = last_task >= 0 ? + rq->last_acpu_update_time_task : + rq_i->last_acpu_update_time_task; + /* + * The sibling may update acpu at the same time, and its + * timestamp may be newer than this rq's. + */ + delta = now - last_update_time; + delta = delta > 0 ? delta : 0; + delta_task = now_task - last_update_time_task; + delta_task = delta_task > 0 ? delta_task : 0; + + /* Add the delta to improve accuracy. */ + sibidle_sum = last >= 0 ? rq->sibidle_sum : rq_i->acpu_idle_sum; + sibidle_task_sum = last_task >= 0 ? rq->sibidle_task_sum : + rq_i->acpu_idle_sum; + if (curr_i == rq_i->idle) { + sibidle_sum += delta; + sibidle_task_sum += delta_task; + } + } + } + + if (prev != rq->idle) { + delta = sibidle_sum - rq->sibidle_sum; + delta = delta > 0 ? delta : 0; + delta_task = sibidle_task_sum - rq->sibidle_task_sum; + delta_task = delta_task > 0 ?
delta_task : 0; + __account_sibidle_time(prev, delta, delta_task, false); + } + + rq->sibidle_sum = sibidle_sum; + rq->sibidle_task_sum = sibidle_task_sum; +out: + rq->last_acpu_update_time = now; + rq->last_acpu_update_time_task = now_task; +} +#else +static inline void update_acpu(struct rq *rq, struct task_struct *prev, struct task_struct *next) +{ +} +#endif /* CONFIG_SCHED_ACPU */ + static inline void prepare_task(struct task_struct *next) { #ifdef CONFIG_SMP @@ -5185,6 +5353,7 @@ prepare_task_switch(struct rq *rq, struct task_struct *prev, { kcov_prepare_switch(prev); sched_info_switch(rq, prev, next); + update_acpu(rq, prev, next); perf_event_task_sched_out(prev, next); rseq_preempt(prev); fire_sched_out_preempt_notifiers(prev, next); @@ -5657,6 +5826,7 @@ void scheduler_tick(void) thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq)); update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure); curr->sched_class->task_tick(rq, curr, 0); + update_acpu(rq, curr, curr); if (sched_feat(LATENCY_WARN)) resched_latency = cpu_resched_latency(rq); calc_global_load_tick(rq); @@ -6124,18 +6294,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* reset state */ rq->core->core_cookie = 0UL; - if (rq->core->core_forceidle_count) { + if (rq->core->core_sibidle_count) { if (!core_clock_updated) { update_rq_clock(rq->core); core_clock_updated = true; } - sched_core_account_forceidle(rq); + sched_core_account_sibidle(rq); /* reset after accounting force idle */ - rq->core->core_forceidle_start = 0; - rq->core->core_forceidle_count = 0; - rq->core->core_forceidle_occupation = 0; - need_sync = true; - fi_before = true; + rq->core->core_sibidle_start = 0; + rq->core->core_sibidle_start_task = 0; + rq->core->core_sibidle_count = 0; + rq->core->core_sibidle_occupation = 0; + if (rq->core->core_forceidle_count) { + rq->core->core_forceidle_count = 0; + need_sync = true; + fi_before = true; + } } /* @@ -6211,6 +6385,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) rq_i->core_pick = p; if (p == rq_i->idle) { + rq->core->core_sibidle_count++; if (rq_i->nr_running) { rq->core->core_forceidle_count++; if (!fi_before) @@ -6221,9 +6396,10 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } } - if (schedstat_enabled() && rq->core->core_forceidle_count) { - rq->core->core_forceidle_start = rq_clock(rq->core); - rq->core->core_forceidle_occupation = occ; + if (schedstat_enabled() && rq->core->core_sibidle_count) { + rq->core->core_sibidle_start = rq_clock(rq->core); + rq->core->core_sibidle_start_task = rq_clock_task(rq->core); + rq->core->core_sibidle_occupation = occ; } rq->core->core_pick_seq = rq->core->core_task_seq; @@ -6265,7 +6441,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (!(fi_before && rq->core->core_forceidle_count)) task_vruntime_update(rq_i, rq_i->core_pick, !!rq->core->core_forceidle_count); - rq_i->core_pick->core_occupation = occ; + if (rq->core->core_forceidle_count) + rq_i->core_pick->core_occupation = occ; if (i == cpu) { rq_i->core_pick = NULL; @@ -6480,14 +6657,16 @@ static void sched_core_cpu_deactivate(unsigned int cpu) core_rq->core_cookie = rq->core_cookie; core_rq->core_forceidle_count = rq->core_forceidle_count; core_rq->core_forceidle_seq = rq->core_forceidle_seq; - core_rq->core_forceidle_occupation = rq->core_forceidle_occupation; + core_rq->core_sibidle_occupation = rq->core_sibidle_occupation; + core_rq->core_sibidle_count = 
rq->core_sibidle_count; /* * Accounting edge for forced idle is handled in pick_next_task(). * Don't need another one here, since the hotplug thread shouldn't * have a cookie. */ - core_rq->core_forceidle_start = 0; + core_rq->core_sibidle_start = 0; + core_rq->core_sibidle_start_task = 0; /* install new leader */ for_each_cpu(t, smt_mask) { @@ -6635,8 +6814,10 @@ static void __sched notrace __schedule(unsigned int sched_mode) !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); - if (prev->sched_contributes_to_load) + if (prev->sched_contributes_to_load) { + update_nr_uninterruptible(prev, 1); rq->nr_uninterruptible++; + } /* * __schedule() ttwu() @@ -6653,6 +6834,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) if (prev->in_iowait) { atomic_inc(&rq->nr_iowait); + update_nr_iowait(prev, 1); delayacct_blkio_start(); } } @@ -9807,6 +9989,11 @@ static void calc_load_migrate(struct rq *rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 1); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif } static void dump_rq_tasks(struct rq *rq, const char *loglvl) @@ -9898,11 +10085,19 @@ int in_sched_functions(unsigned long addr) } #ifdef CONFIG_CGROUP_SCHED +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct sched_cgroup_lat_stat_cpu, root_lat_stat_cpu); +#endif + /* * Default task group. * Every task in system belongs to this group at bootup. */ -struct task_group root_task_group; +struct task_group root_task_group = { +#ifdef CONFIG_SCHED_SLI + .lat_stat_cpu = &root_lat_stat_cpu, +#endif +}; LIST_HEAD(task_groups); /* Cacheline aligned slab cache for task_group */ @@ -9980,6 +10175,9 @@ void __init sched_init(void) raw_spin_lock_init(&rq->__lock); rq->nr_running = 0; rq->calc_load_active = 0; +#ifdef CONFIG_SCHED_SLI + rq->calc_load_active_r = 0; +#endif rq->calc_load_update = jiffies + LOAD_FREQ; init_cfs_rq(&rq->cfs); init_rt_rq(&rq->rt); @@ -10042,6 +10240,12 @@ void __init sched_init(void) rcuwait_init(&rq->hotplug_wait); #endif #endif /* CONFIG_SMP */ + +#ifdef CONFIG_SCHED_ACPU + rq->acpu_idle_sum = 0; + rq->sibidle_sum = 0; + rq->last_acpu_update_time = rq->clock; +#endif hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); @@ -10051,8 +10255,10 @@ void __init sched_init(void) rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; - rq->core_forceidle_occupation = 0; - rq->core_forceidle_start = 0; + rq->core_sibidle_count = 0; + rq->core_sibidle_occupation = 0; + rq->core_sibidle_start = 0; + rq->core_sibidle_start_task = 0; rq->core_cookie = 0UL; #endif @@ -10364,6 +10570,12 @@ static void sched_free_group(struct task_group *tg) free_fair_sched_group(tg); free_rt_sched_group(tg); autogroup_free(tg); + +#ifdef CONFIG_SCHED_SLI + if (tg->lat_stat_cpu) + free_percpu(tg->lat_stat_cpu); +#endif + kmem_cache_free(task_group_cache, tg); } @@ -10398,8 +10610,17 @@ struct task_group *sched_create_group(struct task_group *parent) if (!alloc_rt_sched_group(tg, parent)) goto err; +#ifdef CONFIG_SCHED_SLI + tg->lat_stat_cpu = alloc_percpu(struct sched_cgroup_lat_stat_cpu); + if (!tg->lat_stat_cpu) + goto err; +#endif + alloc_uclamp_sched_group(tg, parent); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + tg->ht_ratio = 100; +#endif return tg; err: @@ -10523,8 +10744,20 @@ void sched_move_task(struct task_struct *tsk) if (running) put_prev_task(rq, tsk); + /* decrease old group */ + if ((!queued && task_contributes_to_load(tsk)) || + 
(READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, -1); + sched_change_group(tsk, group); + /* increase new group after change */ + if ((!queued && task_contributes_to_load(tsk)) || + (READ_ONCE(tsk->__state) == TASK_WAKING && + tsk->sched_contributes_to_load)) + update_nr_uninterruptible(tsk, 1); + if (queued) enqueue_task(rq, tsk, queue_flags); if (running) { @@ -10825,15 +11058,16 @@ static DEFINE_MUTEX(cfs_constraints_mutex); const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ static const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ /* More than 203 days if BW_SHIFT equals 20. */ -static const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; +const u64 max_cfs_runtime = MAX_BW * NSEC_PER_USEC; static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, - u64 burst) + u64 burst, u64 init_buffer) { int i, ret = 0, runtime_enabled, runtime_was_enabled; struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; + u64 buffer, burst_onset; if (tg == &root_task_group) return -EINVAL; @@ -10864,6 +11098,16 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, burst + quota > max_cfs_runtime)) return -EINVAL; + /* + * Bound burst and init_buffer to defend against overflow during the + * bandwidth shift. + */ + if (burst > max_cfs_runtime || init_buffer > max_cfs_runtime) + return -EINVAL; + + if (quota == RUNTIME_INF) + buffer = RUNTIME_INF; + else + buffer = min(max_cfs_runtime, quota + burst); /* * Prevent race between setting of cfs_rq->runtime_enabled and * unthrottle_offline_cfs_rqs(). @@ -10886,12 +11130,29 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; cfs_b->burst = burst; + cfs_b->buffer = buffer; + cfs_b->init_buffer = init_buffer; - __refill_cfs_bandwidth_runtime(cfs_b); + cfs_b->max_overrun = DIV_ROUND_UP_ULL(max_cfs_runtime, quota); + cfs_b->runtime = cfs_b->quota; + + /* Pre-charge the configured burst onset, if any. */ + if (cfs_b->quota != RUNTIME_INF && sysctl_sched_cfs_bw_burst_onset_percent > 0) { + + burst_onset = div_u64(burst, 100) * + sysctl_sched_cfs_bw_burst_onset_percent; + + cfs_b->runtime += burst_onset; + cfs_b->runtime = min(max_cfs_runtime, cfs_b->runtime); + } + + cfs_b->runtime = max(cfs_b->runtime, init_buffer); + cfs_b->current_buffer = max(cfs_b->buffer, init_buffer); + cfs_b->runtime_snap = cfs_b->runtime; /* Restart the period timer (if active) to handle new period expiry: */ if (runtime_enabled) - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 1); raw_spin_unlock_irq(&cfs_b->lock); @@ -10919,10 +11180,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota, static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; period = ktime_to_ns(tg->cfs_bandwidth.period); burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; if (cfs_quota_us < 0) quota = RUNTIME_INF; else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC) @@ -10930,10 +11192,10 @@ static int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) else return -EINVAL; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_quota(struct task_group *tg) +long tg_get_cfs_quota(struct task_group *tg) { u64 quota_us; @@ -10948,7
+11210,7 @@ static long tg_get_cfs_quota(struct task_group *tg) static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC) return -EINVAL; @@ -10956,11 +11218,12 @@ static int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) period = (u64)cfs_period_us * NSEC_PER_USEC; quota = tg->cfs_bandwidth.quota; burst = tg->cfs_bandwidth.burst; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } -static long tg_get_cfs_period(struct task_group *tg) +long tg_get_cfs_period(struct task_group *tg) { u64 cfs_period_us; @@ -10972,28 +11235,66 @@ static long tg_get_cfs_period(struct task_group *tg) static int tg_set_cfs_burst(struct task_group *tg, long cfs_burst_us) { - u64 quota, period, burst; + u64 quota, period, burst, init_buffer; - if ((u64)cfs_burst_us > U64_MAX / NSEC_PER_USEC) + if (cfs_burst_us < 0) + burst = RUNTIME_INF; + else if ((u64)cfs_burst_us <= U64_MAX / NSEC_PER_USEC) + burst = (u64)cfs_burst_us * NSEC_PER_USEC; + else return -EINVAL; - burst = (u64)cfs_burst_us * NSEC_PER_USEC; period = ktime_to_ns(tg->cfs_bandwidth.period); quota = tg->cfs_bandwidth.quota; + init_buffer = tg->cfs_bandwidth.init_buffer; - return tg_set_cfs_bandwidth(tg, period, quota, burst); + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); } static long tg_get_cfs_burst(struct task_group *tg) { u64 burst_us; + if (tg->cfs_bandwidth.burst == RUNTIME_INF) + return -1; + burst_us = tg->cfs_bandwidth.burst; do_div(burst_us, NSEC_PER_USEC); return burst_us; } +static int tg_set_cfs_init_buffer(struct task_group *tg, long cfs_init_buffer_us) +{ + u64 quota, period, burst, init_buffer; + + period = ktime_to_ns(tg->cfs_bandwidth.period); + quota = tg->cfs_bandwidth.quota; + burst = tg->cfs_bandwidth.burst; + if (cfs_init_buffer_us < 0) + init_buffer = RUNTIME_INF; + else if ((u64)cfs_init_buffer_us <= U64_MAX / NSEC_PER_USEC) + init_buffer = (u64)cfs_init_buffer_us * NSEC_PER_USEC; + else + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); +} + +static long tg_get_cfs_init_buffer(struct task_group *tg) +{ + u64 init_buffer_us; + + if (tg->cfs_bandwidth.init_buffer == RUNTIME_INF) + return -1; + + init_buffer_us = tg->cfs_bandwidth.init_buffer; + do_div(init_buffer_us, NSEC_PER_USEC); + + return init_buffer_us; +} + static s64 cpu_cfs_quota_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { @@ -11018,18 +11319,30 @@ static int cpu_cfs_period_write_u64(struct cgroup_subsys_state *css, return tg_set_cfs_period(css_tg(css), cfs_period_us); } -static u64 cpu_cfs_burst_read_u64(struct cgroup_subsys_state *css, +static s64 cpu_cfs_burst_read_s64(struct cgroup_subsys_state *css, struct cftype *cft) { return tg_get_cfs_burst(css_tg(css)); } -static int cpu_cfs_burst_write_u64(struct cgroup_subsys_state *css, - struct cftype *cftype, u64 cfs_burst_us) +static int cpu_cfs_burst_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_burst_us) { return tg_set_cfs_burst(css_tg(css), cfs_burst_us); } +static s64 cpu_cfs_init_buffer_read_s64(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + return tg_get_cfs_init_buffer(css_tg(css)); +} + +static int cpu_cfs_init_buffer_write_s64(struct cgroup_subsys_state *css, + struct cftype *cftype, s64 cfs_init_buffer_us) +{ + return
tg_set_cfs_init_buffer(css_tg(css), cfs_init_buffer_us); +} + struct cfs_schedulable_data { struct task_group *tg; u64 period, quota; @@ -11140,6 +11453,7 @@ static int cpu_cfs_stat_show(struct seq_file *sf, void *v) seq_printf(sf, "wait_sum %llu\n", ws); } + seq_printf(sf, "current_bw %llu\n", cfs_b->runtime); seq_printf(sf, "nr_bursts %d\n", cfs_b->nr_burst); seq_printf(sf, "burst_time %llu\n", cfs_b->burst_time); @@ -11209,6 +11523,38 @@ static int cpu_idle_write_s64(struct cgroup_subsys_state *css, } #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +static int cpu_ht_ratio_write(struct cgroup_subsys_state *css, + struct cftype *cftype, u64 ht_ratio) +{ + struct task_group *tg = css_tg(css); + int cpu; + + if (ht_ratio < 100 || ht_ratio > 200) + return -1; + + if (tg == &root_task_group) + return -1; + + tg->ht_ratio = ht_ratio; + for_each_online_cpu(cpu) { + struct sched_entity *se = tg->se[cpu]; + + se->ht_ratio = ht_ratio; + } + + return 0; +} + +static u64 cpu_ht_ratio_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct task_group *tg = css_tg(css); + + return tg->ht_ratio; +} +#endif + static struct cftype cpu_legacy_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11235,8 +11581,13 @@ static struct cftype cpu_legacy_files[] = { }, { .name = "cfs_burst_us", - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 = cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, + }, + { + .name = "cfs_init_buffer_us", + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, }, { .name = "stat", @@ -11272,6 +11623,13 @@ static struct cftype cpu_legacy_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, #endif { } /* Terminate */ }; @@ -11283,20 +11641,24 @@ static int cpu_extra_stat_show(struct seq_file *sf, { struct task_group *tg = css_tg(css); struct cfs_bandwidth *cfs_b = &tg->cfs_bandwidth; - u64 throttled_usec, burst_usec; + u64 throttled_usec, current_bw_usec, burst_usec; throttled_usec = cfs_b->throttled_time; do_div(throttled_usec, NSEC_PER_USEC); + current_bw_usec = cfs_b->runtime; + do_div(current_bw_usec, NSEC_PER_USEC); burst_usec = cfs_b->burst_time; do_div(burst_usec, NSEC_PER_USEC); seq_printf(sf, "nr_periods %d\n" "nr_throttled %d\n" "throttled_usec %llu\n" + "current_bw_usec %llu\n" "nr_bursts %d\n" "burst_usec %llu\n", cfs_b->nr_periods, cfs_b->nr_throttled, - throttled_usec, cfs_b->nr_burst, burst_usec); + throttled_usec, current_bw_usec, cfs_b->nr_burst, + burst_usec); } #endif return 0; @@ -11428,6 +11790,7 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct task_group *tg = css_tg(of_css(of)); + u64 init_buffer = tg_get_cfs_init_buffer(tg); u64 period = tg_get_cfs_period(tg); u64 burst = tg_get_cfs_burst(tg); u64 quota; @@ -11435,11 +11798,304 @@ static ssize_t cpu_max_write(struct kernfs_open_file *of, ret = cpu_period_quota_parse(buf, &period, "a); if (!ret) - ret = tg_set_cfs_bandwidth(tg, period, quota, burst); + ret = tg_set_cfs_bandwidth(tg, period, quota, burst, init_buffer); return ret ?: nbytes; } #endif +#ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_TRUE(cpu_no_sched_lat); +static int cpu_sched_lat_enabled_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", 
!static_key_enabled(&cpu_no_sched_lat)); + return 0; +} + +static int cpu_sched_lat_enabled_open(struct inode *inode, + struct file *file) +{ + return single_open(file, cpu_sched_lat_enabled_show, NULL); +} + +static ssize_t cpu_sched_lat_enabled_write(struct file *file, + const char __user *ubuf, + size_t count, loff_t *ppos) +{ + char val = -1; + int ret = count; + + if (count < 1 || *ppos) { + ret = -EINVAL; + goto out; + } + + if (copy_from_user(&val, ubuf, 1)) { + ret = -EFAULT; + goto out; + } + + switch (val) { + case '0': + static_branch_enable(&cpu_no_sched_lat); + break; + case '1': + static_branch_disable(&cpu_no_sched_lat); + break; + default: + ret = -EINVAL; + } + +out: + return ret; +} + +static const struct proc_ops cpu_sched_lat_enabled_fops = { + .proc_open = cpu_sched_lat_enabled_open, + .proc_read = seq_read, + .proc_write = cpu_sched_lat_enabled_write, + .proc_lseek = seq_lseek, + .proc_release = single_release, +}; + +static int __init init_cpu_sched_lat_enabled(void) +{ + struct proc_dir_entry *ca_dir, *sched_lat_enabled_file; + + ca_dir = proc_mkdir("cpusli", NULL); + if (!ca_dir) + return -ENOMEM; + + sched_lat_enabled_file = proc_create("sched_lat_enabled", 0600, + ca_dir, &cpu_sched_lat_enabled_fops); + if (!sched_lat_enabled_file) { + remove_proc_entry("cpusli", NULL); + return -ENOMEM; + } + + return 0; +} +device_initcall(init_cpu_sched_lat_enabled); + +static inline enum sched_lat_count_t get_sched_lat_count_idx(u64 msecs) +{ + if (msecs < 1) + return SCHED_LAT_0_1; + if (msecs < 10) + return SCHED_LAT_0_1 + (msecs + 2) / 3; + if (msecs < 50) + return SCHED_LAT_7_10 + msecs / 10; + if (msecs < 100) + return SCHED_LAT_50_100; + if (msecs < 1000) + return SCHED_LAT_100_500 + (msecs / 500); + if (msecs < 10000) + return SCHED_LAT_1000_5000 + (msecs / 5000); + + return SCHED_LAT_10000_INF; +} + +struct task_group *cgroup_tg(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpu_cgrp_id), + struct task_group, css); +} + +void task_cpu_update_block(struct task_struct *tsk, u64 runtime) +{ + int idx; + enum sched_lat_stat_item s; + struct task_group *tg; + unsigned int msecs; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = css_tg(task_css(tsk, cpu_cgrp_id)); + if (!tg) { + rcu_read_unlock(); + return; + } + if (tsk->in_iowait) + s = SCHED_LAT_IOBLOCK; + else + s = SCHED_LAT_BLOCK; + + msecs = runtime >> 20; /* >> 20 approximates ns to ms, to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], runtime); + rcu_read_unlock(); +} + +void cpu_update_latency(struct sched_entity *se, u64 delta) +{ + int idx; + enum sched_lat_stat_item s; + unsigned int msecs; + struct task_group *tg; + + if (static_branch_likely(&cpu_no_sched_lat)) + return; + + rcu_read_lock(); + tg = se->cfs_rq->tg; + if (!tg) { + rcu_read_unlock(); + return; + } + if (entity_is_task(se)) + s = SCHED_LAT_WAIT; + else + s = SCHED_LAT_CGROUP_WAIT; + + msecs = delta >> 20; /* >> 20 approximates ns to ms, to speed up */ + idx = get_sched_lat_count_idx(msecs); + this_cpu_inc(tg->lat_stat_cpu->item[s][idx]); + this_cpu_inc(tg->lat_stat_cpu->item[s][SCHED_LAT_NR]); + this_cpu_add(tg->lat_stat_cpu->item[s][SCHED_LAT_TOTAL], delta); + rcu_read_unlock(); +} + +#define SCHED_LAT_STAT_SMP_WRITE(name, sidx) \ +static void smp_write_##name(void *info) \ +{ \ + struct task_group *tg = (struct task_group *)info; \ + int i; \ + \ + for
(i = SCHED_LAT_0_1; i < SCHED_LAT_NR_COUNT; i++) \ + this_cpu_write(tg->lat_stat_cpu->item[sidx][i], 0); \ +} \ + +SCHED_LAT_STAT_SMP_WRITE(sched_wait_latency, SCHED_LAT_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_wait_cgroup_latency, SCHED_LAT_CGROUP_WAIT); +SCHED_LAT_STAT_SMP_WRITE(sched_block_latency, SCHED_LAT_BLOCK); +SCHED_LAT_STAT_SMP_WRITE(sched_ioblock_latency, SCHED_LAT_IOBLOCK); + +smp_call_func_t smp_sched_lat_write_funcs[] = { + smp_write_sched_wait_latency, + smp_write_sched_block_latency, + smp_write_sched_ioblock_latency, + smp_write_sched_wait_cgroup_latency +}; + +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + struct cgroup *cgrp = css->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + enum sched_lat_stat_item idx = cft->private; + smp_call_func_t func = smp_sched_lat_write_funcs[idx]; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + if (val != 0) + return -EINVAL; + + func((void *)tg); + smp_call_function(func, (void *)tg, 1); + + return 0; +} + +static u64 sched_lat_stat_gather(struct task_group *tg, + enum sched_lat_stat_item sidx, + enum sched_lat_count_t cidx) +{ + u64 sum = 0; + int cpu; + + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(tg->lat_stat_cpu, cpu)->item[sidx][cidx]; + + return sum; +} + +int sched_lat_stat_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = cgroup_tg(seq_css(sf)->cgroup); + enum sched_lat_stat_item s = seq_cft(sf)->private; + + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + return -EOPNOTSUPP; + } + + /* CFS scheduling latency cgroup and task histograms */ + seq_printf(sf, "0-1ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_0_1)); + seq_printf(sf, "1-4ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1_4)); + seq_printf(sf, "4-7ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_4_7)); + seq_printf(sf, "7-10ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_7_10)); + seq_printf(sf, "10-20ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10_20)); + seq_printf(sf, "20-30ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_20_30)); + seq_printf(sf, "30-40ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_30_40)); + seq_printf(sf, "40-50ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_40_50)); + seq_printf(sf, "50-100ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_50_100)); + seq_printf(sf, "100-500ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_100_500)); + seq_printf(sf, "500-1000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_500_1000)); + seq_printf(sf, "1000-5000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_1000_5000)); + seq_printf(sf, "5000-10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_5000_10000)); + seq_printf(sf, ">=10000ms: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_10000_INF)); + seq_printf(sf, "total(ms): \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_TOTAL) / 1000000); + seq_printf(sf, "nr: \t%llu\n", + sched_lat_stat_gather(tg, s, SCHED_LAT_NR)); + + return 0; +} + +static int cpu_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct task_group *tg = css_tg(seq_css(sf)); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; +
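/* Fold this CPU's entity schedstats into the group-wide sums. */ +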
stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype cpu_files[] = { #ifdef CONFIG_FAIR_GROUP_SCHED { @@ -11471,8 +12127,14 @@ static struct cftype cpu_files[] = { { .name = "max.burst", .flags = CFTYPE_NOT_ON_ROOT, - .read_u64 = cpu_cfs_burst_read_u64, - .write_u64 = cpu_cfs_burst_write_u64, + .read_s64 = cpu_cfs_burst_read_s64, + .write_s64 = cpu_cfs_burst_write_s64, + }, + { + .name = "max.init_buffer", + .flags = CFTYPE_NOT_ON_ROOT, + .read_s64 = cpu_cfs_init_buffer_read_s64, + .write_s64 = cpu_cfs_init_buffer_write_s64, }, #endif #ifdef CONFIG_UCLAMP_TASK_GROUP @@ -11488,6 +12150,43 @@ static struct cftype cpu_files[] = { .seq_show = cpu_uclamp_max_show, .write = cpu_uclamp_max_write, }, +#endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + { + .name = "ht_ratio", + .read_u64 = cpu_ht_ratio_read, + .write_u64 = cpu_ht_ratio_write, + }, +#endif +#ifdef CONFIG_SCHED_SLI + { + .name = "sched_cfs_statistics", + .seq_show = cpu_sched_cfs_show, + }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, #endif { } /* terminate */ }; diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c index a57fd8f27498f60eefc9b617a57712e1eaae9c4c..924859051b5f14f3c8645c48e49167edac7c1f13 100644 --- a/kernel/sched/core_sched.c +++ b/kernel/sched/core_sched.c @@ -237,38 +237,46 @@ int sched_core_share_pid(unsigned int cmd, pid_t pid, enum pid_type type, #ifdef CONFIG_SCHEDSTATS /* REQUIRES: rq->core's clock recently updated. 
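+ * Charges the sibling-idle time elapsed since core_sibidle_start to + * every task currently running on this core.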
*/ -void __sched_core_account_forceidle(struct rq *rq) +void __sched_core_account_sibidle(struct rq *rq) { const struct cpumask *smt_mask = cpu_smt_mask(cpu_of(rq)); u64 delta, now = rq_clock(rq->core); + u64 delta_task, now_task = rq_clock_task(rq->core); struct rq *rq_i; struct task_struct *p; int i; lockdep_assert_rq_held(rq); - WARN_ON_ONCE(!rq->core->core_forceidle_count); + WARN_ON_ONCE(!rq->core->core_sibidle_count); - if (rq->core->core_forceidle_start == 0) - return; + /* can't be forced idle without a running task */ + WARN_ON_ONCE(!rq->core->core_sibidle_occupation && + rq->core->core_forceidle_count); + + if (rq->core->core_sibidle_start == 0 || + rq->core->core_sibidle_occupation == 0) + goto out; - delta = now - rq->core->core_forceidle_start; + delta = now - rq->core->core_sibidle_start; + delta_task = now_task - rq->core->core_sibidle_start_task; if (unlikely((s64)delta <= 0)) - return; + goto out; - rq->core->core_forceidle_start = now; + rq->core->core_sibidle_start = now; + rq->core->core_sibidle_start_task = now_task; - if (WARN_ON_ONCE(!rq->core->core_forceidle_occupation)) { - /* can't be forced idle without a running task */ - } else if (rq->core->core_forceidle_count > 1 || - rq->core->core_forceidle_occupation > 1) { + if (rq->core->core_sibidle_count > 1 || + rq->core->core_sibidle_occupation > 1) { /* * For larger SMT configurations, we need to scale the charged * forced idle amount since there can be more than one forced * idle sibling and more than one running cookied task. */ - delta *= rq->core->core_forceidle_count; - delta = div_u64(delta, rq->core->core_forceidle_occupation); + delta *= rq->core->core_sibidle_count; + delta = div_u64(delta, rq->core->core_sibidle_occupation); + delta_task *= rq->core->core_sibidle_count; + delta_task = div_u64(delta_task, rq->core->core_sibidle_occupation); } for_each_cpu(i, smt_mask) { @@ -279,22 +287,32 @@ void __sched_core_account_forceidle(struct rq *rq) continue; /* - * Note: this will account forceidle to the current cpu, even + * Note: this will account sibidle to the current cpu, even * if it comes from our SMT sibling. 
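+ * delta is measured on rq_clock() while delta_task is measured on + * rq_clock_task(), hence the two separate sums.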
*/ - __account_forceidle_time(p, delta); + __account_sibidle_time(p, delta, delta_task, + !!rq->core->core_forceidle_count); + account_ht_aware_quota(p, delta_task); + } + +out:; +#ifdef CONFIG_SCHED_ACPU + for_each_cpu(i, smt_mask) { + rq_i = cpu_rq(i); + rq_i->last_acpu_update_time = now; + } +#endif } void __sched_core_tick(struct rq *rq) { - if (!rq->core->core_forceidle_count) + if (!rq->core->core_sibidle_count) return; if (rq != rq->core) update_rq_clock(rq->core); - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } #endif /* CONFIG_SCHEDSTATS */ diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c index 0de9dda099496fddb804f6ebb5a7fa0f275a72e5..6d87a617d00eae4d6abdb09a3a62f8878a586220 100644 --- a/kernel/sched/cpuacct.c +++ b/kernel/sched/cpuacct.c @@ -20,14 +20,44 @@ static const char * const cpuacct_stat_desc[] = { [CPUACCT_STAT_SYSTEM] = "system", }; +struct cpuacct_prev_cputime { + struct prev_cputime prev_cputime1; /* utime and stime */ + struct prev_cputime prev_cputime2; /* user and nice */ +} ____cacheline_aligned; + +#ifdef CONFIG_SCHED_SLI +/* Maintain various statistics */ +struct cpuacct_alistats { + u64 nr_migrations; +} ____cacheline_aligned; +#endif + + /* track CPU usage of a group of tasks and its child groups */ struct cpuacct { struct cgroup_subsys_state css; /* cpuusage holds pointer to a u64-type object on every CPU */ u64 __percpu *cpuusage; + struct cpuacct_prev_cputime __percpu *prev_cputime; struct kernel_cpustat __percpu *cpustat; +#ifdef CONFIG_SCHED_SLI + struct cpuacct_alistats __percpu *alistats; + struct list_head sli_list; + bool sli_enabled; + u64 next_load_update; +#endif + unsigned long avenrun[3]; +#ifdef CONFIG_SCHED_SLI + unsigned long avenrun_r[3]; +#endif }; +static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) +{ + return container_of(global_cgroup_css(cgrp, cpuacct_cgrp_id), + struct cpuacct, css); +} + static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css) { return css ?
container_of(css, struct cpuacct, css) : NULL; @@ -45,16 +75,92 @@ static inline struct cpuacct *parent_ca(struct cpuacct *ca) } static DEFINE_PER_CPU(u64, root_cpuacct_cpuusage); +static DEFINE_PER_CPU(struct cpuacct_prev_cputime, root_cpuacct_prev_cputime); +#ifdef CONFIG_SCHED_SLI +static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats); +#endif + static struct cpuacct root_cpuacct = { .cpustat = &kernel_cpustat, + .prev_cputime = &root_cpuacct_prev_cputime, .cpuusage = &root_cpuacct_cpuusage, +#ifdef CONFIG_SCHED_SLI + .alistats = &root_alistats, +#endif }; +#ifdef CONFIG_SCHED_SLI + +void task_ca_increase_nr_migrations(struct task_struct *tsk) +{ + struct cpuacct *ca; + + rcu_read_lock(); + ca = task_ca(tsk); + if (ca) + this_cpu_ptr(ca->alistats)->nr_migrations++; + rcu_read_unlock(); +} + +#endif + +#ifdef CONFIG_SCHED_SLI +static DEFINE_SPINLOCK(sli_ca_lock); +LIST_HEAD(sli_ca_list); + +static void ca_enable_sli(struct cpuacct *ca, bool val) +{ + spin_lock(&sli_ca_lock); + if (val && !READ_ONCE(ca->sli_enabled)) + list_add_tail_rcu(&ca->sli_list, &sli_ca_list); + else if (!val && READ_ONCE(ca->sli_enabled)) + list_del_rcu(&ca->sli_list); + WRITE_ONCE(ca->sli_enabled, val); + spin_unlock(&sli_ca_lock); +} + +void create_rich_container_reaper(struct task_struct *tsk) +{ + struct cpuacct *ca; + struct cpuacct *parent_ca; + struct cgroup_subsys_state *css; + + if (thread_group_leader(tsk)) { + rcu_read_lock(); + css = task_css(tsk, cpuacct_cgrp_id); + ca = css_ca(css); + if (!ca || !in_rich_container(tsk)) { + rcu_read_unlock(); + return; + } + + ca_enable_sli(ca, true); + parent_ca = css_ca(css->parent); + if (parent_ca && parent_ca != &root_cpuacct) + ca_enable_sli(parent_ca, true); + rcu_read_unlock(); + } +} + +static int enable_sli_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val) +{ + ca_enable_sli(css_ca(css), !!val); + return 0; +} + +static u64 enable_sli_read(struct cgroup_subsys_state *css, struct cftype *cft) +{ + return READ_ONCE(css_ca(css)->sli_enabled); +} +#endif + /* Create a new CPU accounting group */ static struct cgroup_subsys_state * cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) { struct cpuacct *ca; + int i; if (!parent_css) return &root_cpuacct.css; @@ -71,8 +177,37 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) if (!ca->cpustat) goto out_free_cpuusage; + ca->prev_cputime = alloc_percpu(struct cpuacct_prev_cputime); + if (!ca->prev_cputime) + goto out_free_cpustat; + +#ifdef CONFIG_SCHED_SLI + INIT_LIST_HEAD(&ca->sli_list); + + ca->alistats = alloc_percpu(struct cpuacct_alistats); + if (!ca->alistats) + goto out_free_pre_cputime; +#endif + + for_each_possible_cpu(i) { + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime1); + prev_cputime_init( + &per_cpu_ptr(ca->prev_cputime, i)->prev_cputime2); + } + + ca->avenrun[0] = ca->avenrun[1] = ca->avenrun[2] = 0; +#ifdef CONFIG_SCHED_SLI + ca->avenrun_r[0] = ca->avenrun_r[1] = ca->avenrun_r[2] = 0; +#endif return &ca->css; +#ifdef CONFIG_SCHED_SLI +out_free_pre_cputime: + free_percpu(ca->prev_cputime); +#endif +out_free_cpustat: + free_percpu(ca->cpustat); out_free_cpuusage: free_percpu(ca->cpuusage); out_free_ca: @@ -81,13 +216,24 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css) return ERR_PTR(-ENOMEM); } +#ifdef CONFIG_SCHED_SLI +static void cpuacct_css_offline(struct cgroup_subsys_state *css) +{ + ca_enable_sli(css_ca(css), false); +} +#endif + /* Destroy an existing CPU accounting group */ static void cpuacct_css_free(struct 
cgroup_subsys_state *css) { struct cpuacct *ca = css_ca(css); + free_percpu(ca->prev_cputime); free_percpu(ca->cpustat); free_percpu(ca->cpuusage); +#ifdef CONFIG_SCHED_SLI + free_percpu(ca->alistats); +#endif kfree(ca); } @@ -289,6 +435,495 @@ static int cpuacct_stats_show(struct seq_file *sf, void *v) return 0; } +#ifdef CONFIG_SCHED_SLI +#ifndef arch_idle_time +#define arch_idle_time(cpu) 0 +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu); + +static void __get_cgroup_avenrun(struct cpuacct *ca, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + unsigned long *avenrun; + + if (running) + avenrun = ca->avenrun_r; + else + avenrun = ca->avenrun; + + loads[0] = (avenrun[0] + offset) << shift; + loads[1] = (avenrun[1] + offset) << shift; + loads[2] = (avenrun[2] + offset) << shift; +} + +static inline unsigned long nr_uninterruptible(void) +{ + unsigned long i, sum = 0; + + for_each_possible_cpu(i) + sum += cpu_rq(i)->nr_uninterruptible; + + /* + * Since we read the counters lockless, it might be slightly + * inaccurate. Do not allow it to go below zero though: + */ + if (unlikely((long)sum < 0)) + sum = 0; + + return sum; +} + +#ifdef CONFIG_CFS_BANDWIDTH +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return tg->cfs_rq[cpu]->throttle_count; +} +#else +static inline bool tg_cfs_throttled(struct task_group *tg, int cpu) +{ + return false; +} +#endif + +#ifdef CONFIG_RT_GROUP_SCHED +static inline bool tg_rt_throttled(struct task_group *tg, int cpu) +{ + return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted; +} +#endif + +static unsigned long ca_running(struct cpuacct *ca, int cpu) +{ + unsigned long nr_running = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return 0; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out; + + if (!tg_cfs_throttled(tg, cpu)) + nr_running += tg->cfs_rq[cpu]->h_nr_running; +#ifdef CONFIG_RT_GROUP_SCHED + if (!tg_rt_throttled(tg, cpu)) + nr_running += tg->rt_rq[cpu]->rt_nr_running; +#endif + /* SCHED_DEADLINE doesn't support cgroup yet */ + +out: + rcu_read_unlock(); + return nr_running; +} + +static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu) +{ + unsigned long nr = 0; + struct cgroup *cgrp = ca->css.cgroup; + struct task_group *tg; + + /* Make sure it is only called for non-root cpuacct */ + if (ca == &root_cpuacct) + return nr; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) + goto out_rcu_unlock; + + nr = tg->cfs_rq[cpu]->nr_uninterruptible; +#ifdef CONFIG_RT_GROUP_SCHED + nr += tg->rt_rq[cpu]->nr_uninterruptible; +#endif + +out_rcu_unlock: + rcu_read_unlock(); + return nr; +} + +void cgroup_idle_start(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); + + write_seqlock(&se->idle_seqlock); + __schedstat_set(se->cg_idle_start, clock); + write_sequnlock(&se->idle_seqlock); + + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) + __schedstat_set(se->cg_iowait_start, clock); + spin_unlock(&se->iowait_lock); + + local_irq_restore(flags); +} + +void cgroup_idle_end(struct sched_entity *se) +{ + unsigned long flags; + u64 clock; + u64 idle_start, iowait_start; + + if (!schedstat_enabled()) + return; + + clock = __rq_clock_broken(se->cfs_rq->rq); + + local_irq_save(flags); 
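+ /* Close the open idle and iowait windows and fold them into the sums. */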
+ + write_seqlock(&se->idle_seqlock); + idle_start = schedstat_val(se->cg_idle_start); + __schedstat_add(se->cg_idle_sum, clock - idle_start); + __schedstat_set(se->cg_idle_start, 0); + write_sequnlock(&se->idle_seqlock); + + spin_lock(&se->iowait_lock); + if (schedstat_val(se->cg_nr_iowait)) { + iowait_start = schedstat_val(se->cg_iowait_start); + __schedstat_add(se->cg_iowait_sum, clock - iowait_start); + __schedstat_set(se->cg_iowait_start, 0); + } + spin_unlock(&se->iowait_lock); + + local_irq_restore(flags); +} + +void cpuacct_cpuset_changed(struct cgroup *cgrp, struct cpumask *deleted, + struct cpumask *added) +{ + struct task_group *tg; + struct sched_entity *se; + int cpu; + + if (!schedstat_enabled()) + return; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + + if (!tg) { + rcu_read_unlock(); + return; + } + + if (added) { + /* Mark newly added cpus as newly-idle */ + for_each_cpu(cpu, added) { + se = tg->se[cpu]; + cgroup_idle_start(se); + __schedstat_add(se->cg_ineffective_sum, + __rq_clock_broken(cpu_rq(cpu)) - + se->cg_ineffective_start); + __schedstat_set(se->cg_ineffective_start, 0); + } + } + + if (deleted) { + /* Mark ineffective_cpus as idle-invalid */ + for_each_cpu(cpu, deleted) { + se = tg->se[cpu]; + cgroup_idle_end(se); + /* Use __rq_clock_broken to avoid warning */ + __schedstat_set(se->cg_ineffective_start, + __rq_clock_broken(cpu_rq(cpu))); + } + } + + rcu_read_unlock(); +} + +static void cpuacct_calc_load(struct cpuacct *acct) +{ + long active = 0, active_r = 0, nr_r; + int cpu; + + if (acct != &root_cpuacct) { + for_each_possible_cpu(cpu) { + nr_r = ca_running(acct, cpu); + active += nr_r; + active_r += nr_r; + active += ca_uninterruptible(acct, cpu); + } + active = active > 0 ? active * FIXED_1 : 0; + acct->avenrun[0] = calc_load(acct->avenrun[0], EXP_1, active); + acct->avenrun[1] = calc_load(acct->avenrun[1], EXP_5, active); + acct->avenrun[2] = calc_load(acct->avenrun[2], EXP_15, active); + + active_r = active_r > 0 ? active_r * FIXED_1 : 0; + acct->avenrun_r[0] = calc_load(acct->avenrun_r[0], + EXP_1, active_r); + acct->avenrun_r[1] = calc_load(acct->avenrun_r[1], + EXP_5, active_r); + acct->avenrun_r[2] = calc_load(acct->avenrun_r[2], + EXP_15, active_r); + } else { + acct->avenrun[0] = avenrun[0]; + acct->avenrun[1] = avenrun[1]; + acct->avenrun[2] = avenrun[2]; + + acct->avenrun_r[0] = avenrun_r[0]; + acct->avenrun_r[1] = avenrun_r[1]; + acct->avenrun_r[2] = avenrun_r[2]; + } +} + +/* + * We walk the cpuaccts whose SLI is enabled to perform per-cgroup load + * calculation; the overhead is acceptable as long as SLI is not enabled + * for most of the cgroups.
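+ * List mutations happen in ca_enable_sli() under sli_ca_lock; walkers + * rely on RCU alone.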
+ */ +void calc_cgroup_load(void) +{ + struct cpuacct *ca; + + rcu_read_lock(); + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) + cpuacct_calc_load(ca); + rcu_read_unlock(); +} + +static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu, + struct task_group *tg, struct cpuacct_usage_result *res) +{ + struct kernel_cpustat *kcpustat; + u64 *cpuusage; + struct cpuacct_prev_cputime *prev_cputime; + struct task_cputime cputime; + u64 tick_user, tick_nice, tick_sys, left, right; + struct sched_entity *se; + + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + if (unlikely(!tg)) { + memset(res, 0, sizeof(*res)); + return; + } + + cpuusage = per_cpu_ptr(ca->cpuusage, cpu); + + se = tg->se[cpu]; + prev_cputime = per_cpu_ptr(ca->prev_cputime, cpu); + tick_user = kcpustat->cpustat[CPUTIME_USER]; + tick_nice = kcpustat->cpustat[CPUTIME_NICE]; + tick_sys = kcpustat->cpustat[CPUTIME_SYSTEM]; + + /* Calculate system run time */ + cputime.sum_exec_runtime = *cpuusage; + cputime.utime = tick_user + tick_nice; + cputime.stime = tick_sys; + cputime_adjust(&cputime, &prev_cputime->prev_cputime1, &left, &right); + res->system = right; + + /* Calculate user and nice run time */ + cputime.sum_exec_runtime = left; /* user + nice */ + cputime.utime = tick_user; + cputime.stime = tick_nice; + cputime_adjust(&cputime, &prev_cputime->prev_cputime2, &left, &right); + res->user = left; + res->nice = right; + + res->irq = kcpustat->cpustat[CPUTIME_IRQ]; + res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ]; + + if (se && schedstat_enabled()) { + unsigned int seq; + unsigned long flags; + u64 idle_start, ineff, ineff_start, elapse, complement; + u64 clock, iowait_start; + + do { + seq = read_seqbegin(&se->idle_seqlock); + res->idle = schedstat_val(se->cg_idle_sum); + idle_start = schedstat_val(se->cg_idle_start); + clock = cpu_clock(cpu); + if (idle_start && clock > idle_start) + res->idle += clock - idle_start; + } while (read_seqretry(&se->idle_seqlock, seq)); + + ineff = schedstat_val(se->cg_ineffective_sum); + ineff_start = schedstat_val(se->cg_ineffective_start); + if (ineff_start) + __schedstat_add(ineff, clock - ineff_start); + + spin_lock_irqsave(&se->iowait_lock, flags); + res->iowait = schedstat_val(se->cg_iowait_sum); + iowait_start = schedstat_val(se->cg_iowait_start); + if (iowait_start) + __schedstat_add(res->iowait, clock - iowait_start); + spin_unlock_irqrestore(&se->iowait_lock, flags); + + res->steal = 0; + + elapse = clock - schedstat_val(se->cg_init_time); + complement = res->idle + se->sum_exec_raw + ineff; + if (elapse > complement) + res->steal = elapse - complement; + + res->idle -= res->iowait; + } else { + res->idle = res->iowait = res->steal = 0; + } + + res->guest = kcpustat->cpustat[CPUTIME_GUEST]; + res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE]; +} + +static int cpuacct_proc_stats_show(struct seq_file *sf, void *v) +{ + struct cpuacct *ca = css_ca(seq_css(sf)); + struct cgroup *cgrp = seq_css(sf)->cgroup; + u64 user, nice, system, idle, iowait, irq, softirq, steal, guest; + u64 nr_migrations = 0; + struct cpuacct_alistats *alistats; + unsigned long load, avnrun[3], avnrun_r[3]; + unsigned long nr_run = 0, nr_uninter = 0; + int cpu; + + user = nice = system = idle = iowait = + irq = softirq = steal = guest = 0; + + if (ca != &root_cpuacct) { + struct cpuacct_usage_result res; + + for_each_possible_cpu(cpu) { + if (!housekeeping_cpu(cpu, HK_TYPE_DOMAIN)) + continue; + + rcu_read_lock(); + __cpuacct_get_usage_result(ca, cpu, + cgroup_tg(cgrp), &res); + rcu_read_unlock(); + + 
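/* Accumulate the adjusted per-CPU figures into cgroup-wide totals. */ +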
user += res.user; + nice += res.nice; + system += res.system; + irq += res.irq; + softirq += res.softirq; + steal += res.steal; + guest += res.guest; + guest += res.guest_nice; + iowait += res.iowait; + idle += res.idle; + + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; + nr_run += ca_running(ca, cpu); + nr_uninter += ca_uninterruptible(ca, cpu); + } + + __get_cgroup_avenrun(ca, avnrun, FIXED_1/200, 0, false); + __get_cgroup_avenrun(ca, avnrun_r, FIXED_1/200, 0, true); + } else { + struct kernel_cpustat *kcpustat; + + for_each_possible_cpu(cpu) { + kcpustat = per_cpu_ptr(ca->cpustat, cpu); + user += kcpustat->cpustat[CPUTIME_USER]; + nice += kcpustat->cpustat[CPUTIME_NICE]; + system += kcpustat->cpustat[CPUTIME_SYSTEM]; + irq += kcpustat->cpustat[CPUTIME_IRQ]; + softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ]; + guest += kcpustat->cpustat[CPUTIME_GUEST]; + guest += kcpustat->cpustat[CPUTIME_GUEST_NICE]; + idle += get_idle_time(kcpustat, cpu); + iowait += get_iowait_time(kcpustat, cpu); + steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL]; + alistats = per_cpu_ptr(ca->alistats, cpu); + nr_migrations += alistats->nr_migrations; + } + + nr_run = nr_running(); + nr_uninter = nr_uninterruptible(); + + get_avenrun(avnrun, FIXED_1/200, 0); + get_avenrun_r(avnrun_r, FIXED_1/200, 0); + } + + seq_printf(sf, "user %lld\n", nsec_to_clock_t(user)); + seq_printf(sf, "nice %lld\n", nsec_to_clock_t(nice)); + seq_printf(sf, "system %lld\n", nsec_to_clock_t(system)); + seq_printf(sf, "idle %lld\n", nsec_to_clock_t(idle)); + seq_printf(sf, "iowait %lld\n", nsec_to_clock_t(iowait)); + seq_printf(sf, "irq %lld\n", nsec_to_clock_t(irq)); + seq_printf(sf, "softirq %lld\n", nsec_to_clock_t(softirq)); + seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal)); + seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest)); + + load = LOAD_INT(avnrun[0]) * 100 + LOAD_FRAC(avnrun[0]); + seq_printf(sf, "load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[1]) * 100 + LOAD_FRAC(avnrun[1]); + seq_printf(sf, "load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun[2]) * 100 + LOAD_FRAC(avnrun[2]); + seq_printf(sf, "load average(15min) %lld\n", (u64)load); + + seq_printf(sf, "nr_running %lld\n", (u64)nr_run); + if ((long) nr_uninter < 0) + nr_uninter = 0; + seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter); + seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations); + + load = LOAD_INT(avnrun_r[0]) * 100 + LOAD_FRAC(avnrun_r[0]); + seq_printf(sf, "running load average(1min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[1]) * 100 + LOAD_FRAC(avnrun_r[1]); + seq_printf(sf, "running load average(5min) %lld\n", (u64)load); + load = LOAD_INT(avnrun_r[2]) * 100 + LOAD_FRAC(avnrun_r[2]); + seq_printf(sf, "running load average(15min) %lld\n", (u64)load); + + return 0; +} + +static int cpuacct_sched_cfs_show(struct seq_file *sf, void *v) +{ + struct cgroup *cgrp = seq_css(sf)->cgroup; + struct task_group *tg = cgroup_tg(cgrp); + struct sched_entity *se; + struct sched_statistics *stats; + int cpu; + u64 wait_max = 0, wait_sum = 0, wait_sum_other = 0, exec_sum = 0; + + if (!schedstat_enabled()) + goto out_show; + + rcu_read_lock(); + tg = cgroup_tg(cgrp); + if (unlikely(!tg)) { + WARN_ONCE(1, "cgroup \"cpu,cpuacct\" are not bound together"); + goto rcu_unlock_show; + } + + for_each_online_cpu(cpu) { + se = tg->se[cpu]; + if (!se) + continue; + stats = __schedstats_from_se(se); + exec_sum += schedstat_val(se->sum_exec_runtime); + wait_sum_other += + 
schedstat_val(stats->parent_wait_contrib); + wait_sum += schedstat_val(stats->wait_sum); + wait_max = max(wait_max, schedstat_val(stats->wait_max)); + } +rcu_unlock_show: + rcu_read_unlock(); +out_show: + /* [Serve time] [On CPU time] [Queue other time] [Queue sibling time] [Queue max time] */ + seq_printf(sf, "%lld %lld %lld %lld %lld\n", + exec_sum + wait_sum, exec_sum, wait_sum_other, + wait_sum - wait_sum_other, wait_max); + + return 0; +} +#endif + static struct cftype files[] = { { .name = "usage", @@ -323,6 +958,45 @@ static struct cftype files[] = { .name = "stat", .seq_show = cpuacct_stats_show, }, +#ifdef CONFIG_SCHED_SLI + { + .name = "proc_stat", + .seq_show = cpuacct_proc_stats_show, + }, + { + .name = "enable_sli", + .read_u64 = enable_sli_read, + .write_u64 = enable_sli_write + }, + { + .name = "sched_cfs_statistics", + .seq_show = cpuacct_sched_cfs_show, + }, + { + .name = "wait_latency", + .private = SCHED_LAT_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "cgroup_wait_latency", + .private = SCHED_LAT_CGROUP_WAIT, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "block_latency", + .private = SCHED_LAT_BLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, + { + .name = "ioblock_latency", + .private = SCHED_LAT_IOBLOCK, + .write_u64 = sched_lat_stat_write, + .seq_show = sched_lat_stat_show + }, +#endif { } /* terminate */ }; @@ -355,9 +1029,364 @@ void cpuacct_account_field(struct task_struct *tsk, int index, u64 val) __this_cpu_add(ca->cpustat->cpustat[index], val); } +static void cpuacct_cgroup_attach(struct cgroup_taskset *tset) +{ + struct task_struct *task; + struct cgroup_subsys_state *css; + + cgroup_taskset_for_each(task, css, tset) + if (task->pid && is_child_reaper(task_pid(task))) + create_rich_container_reaper(task); +} + struct cgroup_subsys cpuacct_cgrp_subsys = { .css_alloc = cpuacct_css_alloc, .css_free = cpuacct_css_free, +#ifdef CONFIG_SCHED_SLI + .css_offline = cpuacct_css_offline, +#endif + .attach = cpuacct_cgroup_attach, .legacy_cftypes = files, .early_init = true, }; + +#ifdef CONFIG_SCHED_SLI +static DEFINE_STATIC_KEY_FALSE(async_load_calc); + +bool async_load_calc_enabled(void) +{ + return static_branch_likely(&async_load_calc); +} + +static int async_load_calc_show(struct seq_file *m, void *v) +{ + seq_printf(m, "%d\n", async_load_calc_enabled()); + return 0; +} + +static int async_load_calc_open(struct inode *inode, struct file *file) +{ + return single_open(file, async_load_calc_show, NULL); +} + +static void async_calc_cgroup_load(void) +{ + int cnt; + struct cpuacct *ca; + +again: + cnt = 1; + rcu_read_lock(); + list_for_each_entry_rcu(ca, &sli_ca_list, sli_list) { + unsigned long next_update = ca->next_load_update; + + /* + * Need per ca check since after break the list + * could have been changed, otherwise the loop + * will be endless. 
+		 */
+		if (time_before(jiffies, next_update + 10))
+			continue;
+
+		cpuacct_calc_load(ca);
+		ca->next_load_update = jiffies + LOAD_FREQ;
+
+		/* Take a break after every 100 cpuacct groups */
+		if (cnt++ >= 100) {
+			rcu_read_unlock();
+			cond_resched();
+			goto again;
+		}
+	}
+	rcu_read_unlock();
+}
+
+int load_calc_func(void *unused)
+{
+	unsigned long next_update = jiffies + LOAD_FREQ;
+
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ/5);
+		set_current_state(TASK_RUNNING);
+
+		if (time_before(jiffies, next_update + 10))
+			continue;
+
+		async_calc_cgroup_load();
+		next_update += LOAD_FREQ;
+	}
+
+	return 0;
+}
+
+static struct task_struct *load_calc_p;
+
+static int mod_async_load_calc(bool enable)
+{
+	if (enable == async_load_calc_enabled())
+		return 0;
+
+	if (enable) {
+		/* kthread_create() returns an ERR_PTR on failure, never NULL */
+		load_calc_p = kthread_create(load_calc_func, NULL, "load_calc");
+		if (IS_ERR(load_calc_p)) {
+			int err = PTR_ERR(load_calc_p);
+
+			load_calc_p = NULL;
+			return err;
+		}
+
+		wake_up_process(load_calc_p);
+		static_branch_enable(&async_load_calc);
+	} else {
+		kthread_stop(load_calc_p);
+		load_calc_p = NULL;
+
+		static_branch_disable(&async_load_calc);
+	}
+
+	return 0;
+}
+
+static DEFINE_MUTEX(load_calc_mutex);
+
+static ssize_t async_load_calc_write(struct file *file,
+				     const char __user *ubuf,
+				     size_t count, loff_t *ppos)
+{
+	char val = -1;
+	int ret = 0;
+
+	if (count < 1 || *ppos) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (copy_from_user(&val, ubuf, 1)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	mutex_lock(&load_calc_mutex);
+
+	switch (val) {
+	case '0':
+		ret = mod_async_load_calc(false);
+		break;
+	case '1':
+		ret = mod_async_load_calc(true);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	mutex_unlock(&load_calc_mutex);
+out:
+	return ret ? ret : count;
+}
+
+static const struct proc_ops async_load_calc_opt = {
+	.proc_open	= async_load_calc_open,
+	.proc_read	= seq_read,
+	.proc_write	= async_load_calc_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+static int __init async_load_calc_init(void)
+{
+	if (!proc_create("async_load_calc", 0600, NULL,
+			 &async_load_calc_opt)) {
+		pr_err("Failed to register async_load_calc interface\n");
+		return 0;
+	}
+
+	if (mod_async_load_calc(true))
+		pr_err("Failed to enable async_load_calc\n");
+
+	return 0;
+}
+late_initcall_sync(async_load_calc_init);
+#endif
+
+#ifdef CONFIG_RICH_CONTAINER
+
+/* 0 - cpu quota; 1 - cpuset.cpus; 2 - cpu.shares */
+int sysctl_rich_container_cpuinfo_source;
+/* Number of shares that counts as one CPU when the source is cpu.shares */
+unsigned int sysctl_rich_container_cpuinfo_sharesbase = 1024;
+
+static inline struct task_group *css_tg(struct cgroup_subsys_state *css)
+{
+	return css ?
container_of(css, struct task_group, css) : NULL; +} + +static inline struct task_group *task_tg(struct task_struct *tsk) +{ + return css_tg(task_css(tsk, cpu_cgrp_id)); +} + +void rich_container_get_cpus(struct task_struct *tsk, struct cpumask *pmask) +{ + struct task_group *tg; + int i, cpus; + + /* cfs quota source */ + if (sysctl_rich_container_cpuinfo_source == 0) { + long quota, period; + + rcu_read_lock(); + tg = task_tg(tsk); + quota = tg_get_cfs_quota(tg); + period = tg_get_cfs_period(tg); + rcu_read_unlock(); + + if (quota == -1) { + /* Fallback to use cpuset.cpus if quota not set */ + goto cpuset_source; + } else { + /* period can't be 0 */ + cpus = (quota + period - 1) / period; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + } + + return; + } + + /* cpu.shares source */ + if (sysctl_rich_container_cpuinfo_source == 2) { + unsigned long shares; + + rcu_read_lock(); + tg = task_tg(tsk); + shares = scale_load_down(tg->shares); + rcu_read_unlock(); + + /* sysctl_rich_container_cpuinfo_sharesbase can't be 0 */ + cpus = (shares + sysctl_rich_container_cpuinfo_sharesbase - 1) / + sysctl_rich_container_cpuinfo_sharesbase; + cpus = clamp(cpus, 1, (int)num_online_cpus()); + cpumask_clear(pmask); + for (i = 0; i < cpus; i++) + cpumask_set_cpu(i, pmask); + + return; + } + +cpuset_source: + /* cpuset.cpus source */ + cpuset_cpus_allowed(tsk, pmask); +} + +bool child_cpuacct(struct task_struct *tsk) +{ + struct cpuacct *ca = task_ca(tsk); + + if (ca && ca != &root_cpuacct) + return true; + + return false; +} + + +bool check_rich_container(unsigned int cpu, unsigned int *index, + bool *rich_container, unsigned int *total) +{ + struct cpumask cpuset_allowed; + struct task_struct __maybe_unused *scenario; + bool in_rich; + int i, id = 0; + + rcu_read_lock(); + in_rich = in_rich_container(current); + rcu_read_unlock(); + if (!in_rich) + return false; + + *rich_container = true; + + read_lock(&tasklist_lock); + scenario = rich_container_get_scenario(); + get_task_struct(scenario); + read_unlock(&tasklist_lock); + rich_container_get_cpus(scenario, &cpuset_allowed); + put_task_struct(scenario); + + *total = cpumask_weight(&cpuset_allowed); + if (cpumask_test_cpu(cpu, &cpuset_allowed)) { + for_each_cpu(i, &cpuset_allowed) { + if (i == cpu) + break; + id++; + } + *index = id; + return false; + } + + /* Hide this cpu in the container */ + return true; +} + +void rich_container_source(enum rich_container_source *from) +{ + if (sysctl_rich_container_source == 1) + *from = RICH_CONTAINER_REAPER; + else + *from = RICH_CONTAINER_CURRENT; +} + +void rich_container_get_usage(enum rich_container_source from, + struct task_struct *reaper, int cpu, + struct cpuacct_usage_result *res) +{ + struct cpuacct *ca_src; + struct task_group *tg; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + tg = cgroup_tg(ca_src->css.cgroup); + __cpuacct_get_usage_result(ca_src, cpu, tg, res); + rcu_read_unlock(); +} + +unsigned long rich_container_get_running(enum rich_container_source from, + struct task_struct *reaper, int cpu) +{ + struct cpuacct *ca_src; + unsigned long nr; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + nr = ca_running(ca_src, cpu); + rcu_read_unlock(); + + return nr; +} 
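+
+/*
+ * As in rich_container_get_usage() and rich_container_get_running() above,
+ * the helper below resolves the cpuacct source once (reaper or current)
+ * rather than iterating the css hierarchy for every CPU.
+ */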
+ +void rich_container_get_avenrun(enum rich_container_source from, + struct task_struct *reaper, unsigned long *loads, + unsigned long offset, int shift, bool running) +{ + struct cpuacct *ca_src; + + rcu_read_lock(); + /* To avoid iterating css for every cpu */ + if (likely(from == RICH_CONTAINER_REAPER)) + ca_src = task_ca(reaper); + else + ca_src = task_ca(current); + + __get_cgroup_avenrun(ca_src, loads, offset, shift, running); + rcu_read_unlock(); +} + +#endif diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index af7952f12e6cf19ac9b953fbbf54df823372f497..27dd9be809dc33f2adb251bfe6852b688383849d 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c @@ -142,8 +142,6 @@ void account_user_time(struct task_struct *p, u64 cputime) */ void account_guest_time(struct task_struct *p, u64 cputime) { - u64 *cpustat = kcpustat_this_cpu->cpustat; - /* Add guest time to process. */ p->utime += cputime; account_group_user_time(p, cputime); @@ -152,10 +150,10 @@ void account_guest_time(struct task_struct *p, u64 cputime) /* Add guest time to cpustat. */ if (task_nice(p) > 0) { task_group_account_field(p, CPUTIME_NICE, cputime); - cpustat[CPUTIME_GUEST_NICE] += cputime; + task_group_account_field(p, CPUTIME_GUEST_NICE, cputime); } else { task_group_account_field(p, CPUTIME_USER, cputime); - cpustat[CPUTIME_GUEST] += cputime; + task_group_account_field(p, CPUTIME_GUEST, cputime); } } @@ -231,17 +229,32 @@ void account_idle_time(u64 cputime) } -#ifdef CONFIG_SCHED_CORE +#if defined(CONFIG_SCHED_CORE) || defined(CONFIG_SCHED_ACPU) /* - * Account for forceidle time due to core scheduling. + * Account for sibidle, and for forceidle time due to core scheduling. * * REQUIRES: schedstat is enabled. */ -void __account_forceidle_time(struct task_struct *p, u64 delta) +void __account_sibidle_time(struct task_struct *p, u64 delta, u64 delta_task, bool fi) { - __schedstat_add(p->stats.core_forceidle_sum, delta); - - task_group_account_field(p, CPUTIME_FORCEIDLE, delta); + unsigned int cpu = task_cpu(p); + + __schedstat_add(p->stats.core_sibidle_sum, delta); + __schedstat_add(p->stats.core_sibidle_task_sum, delta_task); + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_SIBIDLE_TASK] += delta_task; + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_SIBIDLE_TASK, delta_task); +#ifdef CONFIG_SCHED_CORE + if (fi) { + __schedstat_add(p->stats.core_forceidle_sum, delta); + __schedstat_add(p->stats.core_forceidle_task_sum, delta_task); + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE] += delta; + kcpustat_cpu(cpu).cpustat[CPUTIME_FORCEIDLE_TASK] += delta_task; + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE, delta); + cgroup_account_cputime_field(p, CPUTIME_FORCEIDLE_TASK, delta_task); + } +#endif } #endif diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 4c3d0d9f3db6326703f92aab707771c39921a543..075750fbf7b1831a5950a9c8692952f9a2185995 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -519,6 +519,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); } @@ -1022,6 +1023,7 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, PN_SCHEDSTAT(slice_max); PN_SCHEDSTAT(wait_max); PN_SCHEDSTAT(wait_sum); + PN_SCHEDSTAT(parent_wait_contrib); P_SCHEDSTAT(wait_count); PN_SCHEDSTAT(iowait_sum); 
P_SCHEDSTAT(iowait_count); @@ -1059,6 +1061,11 @@ void proc_sched_show_task(struct task_struct *p, struct pid_namespace *ns, #ifdef CONFIG_SCHED_CORE PN_SCHEDSTAT(core_forceidle_sum); + PN_SCHEDSTAT(core_forceidle_task_sum); +#endif +#ifdef CONFIG_SCHED_ACPU + PN_SCHEDSTAT(core_sibidle_sum); + PN_SCHEDSTAT(core_sibidle_task_sum); #endif } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fa9fff0f9620dce1434e62487345230a239a01ee..0659e5eeb3cd0832d81099ca3ae873a858e5e2b5 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -161,6 +161,15 @@ static struct ctl_table sched_fair_sysctls[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "sched_cfs_bw_burst_onset_percent", + .data = &sysctl_sched_cfs_bw_burst_onset_percent, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, #endif #ifdef CONFIG_NUMA_BALANCING { @@ -1144,6 +1153,15 @@ static void update_tg_load_avg(struct cfs_rq *cfs_rq) } #endif /* CONFIG_SMP */ +static inline void +update_exec_raw(struct cfs_rq *cfs_rq, struct sched_entity *curr) +{ + u64 now = rq_clock(rq_of(cfs_rq)); + + curr->sum_exec_raw += now - curr->exec_start_raw; + curr->exec_start_raw = now; +} + /* * Update the current task's runtime statistics. */ @@ -1186,6 +1204,7 @@ static void update_curr(struct cfs_rq *cfs_rq) } account_cfs_rq_runtime(cfs_rq, delta_exec); + update_exec_raw(cfs_rq, curr); } static void update_curr_fair(struct rq *rq) @@ -1196,8 +1215,10 @@ static void update_curr_fair(struct rq *rq) static inline void update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + u64 parent_wait_sum, delta, clock = rq_clock(rq_of(cfs_rq)); + struct sched_entity *pse = parent_entity(se); if (!schedstat_enabled()) return; @@ -1208,19 +1229,36 @@ update_stats_wait_start_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) p = task_of(se); __update_stats_wait_start(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + __schedstat_set(stats->parent_wait_sum_base, parent_wait_sum); } static inline void update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) { - struct sched_statistics *stats; + struct sched_statistics *stats, *pstats; struct task_struct *p = NULL; + struct sched_entity *pse = parent_entity(se); + u64 parent_wait_sum, clock = rq_clock(rq_of(cfs_rq)); + u64 delta; if (!schedstat_enabled()) return; stats = __schedstats_from_se(se); + delta = clock - schedstat_val(stats->wait_start); + /* * When the sched_schedstat changes from 0 to 1, some sched se * maybe already in the runqueue, the se->statistics.wait_start @@ -1233,7 +1271,23 @@ update_stats_wait_end_fair(struct cfs_rq *cfs_rq, struct sched_entity *se) if (entity_is_task(se)) p = task_of(se); + cpu_update_latency(se, delta); + __update_stats_wait_end(rq_of(cfs_rq), p, stats); + + if (!pse) + return; + + pstats = __schedstats_from_se(pse); + + /* pick_next_task_fair() can update parent wait_start to 0 */ + if (schedstat_val(pstats->wait_start)) + delta = clock - schedstat_val(pstats->wait_start); + else + delta = 0; + parent_wait_sum = schedstat_val(pstats->wait_sum) + delta; + delta = 
parent_wait_sum - schedstat_val(stats->parent_wait_sum_base); + __schedstat_add(stats->parent_wait_contrib, delta); } static inline void @@ -1312,6 +1366,7 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se) * We are starting a new run period: */ se->exec_start = rq_clock_task(rq_of(cfs_rq)); + se->exec_start_raw = rq_clock(rq_of(cfs_rq)); } /************************************************** @@ -5479,21 +5534,27 @@ static inline u64 sched_cfs_bandwidth_slice(void) * * requires cfs_b->lock */ -void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b, + u64 overrun) { - s64 runtime; + u64 refill, runtime; if (unlikely(cfs_b->quota == RUNTIME_INF)) return; - cfs_b->runtime += cfs_b->quota; - runtime = cfs_b->runtime_snap - cfs_b->runtime; - if (runtime > 0) { - cfs_b->burst_time += runtime; - cfs_b->nr_burst++; + if (cfs_b->runtime_snap > cfs_b->runtime) { + runtime = cfs_b->runtime_snap - cfs_b->runtime; + if (runtime > cfs_b->quota) { + cfs_b->burst_time += runtime - cfs_b->quota; + cfs_b->nr_burst++; + } } - cfs_b->runtime = min(cfs_b->runtime, cfs_b->quota + cfs_b->burst); + cfs_b->current_buffer = max(cfs_b->runtime, cfs_b->buffer); + overrun = min(overrun, cfs_b->max_overrun); + refill = cfs_b->quota * overrun; + cfs_b->runtime += refill; + cfs_b->runtime = min(cfs_b->runtime, cfs_b->current_buffer); cfs_b->runtime_snap = cfs_b->runtime; } @@ -5516,7 +5577,7 @@ static int __assign_cfs_rq_runtime(struct cfs_bandwidth *cfs_b, if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - start_cfs_bandwidth(cfs_b); + start_cfs_bandwidth(cfs_b, 0); if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -5689,6 +5750,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) if (!se->on_rq) goto done; + if (se->my_q != cfs_rq) + cgroup_idle_start(se); + dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -5737,6 +5801,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) { + struct cfs_rq *bottom_cfs_rq = cfs_rq; struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; @@ -5780,6 +5845,9 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) if (se->on_rq) break; + + if (se->my_q != bottom_cfs_rq) + cgroup_idle_end(se); enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -5986,7 +6054,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun, u cfs_b->nr_periods += overrun; /* Refill extra burst quota even if cfs_b->idle */ - __refill_cfs_bandwidth_runtime(cfs_b); + __refill_cfs_bandwidth_runtime(cfs_b, overrun); /* * idle depends on !throttled (for the case of a large deficit), and if @@ -6241,8 +6309,17 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) new = old * 2; if (new < max_cfs_quota_period) { cfs_b->period = ns_to_ktime(new); - cfs_b->quota *= 2; - cfs_b->burst *= 2; + cfs_b->quota = min(cfs_b->quota * 2, + max_cfs_runtime); + + cfs_b->burst = min(cfs_b->burst * 2, + max_cfs_runtime); + + cfs_b->buffer = min(max_cfs_runtime, + cfs_b->quota + cfs_b->burst); + /* Add 1 in case max_overrun becomes 0. 
*/ + cfs_b->max_overrun >>= 1; + cfs_b->max_overrun++; pr_warn_ratelimited( "cfs_period_timer[cpu%d]: period too short, scaling up (new cfs_period_us = %lld, cfs_quota_us = %lld)\n", @@ -6275,6 +6352,8 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *paren cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); cfs_b->burst = 0; + cfs_b->init_buffer = 0; + cfs_b->buffer = RUNTIME_INF; cfs_b->hierarchical_quota = parent ? parent->hierarchical_quota : RUNTIME_INF; INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -6298,16 +6377,26 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) #endif } -void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init) { + u64 overrun; + lockdep_assert_held(&cfs_b->lock); if (cfs_b->period_active) return; cfs_b->period_active = 1; - hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); + overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period); hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED); + + /* + * When period timer stops, quota for the following period is not + * refilled, however period timer is already forwarded. We should + * accumulate quota once more than overrun here. + */ + if (!init) + __refill_cfs_bandwidth_runtime(cfs_b, overrun + 1); } static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) @@ -6625,6 +6714,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_end(se); + cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -6632,8 +6724,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (cfs_rq->nr_running == 1) + cgroup_idle_end(se->parent); +#endif goto enqueue_throttle; + } flags = ENQUEUE_WAKEUP; } @@ -6703,6 +6800,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + if (!entity_is_task(se)) + cgroup_idle_start(se); + cfs_rq->h_nr_running--; cfs_rq->idle_h_nr_running -= idle_h_nr_running; @@ -6710,8 +6810,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) idle_h_nr_running = 1; /* end evaluation on encountering a throttled cfs_rq */ - if (cfs_rq_throttled(cfs_rq)) + if (cfs_rq_throttled(cfs_rq)) { +#ifdef CONFIG_FAIR_GROUP_SCHED + if (!cfs_rq->nr_running) + cgroup_idle_start(se->parent); +#endif goto dequeue_throttle; + } /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -7303,6 +7408,30 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool } } + if (static_branch_unlikely(&sched_cluster_active)) { + struct sched_group *sg = sd->groups; + + if (sg->flags & SD_CLUSTER) { + for_each_cpu_wrap(cpu, sched_group_span(sg), target + 1) { + if (!cpumask_test_cpu(cpu, cpus)) + continue; + + if (has_idle_core) { + i = select_idle_core(p, cpu, cpus, &idle_cpu); + if ((unsigned int)i < nr_cpumask_bits) + return i; + } else { + if (--nr <= 0) + return -1; + idle_cpu = __select_idle_cpu(cpu, p); + if ((unsigned int)idle_cpu < nr_cpumask_bits) + return idle_cpu; + } + } + cpumask_andnot(cpus, cpus, sched_group_span(sg)); + } + } + for_each_cpu_wrap(cpu, cpus, target + 1) { 
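+		/*
+		 * When cluster scheduling is active, the CPUs of the
+		 * target's cluster were scanned above and then cleared
+		 * from @cpus, so this pass covers the rest of the LLC.
+		 */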
if (has_idle_core) { i = select_idle_core(p, cpu, cpus, &idle_cpu); @@ -7310,7 +7439,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool return i; } else { - if (!--nr) + if (--nr <= 0) return -1; idle_cpu = __select_idle_cpu(cpu, p); if ((unsigned int)idle_cpu < nr_cpumask_bits) @@ -7412,7 +7541,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) bool has_idle_core = false; struct sched_domain *sd; unsigned long task_util, util_min, util_max; - int i, recent_used_cpu; + int i, recent_used_cpu, prev_aff = -1; /* * On asymmetric system, update task utilization because we will check @@ -7439,8 +7568,14 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) */ if (prev != target && cpus_share_cache(prev, target) && (available_idle_cpu(prev) || sched_idle_cpu(prev)) && - asym_fits_cpu(task_util, util_min, util_max, prev)) - return prev; + asym_fits_cpu(task_util, util_min, util_max, prev)) { + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(prev, target)) + return prev; + + prev_aff = prev; + } /* * Allow a per-cpu kthread to stack with the wakee if the @@ -7467,7 +7602,13 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) (available_idle_cpu(recent_used_cpu) || sched_idle_cpu(recent_used_cpu)) && cpumask_test_cpu(recent_used_cpu, p->cpus_ptr) && asym_fits_cpu(task_util, util_min, util_max, recent_used_cpu)) { - return recent_used_cpu; + + if (!static_branch_unlikely(&sched_cluster_active) || + cpus_share_resources(recent_used_cpu, target)) + return recent_used_cpu; + + } else { + recent_used_cpu = -1; } /* @@ -7508,6 +7649,17 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target) if ((unsigned)i < nr_cpumask_bits) return i; + /* + * For cluster machines which have lower sharing cache like L2 or + * LLC Tag, we tend to find an idle CPU in the target's cluster + * first. But prev_cpu or recent_used_cpu may also be a good candidate, + * use them if possible when no idle CPU found in select_idle_cpu(). + */ + if ((unsigned int)prev_aff < nr_cpumask_bits) + return prev_aff; + if ((unsigned int)recent_used_cpu < nr_cpumask_bits) + return recent_used_cpu; + return target; } @@ -12465,6 +12617,32 @@ static int task_is_throttled_fair(struct task_struct *p, int cpu) #endif return throttled_hierarchy(cfs_rq); } + +#ifdef CONFIG_CFS_BANDWIDTH +void account_ht_aware_quota(struct task_struct *p, u64 delta) +{ + struct sched_entity *se; + unsigned int ht_ratio; + struct cfs_rq *cfs_rq; + + /* We only account ht_aware_quota for cookied task. 
+	 */
+	if (sched_feat(SCHED_CORE_HT_AWARE_QUOTA) && p->core_cookie) {
+		se = &p->se;
+		cfs_rq = task_cfs_rq(p);
+
+		if (se->parent) {
+			ht_ratio = se->parent->ht_ratio;
+			if (ht_ratio > 100 && ht_ratio <= 200) {
+				for_each_sched_entity(se) {
+					cfs_rq = cfs_rq_of(se);
+					account_cfs_rq_runtime(cfs_rq,
+						delta * (ht_ratio - 100) / 100);
+				}
+			}
+		}
+	}
+}
+#endif
 #else
 static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
 #endif
@@ -12685,6 +12863,46 @@ void init_cfs_rq(struct cfs_rq *cfs_rq)
 }
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
+
+#ifdef CONFIG_SCHED_SLI
+static void update_nr_iowait_fair(struct task_struct *p, long inc)
+{
+	unsigned long flags;
+	struct sched_entity *se = p->se.parent;
+	u64 clock;
+
+	if (!schedstat_enabled())
+		return;
+
+	clock = __rq_clock_broken(cpu_rq(task_cpu(p)));
+
+	for_each_sched_entity(se) {
+		/*
+		 * Avoid taking rq->lock from the try_to_wake_up() hot path,
+		 * at the price of poor consistency across the cgroup
+		 * hierarchy, which we can tolerate.
+		 * Accessing se->on_rq does need rq->lock, and we already
+		 * hold it: when inc == 1 the callers are __schedule() and
+		 * task_move_group_fair().
+		 */
+		spin_lock_irqsave(&se->iowait_lock, flags);
+		if (!se->on_rq && !schedstat_val(se->cg_nr_iowait) && inc > 0)
+			__schedstat_set(se->cg_iowait_start, clock);
+		if (schedstat_val(se->cg_iowait_start) > 0 &&
+		    schedstat_val(se->cg_nr_iowait) + inc == 0) {
+			__schedstat_add(se->cg_iowait_sum, clock -
+					schedstat_val(se->cg_iowait_start));
+			__schedstat_set(se->cg_iowait_start, 0);
+		}
+		__schedstat_add(se->cg_nr_iowait, inc);
+		spin_unlock_irqrestore(&se->iowait_lock, flags);
+	}
+}
+#else
+static void update_nr_iowait_fair(struct task_struct *p, long inc) {}
+#endif
+
+
 static void task_change_group_fair(struct task_struct *p)
 {
 	/*
@@ -12694,6 +12912,22 @@ static void task_change_group_fair(struct task_struct *p)
 	if (READ_ONCE(p->__state) == TASK_NEW)
 		return;
 
+	/*
+	 * The p->in_iowait check is obvious: if p is in iowait, its
+	 * nr_iowait contribution must be transferred to the new cgroup,
+	 * otherwise try_to_wake_up() will decrement the new cgroup's
+	 * count, leaving the old cgroup's nr_iowait at 1 and the new
+	 * cgroup's at -1.
+	 *
+	 * The !p->on_rq check is necessary too, because in_iowait and
+	 * on_rq are not updated at the same time. After try_to_wake_up(),
+	 * p->in_iowait remains 1 while on_rq has become 1; p is no longer
+	 * in iowait at that point, so nr_iowait must not be transferred.
+	 * Similarly, io_schedule() has a window between setting
+	 * p->in_iowait to 1 and setting p->on_rq to 0; don't transfer
+	 * in that window either.
+ */ + if (p->in_iowait && !p->on_rq) + update_nr_iowait_fair(p, -1); + detach_task_cfs_rq(p); #ifdef CONFIG_SMP @@ -12702,6 +12936,9 @@ static void task_change_group_fair(struct task_struct *p) #endif set_task_rq(p, task_cpu(p)); attach_task_cfs_rq(p); + /* Same as above */ + if (p->in_iowait && !p->on_rq) + update_nr_iowait_fair(p, 1); } void free_fair_sched_group(struct task_group *tg) @@ -12750,6 +12987,9 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) init_cfs_rq(cfs_rq); init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]); init_entity_runnable_average(se); +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + se->ht_ratio = 100; +#endif } return 1; @@ -12834,6 +13074,9 @@ void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, /* guarantee group entities always have weight */ update_load_set(&se->load, NICE_0_LOAD); se->parent = parent; + seqlock_init(&se->idle_seqlock); + spin_lock_init(&se->iowait_lock); + se->cg_idle_start = se->cg_init_time = cpu_clock(cpu); } static DEFINE_MUTEX(shares_mutex); @@ -12992,6 +13235,16 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task return rr_interval; } +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_fair(struct task_struct *p, long inc) +{ + struct sched_entity *se = &p->se; + + for_each_sched_entity(se) + cfs_rq_of(se)->nr_uninterruptible += inc; +} +#endif + /* * All the scheduling class methods: */ @@ -13039,6 +13292,10 @@ DEFINE_SCHED_CLASS(fair) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_fair, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_fair, + .update_nr_iowait = update_nr_iowait_fair, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/features.h b/kernel/sched/features.h index f770168230ae4a09dd0f240957c0c7d749001a50..ee7fb7220ed8a9b711abd7364321092f52217e1a 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -89,3 +89,7 @@ SCHED_FEAT(UTIL_EST_FASTUP, true) SCHED_FEAT(LATENCY_WARN, false) SCHED_FEAT(HZ_BW, true) + +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) +SCHED_FEAT(SCHED_CORE_HT_AWARE_QUOTA, false) +#endif diff --git a/kernel/sched/isolation.c b/kernel/sched/isolation.c index 373d42c707bc5d65d70c18d66e0ffd1621d33f5b..6b045ffa045945bf5cc370fafe190ce8cb71d46e 100644 --- a/kernel/sched/isolation.c +++ b/kernel/sched/isolation.c @@ -53,8 +53,32 @@ int housekeeping_any_cpu(enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_any_cpu); +#ifdef CONFIG_CGROUP_SCHED +/* + * dyn_allowed -- allowed CPUs for wild tasks. + * + * dyn_isolated -- isolated CPUs for wild tasks. + * + * dyn_possible -- possible CPUs for dynamical isolation. 
+ */ +static cpumask_var_t dyn_allowed; +static cpumask_var_t dyn_isolated; +static cpumask_var_t dyn_possible; + +static bool dyn_isolcpus_ready; + +DEFINE_STATIC_KEY_FALSE(dyn_isolcpus_enabled); +EXPORT_SYMBOL_GPL(dyn_isolcpus_enabled); +#endif + const struct cpumask *housekeeping_cpumask(enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return dyn_allowed; +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return housekeeping.cpumasks[type]; @@ -72,6 +96,12 @@ EXPORT_SYMBOL_GPL(housekeeping_affine); bool housekeeping_test_cpu(int cpu, enum hk_type type) { +#ifdef CONFIG_CGROUP_SCHED + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + if (BIT(type) & HK_FLAG_DOMAIN) + return cpumask_test_cpu(cpu, dyn_allowed); +#endif + if (static_branch_unlikely(&housekeeping_overridden)) if (housekeeping.flags & BIT(type)) return cpumask_test_cpu(cpu, housekeeping.cpumasks[type]); @@ -79,10 +109,30 @@ bool housekeeping_test_cpu(int cpu, enum hk_type type) } EXPORT_SYMBOL_GPL(housekeeping_test_cpu); +#ifdef CONFIG_CGROUP_SCHED +static inline void free_dyn_masks(void) +{ + free_cpumask_var(dyn_allowed); + free_cpumask_var(dyn_isolated); + free_cpumask_var(dyn_possible); +} +#endif + void __init housekeeping_init(void) { enum hk_type type; +#ifdef CONFIG_CGROUP_SCHED + if (zalloc_cpumask_var(&dyn_allowed, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_isolated, GFP_KERNEL) && + zalloc_cpumask_var(&dyn_possible, GFP_KERNEL)) { + cpumask_copy(dyn_allowed, cpu_possible_mask); + cpumask_copy(dyn_possible, cpu_possible_mask); + dyn_isolcpus_ready = true; + } else + free_dyn_masks(); +#endif + if (!housekeeping.flags) return; @@ -95,6 +145,13 @@ void __init housekeeping_init(void) /* We need at least one CPU to handle housekeeping work */ WARN_ON_ONCE(cpumask_empty(housekeeping.cpumasks[type])); } +#ifdef CONFIG_CGROUP_SCHED + if (dyn_isolcpus_ready && (housekeeping.flags & HK_FLAG_DOMAIN) && + type < HK_TYPE_MAX) { + cpumask_copy(dyn_allowed, housekeeping.cpumasks[type]); + cpumask_copy(dyn_possible, housekeeping.cpumasks[type]); + } +#endif } static void __init housekeeping_setup_type(enum hk_type type, @@ -239,3 +296,134 @@ static int __init housekeeping_isolcpus_setup(char *str) return housekeeping_setup(str, flags); } __setup("isolcpus=", housekeeping_isolcpus_setup); + +#ifdef CONFIG_CGROUP_SCHED +static int dyn_isolcpus_show(struct seq_file *s, void *p) +{ + seq_printf(s, "%*pbl\n", cpumask_pr_args(dyn_isolated)); + + return 0; +} + +static int dyn_isolcpus_open(struct inode *inode, struct file *file) +{ + return single_open(file, dyn_isolcpus_show, NULL); +} + +void wilds_cpus_allowed(struct cpumask *pmask) +{ + if (static_branch_unlikely(&dyn_isolcpus_enabled)) + cpumask_and(pmask, pmask, dyn_allowed); +} + +void update_wilds_cpumask(cpumask_var_t new_allowed, cpumask_var_t old_allowed) +{ + struct task_struct *g, *task; + + rcu_read_lock(); + for_each_process_thread(g, task) { + if (task->flags & PF_KTHREAD) + continue; + + if (!cpumask_equal(task->cpus_ptr, old_allowed)) + continue; + + set_cpus_allowed_ptr(task, new_allowed); + } + rcu_read_unlock(); +} + +static DEFINE_MUTEX(dyn_isolcpus_mutex); + +static ssize_t write_dyn_isolcpus(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + int ret = count; + cpumask_var_t isolated; + cpumask_var_t new_allowed; + cpumask_var_t old_allowed; + + mutex_lock(&dyn_isolcpus_mutex); + + if 
(!zalloc_cpumask_var(&isolated, GFP_KERNEL)) { + ret = -ENOMEM; + goto out; + } + + if (!zalloc_cpumask_var(&new_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_isolated; + } + + if (!zalloc_cpumask_var(&old_allowed, GFP_KERNEL)) { + ret = -ENOMEM; + goto free_new_allowed; + } + + if (cpumask_parselist_user(buf, count, isolated)) { + ret = -EINVAL; + goto free_all; + } + + if (!cpumask_subset(isolated, dyn_possible)) { + ret = -EINVAL; + goto free_all; + } + + /* At least reserve one for wild tasks to run */ + cpumask_andnot(new_allowed, dyn_possible, isolated); + if (!cpumask_intersects(new_allowed, cpu_online_mask)) { + ret = -EINVAL; + goto free_all; + } + + cpumask_copy(old_allowed, dyn_allowed); + cpumask_copy(dyn_allowed, new_allowed); + cpumask_copy(dyn_isolated, isolated); + + if (cpumask_empty(dyn_isolated)) + static_branch_disable(&dyn_isolcpus_enabled); + else + static_branch_enable(&dyn_isolcpus_enabled); + + update_wilds_cpumask(new_allowed, old_allowed); + + rebuild_sched_domains(); + workqueue_set_unbound_cpumask(new_allowed); + +free_all: + free_cpumask_var(old_allowed); +free_new_allowed: + free_cpumask_var(new_allowed); +free_isolated: + free_cpumask_var(isolated); +out: + mutex_unlock(&dyn_isolcpus_mutex); + + return ret; +} + +static const struct proc_ops proc_dyn_isolcpus_operations = { + .proc_open = dyn_isolcpus_open, + .proc_read = seq_read, + .proc_write = write_dyn_isolcpus, + .proc_lseek = noop_llseek, + .proc_release = single_release, +}; + +static int __init dyn_isolcpus_init(void) +{ + if (dyn_isolcpus_ready && + !proc_create("dyn_isolcpus", 0200, NULL, + &proc_dyn_isolcpus_operations)) { + dyn_isolcpus_ready = false; + free_dyn_masks(); + } + + if (!dyn_isolcpus_ready) + pr_err("Initialize Dynamical Isolation Failed\n"); + + return 0; +} +early_initcall(dyn_isolcpus_init); +#endif diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c index 52c8f8226b0d3590425ec8237634b5d8e0148b99..13761c5d1bfbacf06fd604e92209679faf48d075 100644 --- a/kernel/sched/loadavg.c +++ b/kernel/sched/loadavg.c @@ -56,8 +56,10 @@ /* Variables and functions for calc_load */ atomic_long_t calc_load_tasks; -unsigned long calc_load_update; +atomic_long_t calc_load_tasks_r; unsigned long avenrun[3]; +unsigned long avenrun_r[3]; +unsigned long calc_load_update; EXPORT_SYMBOL(avenrun); /* should be removed */ /** @@ -90,6 +92,29 @@ long calc_load_fold_active(struct rq *this_rq, long adjust) return delta; } +#ifdef CONFIG_SCHED_SLI +void get_avenrun_r(unsigned long *loads, unsigned long offset, int shift) +{ + loads[0] = (avenrun_r[0] + offset) << shift; + loads[1] = (avenrun_r[1] + offset) << shift; + loads[2] = (avenrun_r[2] + offset) << shift; +} + +long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + long nr_active, delta = 0; + + nr_active = this_rq->nr_running - adjust; + + if (nr_active != this_rq->calc_load_active_r) { + delta = nr_active - this_rq->calc_load_active_r; + this_rq->calc_load_active_r = nr_active; + } + + return delta; +} +#endif + /** * fixed_power_int - compute: x^n, in O(log n) time * @@ -203,6 +228,9 @@ calc_load_n(unsigned long load, unsigned long exp, * When making the ILB scale, we should try to pull this in as well. 
*/ static atomic_long_t calc_load_nohz[2]; +#ifdef CONFIG_SCHED_SLI +static atomic_long_t calc_load_nohz_r[2]; +#endif static int calc_load_idx; static inline int calc_load_write_idx(void) @@ -233,13 +261,17 @@ static inline int calc_load_read_idx(void) static void calc_load_nohz_fold(struct rq *rq) { long delta; + int idx = calc_load_write_idx(); delta = calc_load_fold_active(rq, 0); - if (delta) { - int idx = calc_load_write_idx(); - + if (delta) atomic_long_add(delta, &calc_load_nohz[idx]); - } + +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_nohz_r[idx]); +#endif } void calc_load_nohz_start(void) @@ -291,6 +323,19 @@ static long calc_load_nohz_read(void) return delta; } +#ifdef CONFIG_SCHED_SLI +static long calc_load_nohz_r_read(void) +{ + int idx = calc_load_read_idx(); + long delta = 0; + + if (atomic_long_read(&calc_load_nohz_r[idx])) + delta = atomic_long_xchg(&calc_load_nohz_r[idx], 0); + + return delta; +} +#endif + /* * NO_HZ can leave us missing all per-CPU ticks calling * calc_load_fold_active(), but since a NO_HZ CPU folds its delta into @@ -320,6 +365,16 @@ static void calc_global_nohz(void) avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n); avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n); +#ifdef CONFIG_SCHED_SLI + /* Calc avenrun_r */ + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun_r[0] = calc_load_n(avenrun_r[0], EXP_1, active, n); + avenrun_r[1] = calc_load_n(avenrun_r[1], EXP_5, active, n); + avenrun_r[2] = calc_load_n(avenrun_r[2], EXP_15, active, n); +#endif + WRITE_ONCE(calc_load_update, sample_window + n * LOAD_FREQ); } @@ -336,6 +391,7 @@ static void calc_global_nohz(void) #else /* !CONFIG_NO_HZ_COMMON */ static inline long calc_load_nohz_read(void) { return 0; } +static inline long calc_load_nohz_r_read(void) { return 0; } static inline void calc_global_nohz(void) { } #endif /* CONFIG_NO_HZ_COMMON */ @@ -369,8 +425,28 @@ void calc_global_load(void) avenrun[1] = calc_load(avenrun[1], EXP_5, active); avenrun[2] = calc_load(avenrun[2], EXP_15, active); +#ifdef CONFIG_SCHED_SLI + /* + * Calculate load 1/5/15 for running tasks only. We do not + * invent common functions to keep the same layout as upstream. + */ + delta = calc_load_nohz_r_read(); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); + + active = atomic_long_read(&calc_load_tasks_r); + active = active > 0 ? active * FIXED_1 : 0; + + avenrun_r[0] = calc_load(avenrun_r[0], EXP_1, active); + avenrun_r[1] = calc_load(avenrun_r[1], EXP_5, active); + avenrun_r[2] = calc_load(avenrun_r[2], EXP_15, active); +#endif + WRITE_ONCE(calc_load_update, sample_window + LOAD_FREQ); + if (!async_load_calc_enabled()) + calc_cgroup_load(); + /* * In case we went to NO_HZ for multiple LOAD_FREQ intervals * catch up in bulk. 
@@ -393,5 +469,11 @@ void calc_global_load_tick(struct rq *this_rq) if (delta) atomic_long_add(delta, &calc_load_tasks); +#ifdef CONFIG_SCHED_SLI + delta = calc_load_fold_active_r(this_rq, 0); + if (delta) + atomic_long_add(delta, &calc_load_tasks_r); +#endif + this_rq->calc_load_update += LOAD_FREQ; } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 904dd85345973ea9e9b7306adaa1d1de62e3534c..6d22a7010ce25a34292e55ea3d32e62137f4c1a7 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -2700,6 +2700,16 @@ static int task_is_throttled_rt(struct task_struct *p, int cpu) } #endif +#ifdef CONFIG_SCHED_SLI +static void update_nr_uninterruptible_rt(struct task_struct *p, long inc) +{ + struct sched_rt_entity *se = &p->rt; + + for_each_sched_rt_entity(se) + rt_rq_of_se(se)->nr_uninterruptible += inc; +} +#endif + DEFINE_SCHED_CLASS(rt) = { .enqueue_task = enqueue_task_rt, @@ -2736,6 +2746,9 @@ DEFINE_SCHED_CLASS(rt) = { #ifdef CONFIG_SCHED_CORE .task_is_throttled = task_is_throttled_rt, #endif +#ifdef CONFIG_SCHED_SLI + .update_nr_uninterruptible = update_nr_uninterruptible_rt, +#endif #ifdef CONFIG_UCLAMP_TASK .uclamp_enabled = 1, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 04846272409cc00f20f24d8cc6456554d67aba0a..c5e21508bf6baa7a452fd6d95bcb60c488e510bf 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -104,16 +104,31 @@ struct cpuidle_state; #define TASK_ON_RQ_QUEUED 1 #define TASK_ON_RQ_MIGRATING 2 +#ifdef CONFIG_CFS_BANDWIDTH +extern const u64 max_cfs_runtime; +extern unsigned int sysctl_sched_cfs_bw_burst_onset_percent; +#endif + extern __read_mostly int scheduler_running; extern unsigned long calc_load_update; extern atomic_long_t calc_load_tasks; +extern atomic_long_t calc_load_tasks_r; extern unsigned int sysctl_sched_child_runs_first; extern void calc_global_load_tick(struct rq *this_rq); extern long calc_load_fold_active(struct rq *this_rq, long adjust); +#ifdef CONFIG_SCHED_SLI +extern long calc_load_fold_active_r(struct rq *this_rq, long adjust); +#else +static inline long calc_load_fold_active_r(struct rq *this_rq, long adjust) +{ + return 0; +} +#endif + extern void call_trace_sched_update_nr_running(struct rq *rq, int count); extern unsigned int sysctl_sched_rt_period; @@ -333,6 +348,52 @@ struct rt_rq; extern struct list_head task_groups; +enum sched_lat_stat_item { + SCHED_LAT_WAIT, + SCHED_LAT_BLOCK, + SCHED_LAT_IOBLOCK, + SCHED_LAT_CGROUP_WAIT, + SCHED_LAT_NR_STAT +}; + +/* + * [0, 1ms) + * [1, 4ms) + * [4, 7ms) + * [7, 10ms) + * [10, 100ms) + * [100, 500ms) + * [500, 1000ms) + * [1000, 5000ms) + * [5000, 10000ms) + * [10000ms, INF) + * total(ms) + */ +/* Scheduler latency histogram distribution, in milliseconds */ +enum sched_lat_count_t { + SCHED_LAT_0_1, + SCHED_LAT_1_4, + SCHED_LAT_4_7, + SCHED_LAT_7_10, + SCHED_LAT_10_20, + SCHED_LAT_20_30, + SCHED_LAT_30_40, + SCHED_LAT_40_50, + SCHED_LAT_50_100, + SCHED_LAT_100_500, + SCHED_LAT_500_1000, + SCHED_LAT_1000_5000, + SCHED_LAT_5000_10000, + SCHED_LAT_10000_INF, + SCHED_LAT_TOTAL, + SCHED_LAT_NR, + SCHED_LAT_NR_COUNT, +}; + +struct sched_cgroup_lat_stat_cpu { + unsigned long item[SCHED_LAT_NR_STAT][SCHED_LAT_NR_COUNT]; +}; + struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH raw_spinlock_t lock; @@ -340,6 +401,10 @@ struct cfs_bandwidth { u64 quota; u64 runtime; u64 burst; + u64 init_buffer; + u64 current_buffer; + u64 buffer; + u64 max_overrun; u64 runtime_snap; s64 hierarchical_quota; @@ -411,7 +476,13 @@ struct task_group { /* Effective clamp values used for a task group */ 
struct uclamp_se uclamp[UCLAMP_CNT]; #endif +#if defined(CONFIG_SCHED_CORE) && defined(CONFIG_CFS_BANDWIDTH) + unsigned int ht_ratio; +#endif +#ifdef CONFIG_SCHED_SLI + struct sched_cgroup_lat_stat_cpu __percpu *lat_stat_cpu; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -456,8 +527,7 @@ extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, struct sched_entity *parent); extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b, struct cfs_bandwidth *parent); -extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b); -extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b); +extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, int init); extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq); extern bool cfs_task_bw_constrained(struct task_struct *p); @@ -649,6 +719,8 @@ struct cfs_rq { #endif #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ + + unsigned long nr_uninterruptible; }; static inline int rt_bandwidth_enabled(void) @@ -695,6 +767,8 @@ struct rt_rq { struct rq *rq; struct task_group *tg; #endif + + unsigned long nr_uninterruptible; }; static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq) @@ -1017,7 +1091,14 @@ struct rq { struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; - struct mm_struct *prev_mm; + + /* + * Frequent writing to prev_mm and clock_update_flags on local + * CPU causes cacheline containing idle to be invalidated on + * other CPUs. Put prev_mm and sequential fields on a new + * cacheline to fix it. + */ + struct mm_struct *prev_mm ____cacheline_aligned; unsigned int clock_update_flags; u64 clock; @@ -1103,6 +1184,9 @@ struct rq { /* calc_load related fields */ unsigned long calc_load_update; long calc_load_active; +#ifdef CONFIG_SCHED_SLI + long calc_load_active_r; +#endif #ifdef CONFIG_SCHED_HRTICK #ifdef CONFIG_SMP @@ -1155,8 +1239,10 @@ struct rq { unsigned long core_cookie; unsigned int core_forceidle_count; unsigned int core_forceidle_seq; - unsigned int core_forceidle_occupation; - u64 core_forceidle_start; + unsigned int core_sibidle_occupation; + u64 core_sibidle_start; + u64 core_sibidle_start_task; + unsigned int core_sibidle_count; #endif /* Scratch cpumask to be temporarily used under rq_lock */ @@ -1166,6 +1252,14 @@ struct rq { call_single_data_t cfsb_csd; struct list_head cfsb_csd_list; #endif + +#ifdef CONFIG_SCHED_ACPU + u64 acpu_idle_sum; + u64 sibidle_sum; + u64 sibidle_task_sum; + u64 last_acpu_update_time; + u64 last_acpu_update_time_task; +#endif }; #ifdef CONFIG_FAIR_GROUP_SCHED @@ -1318,6 +1412,11 @@ extern void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags); extern void sched_core_get(void); extern void sched_core_put(void); +#ifdef CONFIG_CFS_BANDWIDTH +extern void account_ht_aware_quota(struct task_struct *p, u64 delta); +#else +void account_ht_aware_quota(struct task_struct *p, u64 delta) {} +#endif #else /* !CONFIG_SCHED_CORE */ static inline bool sched_core_enabled(struct rq *rq) @@ -1466,6 +1565,11 @@ static inline struct cfs_rq *group_cfs_rq(struct sched_entity *grp) extern void update_rq_clock(struct rq *rq); +static inline u64 __rq_clock_broken(struct rq *rq) +{ + return READ_ONCE(rq->clock); +} + /* * rq::clock_update_flags bits * @@ -1868,11 +1972,13 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) DECLARE_PER_CPU(struct sched_domain __rcu *, sd_llc); DECLARE_PER_CPU(int, sd_llc_size); DECLARE_PER_CPU(int, sd_llc_id); +DECLARE_PER_CPU(int, sd_share_id); DECLARE_PER_CPU(struct 
sched_domain_shared __rcu *, sd_llc_shared); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); extern struct static_key_false sched_asym_cpucapacity; +extern struct static_key_false sched_cluster_active; static __always_inline bool sched_asym_cpucap_active(void) { @@ -1959,12 +2065,12 @@ static inline const struct cpumask *task_user_cpus(struct task_struct *p) #if defined(CONFIG_SCHED_CORE) && defined(CONFIG_SCHEDSTATS) -extern void __sched_core_account_forceidle(struct rq *rq); +extern void __sched_core_account_sibidle(struct rq *rq); -static inline void sched_core_account_forceidle(struct rq *rq) +static inline void sched_core_account_sibidle(struct rq *rq) { if (schedstat_enabled()) - __sched_core_account_forceidle(rq); + __sched_core_account_sibidle(rq); } extern void __sched_core_tick(struct rq *rq); @@ -1977,7 +2083,7 @@ static inline void sched_core_tick(struct rq *rq) #else -static inline void sched_core_account_forceidle(struct rq *rq) {} +static inline void sched_core_account_sibidle(struct rq *rq) {} static inline void sched_core_tick(struct rq *rq) {} @@ -2290,6 +2396,8 @@ struct sched_class { #ifdef CONFIG_SCHED_CORE int (*task_is_throttled)(struct task_struct *p, int cpu); #endif + void (*update_nr_uninterruptible)(struct task_struct *p, long inc); + void (*update_nr_iowait)(struct task_struct *p, long inc); }; static inline void put_prev_task(struct rq *rq, struct task_struct *prev) @@ -2303,6 +2411,11 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void update_nr_iowait(struct task_struct *p, long inc) +{ + if (p->sched_class->update_nr_iowait) + p->sched_class->update_nr_iowait(p, inc); +} /* * Helper to define a sched_class instance; each one is placed in a separate @@ -3528,4 +3641,32 @@ static inline void init_sched_mm_cid(struct task_struct *t) { } extern u64 avg_vruntime(struct cfs_rq *cfs_rq); extern int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se); +#ifdef CONFIG_SCHED_SLI +extern u64 get_idle_time(struct kernel_cpustat *kcs, int cpu); +extern u64 get_iowait_time(struct kernel_cpustat *kcs, int cpu); +extern void task_ca_increase_nr_migrations(struct task_struct *tsk); +void cpu_update_latency(struct sched_entity *se, u64 delta); +void task_cpu_update_block(struct task_struct *tsk, u64 runtime); +void calc_cgroup_load(void); +bool async_load_calc_enabled(void); +struct task_group *cgroup_tg(struct cgroup *cgrp); +int sched_lat_stat_show(struct seq_file *sf, void *v); +int sched_lat_stat_write(struct cgroup_subsys_state *css, + struct cftype *cft, u64 val); +#else +static inline void task_ca_increase_nr_migrations(struct task_struct *tsk) { } +static inline void cpu_update_latency(struct sched_entity *se, + u64 delta) { } +static inline void task_cpu_update_block(struct task_struct *tsk, + u64 runtime) { } +static inline void calc_cgroup_load(void) { } +static inline bool async_load_calc_enabled(void) +{ + return false; +} +#endif + +long tg_get_cfs_quota(struct task_group *tg); +long tg_get_cfs_period(struct task_group *tg); + #endif /* _KERNEL_SCHED_SCHED_H */ diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c index 857f837f52cbed29d5f0d7e8a80a97497498fbd0..f43ae791bc726ccac2cdbd547587c6afb4a5143f 100644 --- a/kernel/sched/stats.c +++ b/kernel/sched/stats.c @@ -85,6 +85,7 @@ void 
__update_stats_enqueue_sleeper(struct rq *rq, struct task_struct *p, if (p) { if (p->in_iowait) { + task_cpu_update_block(p, delta); __schedstat_add(stats->iowait_sum, delta); __schedstat_inc(stats->iowait_count); trace_sched_stat_iowait(p, delta); diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 423d08947962c8acd7ac304e6a7a995fb6a58d5f..53b9ab66025768ce073f92ba6c1a5de04bc63894 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -666,11 +666,14 @@ static void destroy_sched_domains(struct sched_domain *sd) DEFINE_PER_CPU(struct sched_domain __rcu *, sd_llc); DEFINE_PER_CPU(int, sd_llc_size); DEFINE_PER_CPU(int, sd_llc_id); +DEFINE_PER_CPU(int, sd_share_id); DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing); DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_cpucapacity); + DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity); +DEFINE_STATIC_KEY_FALSE(sched_cluster_active); static void update_top_cache_domain(int cpu) { @@ -691,6 +694,17 @@ static void update_top_cache_domain(int cpu) per_cpu(sd_llc_id, cpu) = id; rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds); + sd = lowest_flag_domain(cpu, SD_CLUSTER); + if (sd) + id = cpumask_first(sched_domain_span(sd)); + + /* + * This assignment should be placed after the sd_llc_id as + * we want this id equals to cluster id on cluster machines + * but equals to LLC id on non-Cluster machines. + */ + per_cpu(sd_share_id, cpu) = id; + sd = lowest_flag_domain(cpu, SD_NUMA); rcu_assign_pointer(per_cpu(sd_numa, cpu), sd); @@ -1548,6 +1562,7 @@ static struct cpumask ***sched_domains_numa_masks; */ #define TOPOLOGY_SD_FLAGS \ (SD_SHARE_CPUCAPACITY | \ + SD_CLUSTER | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING) @@ -2366,6 +2381,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att struct rq *rq = NULL; int i, ret = -ENOMEM; bool has_asym = false; + bool has_cluster = false; if (WARN_ON(cpumask_empty(cpu_map))) goto error; @@ -2491,12 +2507,18 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att WRITE_ONCE(d.rd->max_cpu_capacity, rq->cpu_capacity_orig); cpu_attach_domain(sd, d.rd, i); + + if (lowest_flag_domain(i, SD_CLUSTER)) + has_cluster = true; } rcu_read_unlock(); if (has_asym) static_branch_inc_cpuslocked(&sched_asym_cpucapacity); + if (has_cluster) + static_branch_inc_cpuslocked(&sched_cluster_active); + if (rq && sched_debug_verbose) { pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n", cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity); @@ -2596,6 +2618,9 @@ static void detach_destroy_domains(const struct cpumask *cpu_map) if (rcu_access_pointer(per_cpu(sd_asym_cpucapacity, cpu))) static_branch_dec_cpuslocked(&sched_asym_cpucapacity); + if (static_branch_unlikely(&sched_cluster_active)) + static_branch_dec_cpuslocked(&sched_cluster_active); + rcu_read_lock(); for_each_cpu(i, cpu_map) cpu_attach_domain(NULL, &def_root_domain, i); diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 354a2d294f526ad6688168443913385eda101fa1..777baff4d527b7b8078ecf60844d50de8fbbf7a0 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -63,6 +63,7 @@ #include #include #include +#include #include "../lib/kstrtox.h" @@ -96,6 +97,11 @@ EXPORT_SYMBOL_GPL(sysctl_long_vals); static const int six_hundred_forty_kb = 640 * 1024; #endif +#ifdef CONFIG_USER_NS +extern int unprivileged_userns_clone; +extern int userns_max_level; 
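+/* upper bound accepted for userns_max_level writes via sysctl */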
+extern int userns_max_level_max; +#endif static const int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; @@ -130,6 +136,8 @@ enum sysctl_writes_mode { static enum sysctl_writes_mode sysctl_writes_strict = SYSCTL_WRITES_STRICT; #endif /* CONFIG_PROC_SYSCTL */ +extern int sysctl_enable_context_readahead; + #if defined(HAVE_ARCH_PICK_MMAP_LAYOUT) || \ defined(CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT) int sysctl_legacy_va_layout; @@ -2042,6 +2050,72 @@ static struct ctl_table kern_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_INT_MAX, }, +#endif +#ifdef CONFIG_USER_NS + { + .procname = "unprivileged_userns_clone", + .data = &unprivileged_userns_clone, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec, + }, + { + .procname = "userns_max_level", + .data = &userns_max_level, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = &userns_max_level_max, + }, +#endif +#ifdef CONFIG_SCHED_ACPU + { + .procname = "sched_acpu", + .data = &sysctl_sched_acpu_enabled, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = sched_acpu_enable_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, +#endif /* CONFIG_SCHED_ACPU*/ +#ifdef CONFIG_RICH_CONTAINER + { + .procname = "rich_container_enable", + .data = &sysctl_rich_container_enable, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "rich_container_source", + .data = &sysctl_rich_container_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, + { + .procname = "rich_container_cpuinfo_source", + .data = &sysctl_rich_container_cpuinfo_source, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_TWO, + }, + { + .procname = "rich_container_cpuinfo_sharesbase", + .data = &sysctl_rich_container_cpuinfo_sharesbase, + .maxlen = sizeof(int), + .mode = 0600, + .proc_handler = proc_douintvec_minmax, + .extra1 = SYSCTL_TWO, + }, #endif { } }; @@ -2249,6 +2323,15 @@ static struct ctl_table vm_table[] = { .extra2 = (void *)&mmap_rnd_compat_bits_max, }, #endif + { + .procname = "enable_context_readahead", + .data = &sysctl_enable_context_readahead, + .maxlen = sizeof(sysctl_enable_context_readahead), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE, + }, { } }; diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 1d8e47bed3f118ae071b4c675a6812001c78e238..8846049c8fa3c210edd814ee09a49f191cfb3191 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c @@ -22,6 +22,19 @@ #include #include +/* + * sysctl determining whether unprivileged users may unshare a new + * userns. Allowed by default + */ +int unprivileged_userns_clone = 1; + +/* + * sysctl determining the maximum of nested level. + * Default to 33 to keep compatible with upstream. 
+ */ +int userns_max_level = 33; +int userns_max_level_max = 33; + static struct kmem_cache *user_ns_cachep __read_mostly; static DEFINE_MUTEX(userns_state_mutex); @@ -88,7 +101,7 @@ int create_user_ns(struct cred *new) int ret, i; ret = -ENOSPC; - if (parent_ns->level > 32) + if (parent_ns->level >= userns_max_level) goto fail; ucounts = inc_user_namespaces(parent_ns, owner); diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 5cd6d4e269157973acecd00afc91646720fee2be..11102420a2c7fe8cfc0749f85179b45d6564b467 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,7 @@ static bool is_hardlockup(unsigned int cpu) return false; } +NOKPROBE_SYMBOL(is_hardlockup); static void watchdog_hardlockup_kick(void) { @@ -184,6 +186,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs) per_cpu(watchdog_hardlockup_warned, cpu) = false; } } +NOKPROBE_SYMBOL(watchdog_hardlockup_check); #else /* CONFIG_HARDLOCKUP_DETECTOR_COUNTS_HRTIMER */ @@ -559,8 +562,12 @@ static void watchdog_enable(unsigned int cpu) /* Initialize timestamp */ update_touch_ts(); /* Enable the hardlockup detector */ - if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) - watchdog_hardlockup_enable(cpu); + if (watchdog_enabled & WATCHDOG_HARDLOCKUP_ENABLED) { + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_enable(cpu); + else + sdei_watchdog_hardlockup_enable(cpu); + } } static void watchdog_disable(unsigned int cpu) @@ -574,7 +581,10 @@ static void watchdog_disable(unsigned int cpu) * delay between disabling the timer and disabling the hardlockup * detector causes a false positive. */ - watchdog_hardlockup_disable(cpu); + if (disable_sdei_nmi_watchdog) + watchdog_hardlockup_disable(cpu); + else + sdei_watchdog_hardlockup_disable(cpu); hrtimer_cancel(hrtimer); wait_for_completion(this_cpu_ptr(&softlockup_completion)); } @@ -1019,7 +1029,8 @@ void __init lockup_detector_init(void) cpumask_copy(&watchdog_cpumask, housekeeping_cpumask(HK_TYPE_TIMER)); - if (!watchdog_hardlockup_probe()) + if ((!disable_sdei_nmi_watchdog && !sdei_watchdog_hardlockup_probe()) || + !watchdog_hardlockup_probe()) watchdog_hardlockup_available = true; else allow_lockup_detector_init_retry = true; diff --git a/lib/Kconfig b/lib/Kconfig index c686f4adc1246a0e1d65f7c15a35d10e992ba2ca..777ba070bec78de4f8b3dacc1af4d29988a60eaa 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -532,7 +532,7 @@ config CHECK_SIGNATURE bool config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + bool "Force CPU masks off stack" help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. This is a bit more expensive, but avoids diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index fa307f93fa2e20d043b4a1c9094023aedd3d1d92..ee019f0f85b6ce2126c02927ee46e7ccd097ecdc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1045,6 +1045,12 @@ config HAVE_HARDLOCKUP_DETECTOR_BUDDY depends on SMP default y +config SDEI_WATCHDOG + bool "SDEI NMI Watchdog support" + depends on ARM_SDE_INTERFACE + depends on HARDLOCKUP_DETECTOR + select HARDLOCKUP_DETECTOR_COUNTS_HRTIMER + # # Global switch whether to build a hardlockup detector at all. It is available # only when the architecture supports at least one implementation. 
There are @@ -1061,6 +1067,7 @@ config HARDLOCKUP_DETECTOR depends on HAVE_HARDLOCKUP_DETECTOR_PERF || HAVE_HARDLOCKUP_DETECTOR_BUDDY || HAVE_HARDLOCKUP_DETECTOR_ARCH imply HARDLOCKUP_DETECTOR_PERF imply HARDLOCKUP_DETECTOR_BUDDY + imply SDEI_WATCHDOG imply HARDLOCKUP_DETECTOR_ARCH select LOCKUP_DETECTOR @@ -1255,6 +1262,13 @@ config SCHEDSTATS application, you can say N to avoid the very slight overhead this adds. +config SCHED_ACPU + bool "ACPU info: account idle time of smt to task" + depends on DEBUG_KERNEL && PROC_FS && SMP && SCHED_SMT + default y + help + Add ACPU info in /proc//sched. + endmenu config DEBUG_TIMEKEEPING diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1e3447bccdb14d126b3c108fd27ab652b5a3a94f..9ecb35b76076175ae988e9c2f5d63deed8262ffb 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -41,9 +41,25 @@ struct workqueue_struct *bdi_wq; static struct dentry *bdi_debug_root; +#ifdef CONFIG_CGROUP_WRITEBACK +static struct dentry *memcg_blkcg_file; +static const struct file_operations memcg_blkcg_debug_fops; +#endif + static void bdi_debug_init(void) { bdi_debug_root = debugfs_create_dir("bdi", NULL); + +#ifdef CONFIG_CGROUP_WRITEBACK + if (!bdi_debug_root) + return; + + if (!cgwb_v1) + return; + + memcg_blkcg_file = debugfs_create_file("bdi_wb_link", 0444, bdi_debug_root, + NULL, &memcg_blkcg_debug_fops); +#endif } static int bdi_debug_stats_show(struct seq_file *m, void *v) @@ -500,6 +516,210 @@ static void wb_exit(struct bdi_writeback *wb) #include +struct memcg_blkcg_link { + struct list_head list; + struct rcu_head rcu; + struct cgroup_subsys_state *memcg_css; + struct cgroup_subsys_state *blkcg_css; +}; + +static RADIX_TREE(memcg_blkcg_tree, GFP_ATOMIC); +static DEFINE_SPINLOCK(memcg_blkcg_tree_lock); + +static int memcg_blkcg_link_show(struct seq_file *m, void *v) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + seq_puts(m, "memory <---> blkio\n"); + rcu_read_lock(); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + seq_printf(m, "%s:%5lu <---> %s:%5lu\n", + link->memcg_css->cgroup->kn->name, + kernfs_ino(link->memcg_css->cgroup->kn), + (link->blkcg_css == blkcg_root_css) ? 
+ "root" : link->blkcg_css->cgroup->kn->name, + kernfs_ino(link->blkcg_css->cgroup->kn)); + } + rcu_read_unlock(); + + return 0; +} + +static int memcg_blkcg_link_open(struct inode *inode, struct file *file) +{ + return single_open(file, memcg_blkcg_link_show, inode->i_private); +} + +static const struct file_operations memcg_blkcg_debug_fops = { + .open = memcg_blkcg_link_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +int allocate_memcg_blkcg_links(int count, struct list_head *tmp_links) +{ + struct memcg_blkcg_link *link; + int i; + + if (!cgwb_v1) + return 0; + + for (i = 0; i < count; i++) { + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) { + free_memcg_blkcg_links(tmp_links); + return -ENOMEM; + } + list_add(&link->list, tmp_links); + } + return 0; +} + +static void link_free(struct rcu_head *head) +{ + struct memcg_blkcg_link *link = container_of(head, + struct memcg_blkcg_link, rcu); + kfree(link); +} + +void insert_memcg_blkcg_link(struct cgroup_subsys *ss, + struct list_head *tmp_links, + struct css_set *cset) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + struct cgroup_subsys_state *memcg_css; + int err; + + if (!cgwb_v1) + return; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + WARN_ON(list_empty(tmp_links)); + + memcg_css = cset->subsys[memory_cgrp_id]; + blkcg_css = cset->subsys[io_cgrp_id]; + + if ((memcg_css == &root_mem_cgroup->css) || + (blkcg_css == blkcg_root_css)) + return; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link && ((link->blkcg_css == blkcg_css) || + (link->blkcg_css == blkcg_root_css))) { + rcu_read_unlock(); + return; + } + rcu_read_unlock(); + + trace_insert_memcg_blkcg_link(memcg_css, blkcg_css, + link ? 
link->blkcg_css : NULL); + + spin_lock(&memcg_blkcg_tree_lock); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + blkcg_css = blkcg_root_css; + } + + link = list_first_entry(tmp_links, struct memcg_blkcg_link, list); + list_del_init(&link->list); + + link->memcg_css = memcg_css; + link->blkcg_css = blkcg_css; + err = radix_tree_insert(&memcg_blkcg_tree, memcg_css->id, link); + WARN_ON(err); + + spin_unlock(&memcg_blkcg_tree_lock); +} + +void free_memcg_blkcg_links(struct list_head *links_to_free) +{ + struct memcg_blkcg_link *link, *tmp_link; + + list_for_each_entry_safe(link, tmp_link, links_to_free, list) { + list_del(&link->list); + kfree(link); + } +} + +static void delete_memcg_link(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + + spin_lock(&memcg_blkcg_tree_lock); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + radix_tree_delete(&memcg_blkcg_tree, memcg_css->id); + call_rcu(&link->rcu, link_free); + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +static void delete_blkcg_link(struct cgroup_subsys_state *blkcg_css) +{ + struct memcg_blkcg_link *link; + struct radix_tree_iter iter; + void **slot; + + spin_lock(&memcg_blkcg_tree_lock); + radix_tree_for_each_slot(slot, &memcg_blkcg_tree, &iter, 0) { + link = *slot; + if (link->blkcg_css == blkcg_css) { + radix_tree_delete(&memcg_blkcg_tree, link->memcg_css->id); + call_rcu(&link->rcu, link_free); + } + } + spin_unlock(&memcg_blkcg_tree_lock); +} + +void delete_memcg_blkcg_link(struct cgroup_subsys *ss, + struct cgroup_subsys_state *css) +{ + if (!cgwb_v1) + return; + + if (ss->id != io_cgrp_id && ss->id != memory_cgrp_id) + return; + + if (ss->id == io_cgrp_id) + delete_blkcg_link(css); + if (ss->id == memory_cgrp_id) + delete_memcg_link(css); +} + +static struct cgroup_subsys_state *find_blkcg_css(struct cgroup_subsys_state *memcg_css) +{ + struct memcg_blkcg_link *link; + struct cgroup_subsys_state *blkcg_css; + + rcu_read_lock(); + link = radix_tree_lookup(&memcg_blkcg_tree, memcg_css->id); + if (link) { + blkcg_css = link->blkcg_css; + if (css_tryget_online(blkcg_css)) + goto out; + } + + /* + * If not blkcg_root_css and tryget failed, + * get a reference of blkcg_root_css and return. + */ + blkcg_css = blkcg_root_css; + css_get(blkcg_css); + +out: + rcu_read_unlock(); + return blkcg_css; +} + /* * cgwb_lock protects bdi->cgwb_tree, blkcg->cgwb_list, offline_cgwbs and * memcg->cgwb_list. bdi->cgwb_tree is also RCU protected. 
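A minimal sketch of how a caller is expected to use find_blkcg_css() above (hypothetical helper, not part of this patch): the returned css always carries a reference, either on the linked blkcg if it is still online or on blkcg_root_css, so every lookup must be paired with css_put()::

	static void memcg_wb_link_example(struct cgroup_subsys_state *memcg_css)
	{
		struct cgroup_subsys_state *blkcg_css = find_blkcg_css(memcg_css);
		struct list_head *cgwb_list = blkcg_get_cgwb_list(blkcg_css);

		/* ... look up or create the matching bdi_writeback ... */
		(void)cgwb_list;
		css_put(blkcg_css);
	}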
@@ -584,7 +804,10 @@ static int cgwb_create(struct backing_dev_info *bdi, int ret = 0; memcg = mem_cgroup_from_css(memcg_css); - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); memcg_cgwb_list = &memcg->cgwb_list; blkcg_cgwb_list = blkcg_get_cgwb_list(blkcg_css); @@ -705,7 +928,10 @@ struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi, struct cgroup_subsys_state *blkcg_css; /* see whether the blkcg association has changed */ - blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + blkcg_css = cgroup_get_e_css(memcg_css->cgroup, &io_cgrp_subsys); + else + blkcg_css = find_blkcg_css(memcg_css); if (unlikely(wb->blkcg_css != blkcg_css || !wb_tryget(wb))) wb = NULL; css_put(blkcg_css); diff --git a/mm/cma.c b/mm/cma.c index 2b2494fd6b59a6832b16c83b06ce2d6d454a66de..0238fc625127366b7354172f611f973f8e3c0c5f 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -36,7 +36,10 @@ #include "internal.h" #include "cma.h" -struct cma cma_areas[MAX_CMA_AREAS]; +static struct cma cma_areas_data[MAX_CMA_AREAS]; +static unsigned int cma_areas_size = MAX_CMA_AREAS; +struct cma *cma_areas = cma_areas_data; + unsigned cma_area_count; static DEFINE_MUTEX(cma_mutex); @@ -159,6 +162,25 @@ void __init cma_reserve_pages_on_error(struct cma *cma) cma->reserve_pages_on_error = true; } +int __init cma_alloc_areas(unsigned int max_cma_size) +{ + struct cma *data; + + if (max_cma_size <= MAX_CMA_AREAS) + return 0; + + if (cma_area_count || cma_areas != cma_areas_data) + return -EPERM; + + data = memblock_alloc(max_cma_size * sizeof(*cma_areas), SMP_CACHE_BYTES); + if (!data) + return -ENOMEM; + + cma_areas = data; + cma_areas_size = max_cma_size; + return 0; +} + /** * cma_init_reserved_mem() - create custom contiguous area from reserved memory * @base: Base address of the reserved area @@ -179,7 +201,7 @@ int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size, struct cma *cma; /* Sanity checks */ - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -256,7 +278,7 @@ int __init cma_declare_contiguous_nid(phys_addr_t base, pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n", __func__, &size, &base, &limit, &alignment); - if (cma_area_count == ARRAY_SIZE(cma_areas)) { + if (cma_area_count == cma_areas_size) { pr_err("Not enough slots for CMA reserved regions!\n"); return -ENOSPC; } @@ -474,10 +496,12 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, spin_unlock_irq(&cma->lock); pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit); - mutex_lock(&cma_mutex); + if (!cma->no_mutex) + mutex_lock(&cma_mutex); ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, GFP_KERNEL | (no_warn ? 
__GFP_NOWARN : 0)); - mutex_unlock(&cma_mutex); + if (!cma->no_mutex) + mutex_unlock(&cma_mutex); if (ret == 0) { page = pfn_to_page(pfn); break; @@ -591,3 +615,11 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data) return 0; } + +void cma_enable_concurrency(struct cma *cma) +{ + if (!cma) + return; + + cma->no_mutex = true; +} diff --git a/mm/cma.h b/mm/cma.h index 88a0595670b766cd9780476f7dacb71fd9979e34..50275c1d98cc656497018577da57bb9417e47846 100644 --- a/mm/cma.h +++ b/mm/cma.h @@ -16,6 +16,7 @@ struct cma { unsigned long *bitmap; unsigned int order_per_bit; /* Order of pages represented by one bit */ spinlock_t lock; + bool no_mutex; #ifdef CONFIG_CMA_DEBUGFS struct hlist_head mem_head; spinlock_t mem_head_lock; @@ -33,7 +34,7 @@ struct cma { bool reserve_pages_on_error; }; -extern struct cma cma_areas[MAX_CMA_AREAS]; +extern struct cma *cma_areas; extern unsigned cma_area_count; static inline unsigned long cma_bitmap_maxno(struct cma *cma) diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index ce06b2884789a2bd97016b93916c8aa81dc288b7..9d4d27399f801530501e9bfb9fddf9cb973d505c 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -243,27 +243,6 @@ early_memremap_prot(resource_size_t phys_addr, unsigned long size, } #endif -#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT) - -void __init copy_from_early_mem(void *dest, phys_addr_t src, unsigned long size) -{ - unsigned long slop, clen; - char *p; - - while (size) { - slop = offset_in_page(src); - clen = size; - if (clen > MAX_MAP_CHUNK - slop) - clen = MAX_MAP_CHUNK - slop; - p = early_memremap(src & PAGE_MASK, clen + slop); - memcpy(dest, p + slop, clen); - early_memunmap(p, clen + slop); - dest += clen; - src += clen; - size -= clen; - } -} - #else /* CONFIG_MMU */ void __init __iomem * diff --git a/mm/filemap.c b/mm/filemap.c index d40a20c9d59f16a906872f2f0adb44399fa9de55..86a9ba1cbf81add099734001e434fb72603a67fe 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1626,8 +1626,10 @@ EXPORT_SYMBOL(folio_end_writeback); */ void __folio_lock(struct folio *folio) { + task_set_wait_res(TASK_WAIT_FOLIO, folio); folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE, EXCLUSIVE); + task_clear_wait_res(); } EXPORT_SYMBOL(__folio_lock); diff --git a/mm/gup.c b/mm/gup.c index 2f8a2d89fde19d1f2367b85e27f25f20e19e3151..29a720dfc16355f6032b002795115f0380094ff3 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2054,7 +2054,7 @@ static unsigned long collect_longterm_unpinnable_pages( continue; } - if (!folio_test_lru(folio) && drain_allow) { + if (drain_allow) { lru_add_drain_all(); drain_allow = false; } diff --git a/mm/kfence/core.c b/mm/kfence/core.c index 3872528d096380b0c412897e7cc7b56b4cb59eb9..d5329b1c560ba499932f47ad318096d0b92f7860 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #include #include #include +#include #include #include @@ -40,8 +42,9 @@ ({ \ const bool __cond = WARN_ON(cond); \ if (unlikely(__cond)) { \ - WRITE_ONCE(kfence_enabled, false); \ disabled_by_warn = true; \ + WRITE_ONCE(kfence_enabled, false); \ + static_branch_disable(&kfence_allocation_key); \ } \ __cond; \ }) @@ -50,8 +53,27 @@ static bool kfence_enabled __read_mostly; static bool disabled_by_warn __read_mostly; +/* true = node mode, false = global mode. 
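+ * Selected through the "kfence.pool_mode" boot/module parameter
+ * (e.g. kfence.pool_mode=node); see param_set_pool_mode() below.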
 */
+static bool kfence_pool_node_mode __read_mostly;
+static DEFINE_MUTEX(kfence_mutex);
+unsigned long kfence_num_objects __read_mostly = CONFIG_KFENCE_NUM_OBJECTS;
+EXPORT_SYMBOL_GPL(kfence_num_objects);
+static unsigned long kfence_num_objects_snap __read_mostly; /* Records num_objects when running in upstream-compatible mode. */
+static int *kfence_node_map __read_mostly; /* Map real node to "virtual kfence node". */
+bool kfence_panic_on_fault __read_mostly;
+struct kfence_alloc_node_cond {
+	long need;
+	long allocated;
+};
+/*
+ * An array to record how many objects need to be allocated
+ * and how many have been allocated on each node.
+ */
+static struct kfence_alloc_node_cond *kfence_num_objects_stat;
+/* Only used during boot; records how __kfence_pool_area[] is partitioned. */
+static unsigned long kfence_nr_areas_per_node;
 
-unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
+long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
 EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 
 #ifdef MODULE_PARAM_PREFIX
@@ -59,25 +81,41 @@ EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */
 #endif
 #define MODULE_PARAM_PREFIX "kfence."
 
-static int kfence_enable_late(void);
+DEFINE_STATIC_KEY_FALSE(kfence_short_canary);
+DEFINE_STATIC_KEY_FALSE(kfence_skip_interval);
+static DEFINE_STATIC_KEY_FALSE(kfence_once_enabled);
+DEFINE_STATIC_KEY_TRUE(kfence_order0_page);
+
+#define KFENCE_MAX_OBJECTS_PER_AREA (PUD_SIZE / PAGE_SIZE / 2 - 1)
+
+static void kfence_enable_late(void);
 static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
 {
-	unsigned long num;
-	int ret = kstrtoul(val, 0, &num);
+	long num;
+	int ret = kstrtol(val, 0, &num);
 
 	if (ret < 0)
 		return ret;
 
-	/* Using 0 to indicate KFENCE is disabled. */
-	if (!num && READ_ONCE(kfence_enabled)) {
-		pr_info("disabled\n");
-		WRITE_ONCE(kfence_enabled, false);
+	if (system_state == SYSTEM_BOOTING) {
+		*((long *)kp->arg) = num;
+		return 0;
 	}
 
-	*((unsigned long *)kp->arg) = num;
+	/* Do not allow the sample interval to switch between positive and negative. */
+	if ((kfence_sample_interval > 0 && num < 0) ||
+	    (kfence_sample_interval < 0 && num > 0)) {
+		return -EINVAL;
+	}
+
+	if (!num) /* Using 0 to indicate KFENCE is disabled. */
+		kfence_disable();
+
+	*((long *)kp->arg) = num;
+
+	if (num && !READ_ONCE(kfence_enabled))
+		return disabled_by_warn ? -EINVAL : (kfence_enable_late(), 0);
 
-	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
-		return disabled_by_warn ?
-EINVAL : kfence_enable_late(); return 0; } @@ -86,7 +124,7 @@ static int param_get_sample_interval(char *buffer, const struct kernel_param *kp if (!READ_ONCE(kfence_enabled)) return sprintf(buffer, "0\n"); - return param_get_ulong(buffer, kp); + return param_get_long(buffer, kp); } static const struct kernel_param_ops sample_interval_param_ops = { @@ -95,6 +133,134 @@ static const struct kernel_param_ops sample_interval_param_ops = { }; module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600); +static int param_set_num_objects(const char *val, const struct kernel_param *kp) +{ + unsigned long num; + int ret = kstrtoul(val, 0, &num); + + if (ret < 0) + return ret; + +#ifdef CONFIG_ARM64 + if (system_state == SYSTEM_BOOTING) + return 0; +#endif + + if (!num) + return -EINVAL; + + mutex_lock(&kfence_mutex); + + if (READ_ONCE(kfence_enabled)) { + ret = -EBUSY; /* can not change num_objects when enabled */ + goto out_unlock; + } + + *((unsigned long *)kp->arg) = num; + ret = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + return ret; +} + +static int param_get_num_objects(char *buffer, const struct kernel_param *kp) +{ + return param_get_ulong(buffer, kp); +} + +static const struct kernel_param_ops num_objects_param_ops = { + .set = param_set_num_objects, + .get = param_get_num_objects, +}; +module_param_cb(num_objects, &num_objects_param_ops, &kfence_num_objects, 0600); + +static int param_set_pool_mode(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (READ_ONCE(kfence_enabled)) + return -EINVAL; /* can not change mode when enabled */ + + if (!strcmp(s, "global")) + mode = false; + else if (!strcmp(s, "node")) + mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_pool_mode(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? "node" : "global"); +} + +static const struct kernel_param_ops pool_mode_param_ops = { + .set = param_set_pool_mode, + .get = param_get_pool_mode, +}; +module_param_cb(pool_mode, &pool_mode_param_ops, &kfence_pool_node_mode, 0600); + +static int param_set_order0_page(const char *val, const struct kernel_param *kp) +{ + bool res; + int ret = kstrtobool(val, &res); + + if (ret < 0) + return ret; + + if (res) + static_branch_enable(&kfence_order0_page); + else + static_branch_disable(&kfence_order0_page); + + return 0; +} + +static int param_get_order0_page(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%d\n", static_branch_likely(&kfence_order0_page) ? 1 : 0); +} + +static const struct kernel_param_ops order0_page_param_ops = { + .set = param_set_order0_page, + .get = param_get_order0_page, +}; +module_param_cb(order0_page, &order0_page_param_ops, NULL, 0600); + +static int param_set_fault(const char *val, const struct kernel_param *kp) +{ + bool mode; + char *s = strstrip((char *)val); + + if (!strcmp(s, "report")) + mode = false; + else if (!strcmp(s, "panic")) + mode = true; + else + return -EINVAL; + + *((bool *)kp->arg) = mode; + + return 0; +} + +static int param_get_fault(char *buffer, const struct kernel_param *kp) +{ + return sprintf(buffer, "%s\n", *(bool *)kp->arg ? 
"panic" : "report"); +} + +static const struct kernel_param_ops fault_param_ops = { + .set = param_set_fault, + .get = param_get_fault, +}; +module_param_cb(fault, &fault_param_ops, &kfence_panic_on_fault, 0600); + /* Pool usage% threshold when currently covered allocations are skipped. */ static unsigned long kfence_skip_covered_thresh __read_mostly = 75; module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644); @@ -107,28 +273,39 @@ module_param_named(deferrable, kfence_deferrable, bool, 0444); static bool kfence_check_on_panic __read_mostly; module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444); -/* The pool of pages used for guard pages and objects. */ -char *__kfence_pool __read_mostly; -EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */ - /* - * Per-object metadata, with one-to-one mapping of object metadata to - * backing pages (in __kfence_pool). + * The pool of pages used for guard pages and objects. + * Only used in booting init state. Will be cleared after that. */ -static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0); -struct kfence_metadata *kfence_metadata __read_mostly; +char **__kfence_pool_area; /* - * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache(). - * So introduce kfence_metadata_init to initialize metadata, and then make - * kfence_metadata visible after initialization is successful. This prevents - * potential UAF or access to uninitialized metadata. + * The pool of pages should be reserved earlier than kfence initialization. It's + * only assigned in arm64 architecture. */ -static struct kfence_metadata *kfence_metadata_init __read_mostly; +char *__kfence_pool_early_init; + +/* The binary tree maintaining all kfence pool areas */ +struct rb_root kfence_pool_root = RB_ROOT; +EXPORT_SYMBOL_GPL(kfence_pool_root); /* Freelist with available objects. */ -static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist); -static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */ +struct kfence_freelist_node { + struct list_head freelist; + raw_spinlock_t lock; +}; + +struct kfence_freelist_cpu { + struct list_head freelist; + unsigned long count; +}; + +struct kfence_freelist { + struct kfence_freelist_node *node; + struct kfence_freelist_cpu __percpu *cpu; +}; +static struct kfence_freelist freelist; +static atomic_t kfence_flush_res, kfence_refkill_res; /* * The static key to set up a KFENCE allocation; or if static keys are not used @@ -150,11 +327,11 @@ atomic_t kfence_allocation_gate = ATOMIC_INIT(1); * P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)) ^ HNUM */ #define ALLOC_COVERED_HNUM 2 -#define ALLOC_COVERED_ORDER (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2) -#define ALLOC_COVERED_SIZE (1 << ALLOC_COVERED_ORDER) -#define ALLOC_COVERED_HNEXT(h) hash_32(h, ALLOC_COVERED_ORDER) +static unsigned long alloc_covered_order __ro_after_init; +#define ALLOC_COVERED_HNEXT(h) hash_32(h, alloc_covered_order) +#define ALLOC_COVERED_SIZE (1 << alloc_covered_order) #define ALLOC_COVERED_MASK (ALLOC_COVERED_SIZE - 1) -static atomic_t alloc_covered[ALLOC_COVERED_SIZE]; +static atomic_t *alloc_covered __read_mostly; /* Stack depth used to determine uniqueness of an allocation. 
*/ #define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8) @@ -171,18 +348,27 @@ enum kfence_counter_id { KFENCE_COUNTER_ALLOCS, KFENCE_COUNTER_FREES, KFENCE_COUNTER_ZOMBIES, + KFENCE_COUNTER_ALLOCATED_PAGE, + KFENCE_COUNTER_ALLOCS_PAGE, + KFENCE_COUNTER_FREES_PAGE, KFENCE_COUNTER_BUGS, KFENCE_COUNTER_SKIP_INCOMPAT, KFENCE_COUNTER_SKIP_CAPACITY, KFENCE_COUNTER_SKIP_COVERED, KFENCE_COUNTER_COUNT, }; -static atomic_long_t counters[KFENCE_COUNTER_COUNT]; +struct kfence_counter { + s64 counter[KFENCE_COUNTER_COUNT]; +}; +static struct kfence_counter __percpu *counters; static const char *const counter_names[] = { - [KFENCE_COUNTER_ALLOCATED] = "currently allocated", - [KFENCE_COUNTER_ALLOCS] = "total allocations", - [KFENCE_COUNTER_FREES] = "total frees", - [KFENCE_COUNTER_ZOMBIES] = "zombie allocations", + [KFENCE_COUNTER_ALLOCATED] = "currently slab allocated", + [KFENCE_COUNTER_ALLOCS] = "total slab allocations", + [KFENCE_COUNTER_FREES] = "total slab frees", + [KFENCE_COUNTER_ZOMBIES] = "zombie slab allocations", + [KFENCE_COUNTER_ALLOCATED_PAGE] = "currently page allocated", + [KFENCE_COUNTER_ALLOCS_PAGE] = "total page allocations", + [KFENCE_COUNTER_FREES_PAGE] = "total page frees", [KFENCE_COUNTER_BUGS] = "total bugs", [KFENCE_COUNTER_SKIP_INCOMPAT] = "skipped allocations (incompatible)", [KFENCE_COUNTER_SKIP_CAPACITY] = "skipped allocations (capacity)", @@ -194,13 +380,28 @@ static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT); static inline bool should_skip_covered(void) { - unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100; + unsigned long thresh; + s64 sum; + int cpu; + + /* Only use this feature in upstream mode */ + if (!kfence_num_objects_snap) + return false; + + thresh = (kfence_num_objects_snap * kfence_skip_covered_thresh) / 100; + sum = 0; + /* This may take some time but should be acceptable in sampling mode. */ + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[KFENCE_COUNTER_ALLOCATED]; - return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh; + return sum > thresh; } static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries) { + if (!kfence_num_objects_snap) + return 0; + num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH); num_entries = filter_irq_stacks(stack_entries, num_entries); return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed); @@ -210,10 +411,14 @@ static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries * Adds (or subtracts) count @val for allocation stack trace hash * @alloc_stack_hash from Counting Bloom filter. 
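 *
 * Only the first UNIQUE_ALLOC_STACK_DEPTH filtered stack entries feed
 * the hash (see get_alloc_stack_hash() above). For a feel of the
 * numbers: with ALLOC_COVERED_HNUM == 2, the formula earlier in this
 * file gives a false-positive probability of
 * (1 - e^(-2 * alloc_traces/SIZE))^2, i.e. roughly 0.75 once
 * alloc_traces == SIZE.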
 */
-static void alloc_covered_add(u32 alloc_stack_hash, int val)
+static inline void alloc_covered_add(u32 alloc_stack_hash, int val)
 {
 	int i;
 
+	/* Only use this feature in upstream mode */
+	if (!kfence_num_objects_snap)
+		return;
+
 	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
 		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
 		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
@@ -249,14 +454,14 @@ static bool kfence_unprotect(unsigned long addr)
 
 static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
 {
-	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
-	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];
+	struct kfence_pool_area *kpa = meta->kpa;
+	unsigned long offset = (meta - kpa->meta + 1) * PAGE_SIZE * 2;
+	unsigned long pageaddr = (unsigned long)&kpa->addr[offset];
 
 	/* The checks do not affect performance; only called from slow-paths. */
 
 	/* Only call with a pointer into kfence_metadata. */
-	if (KFENCE_WARN_ON(meta < kfence_metadata ||
-			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
+	if (KFENCE_WARN_ON(meta < kpa->meta || meta >= kpa->meta + kpa->nr_objects))
 		return 0;
 
 	/*
@@ -280,8 +485,6 @@ metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state nex
 	struct kfence_track *track =
 		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;
 
-	lockdep_assert_held(&meta->lock);
-
 	if (stack_entries) {
 		memcpy(track->stack_entries, stack_entries,
 		       num_stack_entries * sizeof(stack_entries[0]));
@@ -314,7 +517,7 @@ static inline bool check_canary_byte(u8 *addr)
 	if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
 		return true;
 
-	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
+	raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++;
 
 	meta = addr_to_metadata((unsigned long)addr);
 	raw_spin_lock_irqsave(&meta->lock, flags);
@@ -327,24 +530,36 @@ static inline bool check_canary_byte(u8 *addr)
 static inline void set_canary(const struct kfence_metadata *meta)
 {
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
-	unsigned long addr = pageaddr;
+	unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE;
+
+	/* Writing the canaries dominates the cost, so shrink the range when running without a sample interval limit. */
+	if (static_branch_likely(&kfence_short_canary)) {
+		start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start);
+		end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end);
+	}
 
 	/*
 	 * The canary may be written to part of the object memory, but it does
 	 * not affect it. The user should initialize the object before using it.
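	 *
	 * Illustrative layout of a guarded page (canary bytes fill everything
	 * outside the object, or just the cachelines around it when
	 * kfence_short_canary is enabled):
	 *
	 *   pageaddr                                     pageaddr + PAGE_SIZE
	 *   | canary ... | object: [meta->addr, meta->addr + meta->size) | ... canary |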
	 */
-	for (; addr < meta->addr; addr += sizeof(u64))
+	for (addr = start; addr < meta->addr; addr += sizeof(u64))
 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 
 	addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
-	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
+	for (; addr < end; addr += sizeof(u64))
 		*((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
 }
 
 static inline void check_canary(const struct kfence_metadata *meta)
 {
 	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
-	unsigned long addr = pageaddr;
+	unsigned long addr, start = pageaddr, end = pageaddr + PAGE_SIZE;
+
+	/* Checking the canaries dominates the cost, so shrink the range when running without a sample interval limit. */
+	if (static_branch_likely(&kfence_short_canary)) {
+		start = max(ALIGN_DOWN(meta->addr - 1, L1_CACHE_BYTES), start);
+		end = min(ALIGN(meta->addr + meta->size + 1, L1_CACHE_BYTES), end);
+	}
 
 	/*
 	 * We'll iterate over each canary byte per-side until a corrupted byte
@@ -356,7 +571,7 @@ static inline void check_canary(const struct kfence_metadata *meta)
 	 */
 
 	/* Apply to left of object. */
-	for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
+	for (addr = start; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
 			break;
 	}
 
@@ -376,7 +591,7 @@
 		if (unlikely(!check_canary_byte((u8 *)addr)))
 			return;
 	}
-	for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
+	for (; addr < end; addr += sizeof(u64)) {
 		if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {
 			for (; addr - pageaddr < PAGE_SIZE; addr++) {
@@ -387,47 +602,103 @@
 		}
 	}
 }
 
-static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
-				  unsigned long *stack_entries, size_t num_stack_entries,
-				  u32 alloc_stack_hash)
+static inline struct kfence_metadata *
+get_free_meta_from_node(struct kfence_freelist_node *kfence_freelist)
 {
-	struct kfence_metadata *meta = NULL;
+	struct kfence_metadata *object = NULL;
 	unsigned long flags;
-	struct slab *slab;
-	void *addr;
-	const bool random_right_allocate = get_random_u32_below(2);
-	const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
-				  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);
 
-	/* Try to obtain a free object. */
-	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
-	if (!list_empty(&kfence_freelist)) {
-		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
-		list_del_init(&meta->list);
-	}
-	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
-	if (!meta) {
-		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
-		return NULL;
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	if (!list_empty(&kfence_freelist->freelist)) {
+		object = list_entry(kfence_freelist->freelist.next, struct kfence_metadata, list);
+		list_del_init(&object->list);
 	}
+	if (object)
+		percpu_ref_get(&object->kpa->refcnt);
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
 
-	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
-		/*
-		 * This is extremely unlikely -- we are reporting on a
-		 * use-after-free, which locked meta->lock, and the reporting
-		 * code via printk calls kmalloc() which ends up in
-		 * kfence_alloc() and tries to grab the same object that we're
-		 * reporting on. While it has never been observed, lockdep does
-		 * report that there is a possibility of deadlock. Fix it by
-		 * using trylock and bailing out gracefully.
- */ - raw_spin_lock_irqsave(&kfence_freelist_lock, flags); - /* Put the object back on the freelist. */ - list_add_tail(&meta->list, &kfence_freelist); - raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags); + return object; +} - return NULL; +#define KFENCE_FREELIST_PERCPU_SIZE 100 + +static struct kfence_metadata * +get_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct kfence_metadata *object = NULL; + struct list_head *entry = &kfence_freelist->freelist; + + KFENCE_WARN_ON(!list_empty(&c->freelist)); + + raw_spin_lock(&kfence_freelist->lock); + + if (list_empty(&kfence_freelist->freelist)) + goto out; + + object = list_first_entry(entry, struct kfence_metadata, list); + list_del_init(&object->list); + + do { + entry = READ_ONCE(entry->next); + + if (entry == &kfence_freelist->freelist) { + entry = entry->prev; + break; + } + + c->count++; + } while (c->count < KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&c->freelist, &kfence_freelist->freelist, entry); + +out: + raw_spin_unlock(&kfence_freelist->lock); + + return object; +} + +static struct kfence_metadata *get_free_meta(int real_node) +{ + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist; + struct kfence_metadata *object; + int node = kfence_node_map[real_node]; + + if (node >= 0) + kfence_freelist = &freelist.node[node]; + else + kfence_freelist = &freelist.node[real_node]; + + /* If target page not on current node, directly get from its nodelist */ + if (unlikely(node != kfence_node_map[numa_node_id()] || kfence_num_objects_snap)) + return get_free_meta_from_node(kfence_freelist); + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + if (unlikely(!c->count)) { + object = get_free_meta_slowpath(c, kfence_freelist); + } else { + object = list_first_entry(&c->freelist, struct kfence_metadata, list); + list_del_init(&object->list); + c->count--; } + if (object) + percpu_ref_get(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); + + return object; +} + +static inline void __init_meta(struct kfence_metadata *meta, size_t size, struct kmem_cache *cache, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); meta->addr = metadata_to_pageaddr(meta); /* Unprotect if we're reusing this page. */ @@ -442,27 +713,72 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g * is that the out-of-bounds accesses detected are deterministic for * such allocations. */ - if (random_right_allocate) { + if (cache && this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS] % 2) { /* Allocate on the "right" side, re-calculate address. */ meta->addr += PAGE_SIZE - size; meta->addr = ALIGN_DOWN(meta->addr, cache->align); } - addr = (void *)meta->addr; - /* Update remaining metadata. */ metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries); /* Pairs with READ_ONCE() in kfence_shutdown_cache(). 
*/ WRITE_ONCE(meta->cache, cache); meta->size = size; meta->alloc_stack_hash = alloc_stack_hash; +} + +static void put_free_meta(struct kfence_metadata *object); +static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp, + unsigned long *stack_entries, size_t num_stack_entries, + u32 alloc_stack_hash, int node) +{ + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; + unsigned long flags; + struct page *page; + struct slab *slab; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); + + /* Try to obtain a free object. */ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; + } + + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + */ + /* Put the object back on the freelist. */ + put_free_meta(meta); + + return NULL; + } + + __init_meta(meta, size, cache, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); + addr = (void *)meta->addr; alloc_covered_add(alloc_stack_hash, 1); /* Set required slab fields. */ - slab = virt_to_slab((void *)meta->addr); + page = virt_to_page(addr); + slab = page_slab(page); + __SetPageSlab(page); slab->slab_cache = cache; +#ifdef CONFIG_MEMCG + slab->memcg_data = (unsigned long)&meta->objcg | MEMCG_DATA_OBJCGS; +#endif #if defined(CONFIG_SLUB) slab->objects = 1; #elif defined(CONFIG_SLAB) @@ -485,81 +801,238 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g if (random_fault) kfence_protect(meta->addr); /* Random "faults" by protecting the object. */ - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]); - atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]); + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS]++; return addr; } -static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie) +static struct page *kfence_guarded_alloc_page(int node, unsigned long *stack_entries, + size_t num_stack_entries, u32 alloc_stack_hash) { - struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + struct kfence_metadata *meta; unsigned long flags; - bool init; - - raw_spin_lock_irqsave(&meta->lock, flags); + struct page *page; + void *addr; + const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS && + !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS); - if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { - /* Invalid or double-free, bail out. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); - kfence_report_error((unsigned long)addr, false, NULL, meta, - KFENCE_ERROR_INVALID_FREE); - raw_spin_unlock_irqrestore(&meta->lock, flags); - return; + /* Try to obtain a free object. 
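+	 * get_free_meta() tries the per-cpu cache first and falls back to
+	 * the per-node freelist, taking a reference on the owning kpa.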
*/ + meta = get_free_meta(node); + if (!meta) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_CAPACITY]++; + return NULL; } - /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ - kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, - KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, - &assert_page_exclusive); - - if (CONFIG_KFENCE_STRESS_TEST_FAULTS) - kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ + if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) { + /* + * This is extremely unlikely -- we are reporting on a + * use-after-free, which locked meta->lock, and the reporting + * code via printk calls kmalloc() which ends up in + * kfence_alloc() and tries to grab the same object that we're + * reporting on. While it has never been observed, lockdep does + * report that there is a possibility of deadlock. Fix it by + * using trylock and bailing out gracefully. + * Put the object back on the freelist. + */ + put_free_meta(meta); - /* Restore page protection if there was an OOB access. */ - if (meta->unprotected_page) { - memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); - kfence_protect(meta->unprotected_page); - meta->unprotected_page = 0; + return NULL; } - /* Mark the object as freed. */ - metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); - init = slab_want_init_on_free(meta->cache); + __init_meta(meta, PAGE_SIZE, NULL, stack_entries, num_stack_entries, alloc_stack_hash); + raw_spin_unlock_irqrestore(&meta->lock, flags); - alloc_covered_add(meta->alloc_stack_hash, -1); + addr = (void *)meta->addr; + alloc_covered_add(alloc_stack_hash, 1); - /* Check canary bytes for memory corruption. */ - check_canary(meta); + page = virt_to_page(addr); + if (PageSlab(page)) { + struct slab *slab = page_slab(page); - /* - * Clear memory if init-on-free is set. While we protect the page, the - * data is still there, and after a use-after-free is detected, we - * unprotect the page, so the data is still accessible. - */ - if (!zombie && unlikely(init)) - memzero_explicit(addr, meta->size); + /* + * For performance considerations, + * we clean slab info here (when allocating pages). + * So that slabs can reuse their flags and obj_cgroups + * without being cleared or freed if the previous user + * is slab too. + */ + slab->slab_cache = NULL; +#ifdef CONFIG_MEMCG + page->memcg_data = 0; +#endif + __ClearPageSlab(page); + } + page->mapping = NULL; +#ifdef CONFIG_DEBUG_VM + atomic_set(&page->_refcount, 0); +#endif - /* Protect to detect use-after-frees. */ - kfence_protect((unsigned long)addr); + if (random_fault) + kfence_protect(meta->addr); /* Random "faults" by protecting the object. 
*/ + + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]++; + this_cpu_counter->counter[KFENCE_COUNTER_ALLOCS_PAGE]++; + + return page; +} + +static inline void put_free_meta_to_node(struct kfence_metadata *object, + struct kfence_freelist_node *kfence_freelist) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + list_add_tail(&object->list, &kfence_freelist->freelist); + percpu_ref_put(&object->kpa->refcnt); + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); +} + +static void put_free_meta_slowpath(struct kfence_freelist_cpu *c, + struct kfence_freelist_node *kfence_freelist) +{ + struct list_head *entry = &c->freelist, new_list; + + do { + entry = entry->next; + c->count--; + } while (c->count > KFENCE_FREELIST_PERCPU_SIZE); + + list_cut_position(&new_list, &c->freelist, entry); + raw_spin_lock(&kfence_freelist->lock); + list_splice_tail(&new_list, &kfence_freelist->freelist); + raw_spin_unlock(&kfence_freelist->lock); +} + +static void put_free_meta(struct kfence_metadata *object) +{ + int node = object->kpa->node; + unsigned long flags; + struct kfence_freelist_cpu *c; + struct kfence_freelist_node *kfence_freelist = &freelist.node[node]; + + KFENCE_WARN_ON(!list_empty(&object->list)); + + /* If meta not on current node, just return it to its own nodelist */ + if (unlikely(!kfence_node_map || node != kfence_node_map[numa_node_id()] || + kfence_num_objects_snap)) { + put_free_meta_to_node(object, kfence_freelist); + return; + } + + local_irq_save(flags); + c = get_cpu_ptr(freelist.cpu); + + list_add_tail(&object->list, &c->freelist); + c->count++; + + if (unlikely(c->count == KFENCE_FREELIST_PERCPU_SIZE * 2)) + put_free_meta_slowpath(c, kfence_freelist); + + percpu_ref_put(&object->kpa->refcnt); + + put_cpu_ptr(c); + local_irq_restore(flags); +} + +static inline bool __free_meta(void *addr, struct kfence_metadata *meta, bool zombie, bool is_page) +{ + struct kcsan_scoped_access assert_page_exclusive; + struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters); + unsigned long flags; + bool init; + + raw_spin_lock_irqsave(&meta->lock, flags); + + if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) { + /* Invalid or double-free, bail out. */ + this_cpu_counter->counter[KFENCE_COUNTER_BUGS]++; + kfence_report_error((unsigned long)addr, false, NULL, meta, + KFENCE_ERROR_INVALID_FREE); + raw_spin_unlock_irqrestore(&meta->lock, flags); + return false; + } + + /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */ + kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE, + KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT, + &assert_page_exclusive); + + if (CONFIG_KFENCE_STRESS_TEST_FAULTS) + kfence_unprotect((unsigned long)addr); /* To check canary bytes. */ + + /* Restore page protection if there was an OOB access. */ + if (meta->unprotected_page) { + memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE); + kfence_protect(meta->unprotected_page); + meta->unprotected_page = 0; + } + + /* Mark the object as freed. */ + metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0); + if (!is_page) + init = slab_want_init_on_free(meta->cache); + + raw_spin_unlock_irqrestore(&meta->lock, flags); + + alloc_covered_add(meta->alloc_stack_hash, -1); + + if (!is_page) { + /* Check canary bytes for memory corruption. */ + check_canary(meta); + + /* + * Clear memory if init-on-free is set. 
While we protect the page, the
+	 * data is still there, and after a use-after-free is detected, we
+	 * unprotect the page, so the data is still accessible.
+	 */
+		if (!zombie && unlikely(init))
+			memzero_explicit(addr, meta->size);
+	}
+
+	/* Protect to detect use-after-frees. */
+	kfence_protect((unsigned long)addr);
+
+	kcsan_end_scoped_access(&assert_page_exclusive);
+
+	return true;
+}
+
+static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
+{
+	struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters);
+
+	if (!__free_meta(addr, meta, zombie, false))
+		return;
 
-	kcsan_end_scoped_access(&assert_page_exclusive);
 	if (!zombie) {
 		/* Add it to the tail of the freelist for reuse. */
-		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
-		KFENCE_WARN_ON(!list_empty(&meta->list));
-		list_add_tail(&meta->list, &kfence_freelist);
-		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
+		put_free_meta(meta);
 
-		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
-		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
+		this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED]--;
+		this_cpu_counter->counter[KFENCE_COUNTER_FREES]++;
 	} else {
 		/* See kfence_shutdown_cache(). */
-		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
+		this_cpu_counter->counter[KFENCE_COUNTER_ZOMBIES]++;
 	}
 }
 
+static void kfence_guarded_free_page(struct page *page, void *addr, struct kfence_metadata *meta)
+{
+	struct kfence_counter *this_cpu_counter = raw_cpu_ptr(counters);
+
+	if (!__free_meta(addr, meta, false, true))
+		return;
+
+	put_free_meta(meta);
+
+	this_cpu_counter->counter[KFENCE_COUNTER_ALLOCATED_PAGE]--;
+	this_cpu_counter->counter[KFENCE_COUNTER_FREES_PAGE]++;
+}
+
 static void rcu_guarded_free(struct rcu_head *h)
 {
 	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);
@@ -567,22 +1040,39 @@ static void rcu_guarded_free(struct rcu_head *h)
 	kfence_guarded_free((void *)meta->addr, meta, false);
 }
 
-/*
- * Initialization of the KFENCE pool after its allocation.
- * Returns 0 on success; otherwise returns the address up to
- * which partial initialization succeeded.
- */
-static unsigned long kfence_init_pool(void)
+static void kfence_clear_page_info(unsigned long addr, unsigned long size)
+{
+	unsigned long i;
+
+	for (i = addr; i < addr + size; i += PAGE_SIZE) {
+		struct page *page = virt_to_page((void *)i);
+
+		if (PageSlab(page)) {
+#ifdef CONFIG_MEMCG
+			page->memcg_data = 0;
+#endif
+			__ClearPageSlab(page);
+		}
+		__ClearPageKfence(page);
+		page->mapping = NULL;
+		atomic_set(&page->_refcount, 1);
+		kfence_unprotect(i);
+	}
+}
+
+static bool __kfence_init_pool_area(struct kfence_pool_area *kpa)
 {
-	unsigned long addr;
+	char *__kfence_pool = kpa->addr;
+	struct kfence_metadata *kfence_metadata = kpa->meta;
+	struct kfence_freelist_node *kfence_freelist = &freelist.node[kpa->node];
+	unsigned long addr = (unsigned long)__kfence_pool, flags;
 	struct page *pages;
 	int i;
 
-	if (!arch_kfence_init_pool())
-		return (unsigned long)__kfence_pool;
+	if (!__kfence_pool_early_init && !arch_kfence_init_pool(kpa))
+		goto err;
 
-	addr = (unsigned long)__kfence_pool;
-	pages = virt_to_page(__kfence_pool);
+	pages = virt_to_page((void *)addr);
 
 	/*
 	 * Set up object pages: they must have PG_slab set, to avoid freeing
@@ -592,17 +1082,10 @@ static unsigned long kfence_init_pool(void)
 	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
 	 * enters __slab_free() slow-path.
*/ - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); - - if (!i || (i % 2)) - continue; + for (i = 0; i < kpa->pool_size / PAGE_SIZE; i++) { + struct page *page = nth_page(pages, i); - __folio_set_slab(slab_folio(slab)); -#ifdef CONFIG_MEMCG - slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg | - MEMCG_DATA_OBJCGS; -#endif + __SetPageKfence(page); } /* @@ -613,96 +1096,676 @@ static unsigned long kfence_init_pool(void) */ for (i = 0; i < 2; i++) { if (unlikely(!kfence_protect(addr))) - return addr; + goto err; addr += PAGE_SIZE; } - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = &kfence_metadata_init[i]; + /* Protect the right redzone. */ + for (i = 0; i < kpa->nr_objects; i++) { + if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + goto err; + addr += 2 * PAGE_SIZE; + } + + addr = (unsigned long)__kfence_pool + 2 * PAGE_SIZE; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) { + struct kfence_metadata *meta = &kfence_metadata[i]; + + /* Initialize metadata. */ + INIT_LIST_HEAD(&meta->list); + raw_spin_lock_init(&meta->lock); + meta->state = KFENCE_OBJECT_UNUSED; + meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ + meta->kpa = kpa; + list_add_tail(&meta->list, &kfence_freelist->freelist); + /* No fail after here, since we've added this pool to freelist. */ + + addr += 2 * PAGE_SIZE; + } + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + /* + * The pool is live and will never be deallocated from this point on. + * Remove the pool object from the kmemleak object tree, as it would + * otherwise overlap with allocations returned by kfence_alloc(), which + * are registered with kmemleak through the slab post-alloc hook. 
+ */ + if (PageReserved(pages)) + kmemleak_ignore_phys(__pa(__kfence_pool)); + + return true; + +err: + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + return false; +} + +static bool kfence_rb_less(struct rb_node *a, const struct rb_node *b) +{ + return (unsigned long)kfence_rbentry(a)->addr < (unsigned long)kfence_rbentry(b)->addr; +} + +static void __init kfence_alloc_pool_node(int node) +{ + unsigned long nr_need = kfence_num_objects_stat[node].need; + unsigned long nr_request = min(nr_need, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long index = kfence_nr_areas_per_node * node; + + while (nr_need) { + unsigned long kfence_pool_size = (nr_request + 1) * 2 * PAGE_SIZE; + + __kfence_pool_area[index] = memblock_alloc_node(kfence_pool_size, PUD_SIZE, node); + if (!__kfence_pool_area[index]) { + pr_err("kfence alloc pool on node %d failed\n", node); + break; + } + index++; + nr_need -= nr_request; + nr_request = min(nr_request, nr_need); + } +} + +static void kpa_release(struct percpu_ref *ref); +static void kfence_free_area(struct work_struct *work); +static inline bool init_kpa(struct kfence_pool_area *kpa, char *__kfence_pool, int node, + unsigned long nr_objects, unsigned long pool_size) +{ + kpa->meta = vzalloc_node(sizeof(struct kfence_metadata) * nr_objects, node); + if (!kpa->meta) + goto fail; + if (percpu_ref_init(&kpa->refcnt, kpa_release, PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) + goto fail; + INIT_WORK(&kpa->work, kfence_free_area); + kpa->addr = __kfence_pool; + kpa->pool_size = pool_size; + kpa->nr_objects = nr_objects; + kpa->node = node; + atomic_set(&kpa->_ref, 1); /* held by rb tree */ + + if (!__kfence_init_pool_area(kpa)) + goto fail; + + return true; + +fail: + vfree(kpa->meta); + percpu_ref_exit(&kpa->refcnt); + + return false; +} + +static bool __init kfence_init_pool_area(int node, int area) +{ + int index = node * kfence_nr_areas_per_node + area; + char *__kfence_pool = __kfence_pool_area[index]; + struct kfence_pool_area *kpa; + unsigned long nr_objects, pool_size; + + if (!__kfence_pool) + return false; + + nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + + kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node); + if (!kpa) + goto fail; + + if (!init_kpa(kpa, __kfence_pool, node, nr_objects, pool_size)) + goto fail; + + rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less); + __kfence_pool_area[index] = NULL; + kfence_num_objects_stat[node].allocated += nr_objects; + + return true; + +fail: + memblock_free_late(__pa(__kfence_pool), pool_size); + __kfence_pool_area[index] = NULL; + kfree(kpa); + + return false; +} + +static bool __init kfence_init_pool(void) +{ + int area, node; + bool success_once = false; + + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + if (kfence_init_pool_area(node, area)) + success_once = true; + } + } + + return success_once; +} + +static void kfence_alloc_pool_late_node(int node, struct list_head *ready, bool fallback) +{ + unsigned long nr_need, nr_request; + struct kfence_alloc_node_cond *knos = &kfence_num_objects_stat[node]; + gfp_t gfp_mask = GFP_KERNEL | __GFP_ZERO; + + if (knos->allocated >= knos->need) + return; + + nr_need = roundup(knos->need - knos->allocated, KFENCE_MAX_OBJECTS_PER_AREA); + nr_request = KFENCE_MAX_OBJECTS_PER_AREA; + if (!fallback) + gfp_mask |= __GFP_THISNODE; + + while (nr_need) { + struct page *page; + struct kfence_pool_area *kpa; + unsigned long nr_pages = (nr_request + 
1) * 2;
+#ifdef CONFIG_CONTIG_ALLOC
+		page = alloc_contig_pages(nr_pages, gfp_mask, node, NULL);
+#else
+		pr_warn("anolis kfence can only be enabled late with CONFIG_CONTIG_ALLOC\n");
+		page = NULL;
+#endif
+		if (!page) {
+			pr_err("kfence alloc pool on node %d failed\n", node);
+			return;
+		}
+		kpa = kzalloc_node(sizeof(struct kfence_pool_area), GFP_KERNEL, node);
+		if (!kpa)
+			goto fail;
+
+		if (!init_kpa(kpa, page_to_virt(page), node, nr_request, nr_pages * PAGE_SIZE))
+			goto fail;
+
+		list_add(&kpa->list, ready);
+		nr_need -= nr_request;
+		knos->allocated += nr_request;
+		nr_request = min(nr_request, nr_need);
+
+		continue;
+
+fail:
+#ifdef CONFIG_CONTIG_ALLOC
+		free_contig_range(page_to_pfn(page), nr_pages);
+#endif
+		kfree(kpa);
+
+		return;
+	}
+}
+
+static void kfence_free_pool_area(struct kfence_pool_area *kpa)
+{
+	phys_addr_t base = __pa(kpa->addr), size = kpa->pool_size;
+	phys_addr_t cursor = PFN_UP(base);
+	phys_addr_t end = PFN_DOWN(base + size);
+
+	kmemleak_free_part_phys(base, size);
+	for (; cursor < end; cursor++) {
+		__free_pages_core(pfn_to_page(cursor), 0);
+		totalram_pages_inc();
+	}
+}
+
+static void kfence_free_pool_late_area(struct kfence_pool_area *kpa)
+{
+#ifdef CONFIG_CONTIG_ALLOC
+	free_contig_range(page_to_pfn(virt_to_page(kpa->addr)), kpa->pool_size / PAGE_SIZE);
+#endif
+}
+
+static void get_kpa(struct kfence_pool_area *kpa)
+{
+	atomic_inc(&kpa->_ref);
+}
+
+static void put_kpa(struct kfence_pool_area *kpa)
+{
+	if (atomic_dec_and_test(&kpa->_ref))
+		kfree(kpa);
+}
+
+static int kfence_update_pool_root(void *info)
+{
+	struct list_head *ready_list = info;
+	struct kfence_pool_area *kpa;
+	struct rb_node *cur, *next;
+
+	for (cur = rb_first(&kfence_pool_root); cur; cur = next) {
+		kpa = kfence_rbentry(cur);
+		next = rb_next(cur);
+		if (!kpa->nr_objects) {
+			rb_erase(&kpa->rb_node, &kfence_pool_root);
+			put_kpa(kpa);
+		} else {
+			percpu_ref_resurrect(&kpa->refcnt);
+		}
+	}
+
+	while (!list_empty(ready_list)) {
+		kpa = list_first_entry(ready_list, struct kfence_pool_area, list);
+		rb_add(&kpa->rb_node, &kfence_pool_root, kfence_rb_less);
+		list_del(&kpa->list);
+	}
+
+	return 0;
+}
+
+/*
+ * Flush this cpu's per cpu freelist to the per node freelist.
+ *
+ * We don't need any further synchronization to prevent races, because
+ * we can only get here by two routes (in both cases kfence is disabled,
+ * so no new allocations can occur):
+ *
+ * 1) from update_kfence_node_map() when enabling kfence
+ *    Since kfence_node_map is set to NULL, the objects
+ *    will be directly freed to the per node freelist.
+ *
+ * 2) from kfence_free_area() when a kpa is being released
+ *    Since the refcnt of this kpa is down to 0, no objects
+ *    from this kpa will be freed to the per cpu freelist.
+ *    If some objects from other kpas are freed after this
+ *    check, that is fine, because we will only free the space
+ *    of our target kpa. Just let objects from other kpas
+ *    remain in the per cpu freelist.
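+ *
+ * kfence_flush_all_and_wait() below drives route 2: it runs this on
+ * every online cpu via on_each_cpu() and drains the freelists of
+ * offline cpus directly.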
+ */
+static void kfence_flush(struct kfence_freelist_cpu *c)
+{
+	struct kfence_freelist_node *kfence_freelist;
+	struct kfence_metadata *meta;
+	unsigned long flags;
+
+	if (list_empty(&c->freelist)) {
+		if (KFENCE_WARN_ON(c->count))
+			c->count = 0;
+		return;
+	}
+
+	meta = list_first_entry(&c->freelist, struct kfence_metadata, list);
+	kfence_freelist = &freelist.node[meta->kpa->node];
+
+	raw_spin_lock_irqsave(&kfence_freelist->lock, flags);
+	list_splice_tail_init(&c->freelist, &kfence_freelist->freelist);
+	c->count = 0;
+	raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags);
+}
+
+static DECLARE_WAIT_QUEUE_HEAD(kfence_flush_wait);
+static void kfence_flush_call(void *info)
+{
+	struct kfence_freelist_cpu *c = get_cpu_ptr(freelist.cpu);
+
+	kfence_flush(c);
+	put_cpu_ptr(c);
+
+	if (!atomic_dec_return(&kfence_flush_res))
+		wake_up(&kfence_flush_wait);
+}
+
+/* Flush percpu freelists on all cpus and wait for completion. */
+static void kfence_flush_all_and_wait(void)
+{
+	int cpu;
+
+	cpus_read_lock();
+	atomic_set(&kfence_flush_res, num_online_cpus());
+	on_each_cpu(kfence_flush_call, NULL, 0);
+
+	/* Flush offline cpus. */
+	preempt_disable();
+	for_each_cpu_andnot(cpu, cpu_possible_mask, cpu_online_mask) {
+		kfence_flush(per_cpu_ptr(freelist.cpu, cpu));
+	}
+	preempt_enable();
+	cpus_read_unlock();
+
+	wait_event_idle(kfence_flush_wait, !atomic_read(&kfence_flush_res));
+}
+
+static bool kfence_can_recover_tlb(struct kfence_pool_area *kpa)
+{
+#ifdef CONFIG_X86_64
+	/* Only recover 1GiB-aligned, PUD-sized areas. */
+	return kpa->pool_size == PUD_SIZE;
+#else
+	/*
+	 * On arm64, the direct mapping area is already split to page
+	 * granularity with CONFIG_RODATA_FULL_DEFAULT_ENABLED=y, or
+	 * CONFIG_KFENCE=y. So we do not recover the TLB to PUD-huge
+	 * mappings. See upstream commit 840b23986344 ("arm64, kfence:
+	 * enable KFENCE for ARM64") for details.
+	 */
+	return false;
+#endif
+}
+
+static inline void __kfence_recover_tlb(unsigned long addr)
+{
+	if (!arch_kfence_free_pool(addr))
+		pr_warn("failed to recover TLB to 1GiB at 0x%p-0x%p\n",
+			(void *)addr, (void *)(addr + PUD_SIZE));
+}
+
+static inline void kfence_recover_tlb(struct kfence_pool_area *kpa)
+{
+	unsigned long base = ALIGN_DOWN((unsigned long)kpa->addr, PUD_SIZE);
+
+	if (kfence_can_recover_tlb(kpa))
+		__kfence_recover_tlb(base);
+}
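
Recovery only makes sense when a pool area covers exactly one direct-map PUD
entry; anything smaller would leave the 1GiB mapping partially split. A
user-space sketch of the check and the alignment arithmetic, assuming a 1GiB
PUD as on x86-64 (values and names are illustrative, not quoted from the
patch)::

	#include <stdio.h>
	#include <stdbool.h>

	#define PUD_SIZE	(1UL << 30)	/* assumption: 1GiB PUD */
	#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

	/* Mirrors the x86-64 branch of kfence_can_recover_tlb(). */
	static bool can_recover_tlb(unsigned long pool_size)
	{
		return pool_size == PUD_SIZE;	/* only a full 1GiB area qualifies */
	}

	int main(void)
	{
		unsigned long addr = 0x40001000UL;	/* hypothetical pool start */

		if (can_recover_tlb(PUD_SIZE))
			printf("recover huge mapping at 0x%lx\n",
			       ALIGN_DOWN(addr, PUD_SIZE));	/* 0x40000000 */
		return 0;
	}

+
+/* Free a specific area. The refcnt has dropped to 0.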
*/ +static void kfence_free_area(struct work_struct *work) +{ + unsigned long flags, i; + struct page *page; + struct kfence_pool_area *kpa = container_of(work, struct kfence_pool_area, work); + struct kfence_freelist_node *kfence_freelist; + + mutex_lock(&kfence_mutex); + if (!kpa->nr_objects || !percpu_ref_is_zero(&kpa->refcnt)) + goto out_unlock; + + kfence_flush_all_and_wait(); + + kfence_freelist = &freelist.node[kpa->node]; + raw_spin_lock_irqsave(&kfence_freelist->lock, flags); + for (i = 0; i < kpa->nr_objects; i++) + list_del(&kpa->meta[i].list); + + raw_spin_unlock_irqrestore(&kfence_freelist->lock, flags); + + pr_info("freed %lu bytes for %lu objects on node %d at 0x%p-0x%p\n", + kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr, + (void *)(kpa->addr + kpa->pool_size)); + + kfence_clear_page_info((unsigned long)kpa->addr, kpa->pool_size); + kfence_recover_tlb(kpa); + page = virt_to_page(kpa->addr); + + if (PageReserved(page)) + kfence_free_pool_area(kpa); + else + kfence_free_pool_late_area(kpa); + + vfree(kpa->meta); + kpa->meta = NULL; + percpu_ref_exit(&kpa->refcnt); + kpa->nr_objects = 0; + kpa->pool_size = 0; + +out_unlock: + mutex_unlock(&kfence_mutex); + put_kpa(kpa); +} + +static void kpa_release(struct percpu_ref *ref) +{ + struct kfence_pool_area *kpa = container_of(ref, struct kfence_pool_area, refcnt); + + get_kpa(kpa); + if (!queue_work(system_long_wq, &kpa->work)) + put_kpa(kpa); +} + +static void calculate_need_alloc(void) +{ + int node, nr_kpas, base, remain, nr_node_has_cpu; + enum node_states node_stat = N_CPU; + + if (!kfence_num_objects_stat) + return; + + if (kfence_pool_node_mode) { + for_each_node(node) { + kfence_num_objects_stat[node].need = kfence_num_objects; + } + return; + } + + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) { + kfence_num_objects_stat[first_online_node].need = kfence_num_objects; + return; + } + + /* In global mode, we only alloc on nodes with cpus (i.e., not on pmem nodes) */ + nr_node_has_cpu = num_node_state(node_stat); + if (!nr_node_has_cpu) { + node_stat = N_ONLINE; + nr_node_has_cpu = num_node_state(node_stat); + } + nr_kpas = kfence_num_objects / KFENCE_MAX_OBJECTS_PER_AREA; + base = nr_kpas / nr_node_has_cpu; + remain = nr_kpas - base * nr_node_has_cpu; + for_each_node_state(node, node_stat) { + kfence_num_objects_stat[node].need = (base + (!!remain)) * + KFENCE_MAX_OBJECTS_PER_AREA; + if (remain) + remain--; + } +} + +static inline bool __check_map_change(int *new_node_map) +{ + int node; + + for_each_node(node) { + if (kfence_node_map[node] != new_node_map[node]) + return true; + } + + return false; +} + +static void update_kfence_node_map(int *new_node_map) +{ + int *old_node_map; + int node; + enum node_states node_stat = N_CPU; + struct zonelist *zonelist; + struct zone *zone; + struct zoneref *z; + + memset(new_node_map, -1, sizeof(int) * nr_node_ids); + + if (!num_node_state(node_stat)) + node_stat = N_ONLINE; + + for_each_node_state(node, node_stat) { + if (kfence_num_objects_stat[node].allocated) { + new_node_map[node] = node; + continue; + } + + /* We borrow from zonelist to get the nearest node to map. 
*/ + zonelist = node_zonelist(node, GFP_KERNEL); + for_each_zone_zonelist_nodemask(zone, z, zonelist, ZONE_NORMAL, NULL) { + if (kfence_num_objects_stat[zone_to_nid(zone)].allocated) { + new_node_map[node] = zone_to_nid(zone); + break; + } + } + } + + /* It's the first time of init */ + if (!kfence_node_map) { + kfence_node_map = new_node_map; + return; + } + + if (!__check_map_change(new_node_map)) { + kfree(new_node_map); + return; + } + + old_node_map = kfence_node_map; + kfence_node_map = NULL; + synchronize_rcu(); + + kfence_flush_all_and_wait(); + + kfence_node_map = new_node_map; + kfree(old_node_map); +} + +/* + * Get the last kfence.booting_max= from boot cmdline. + * Mainly copied from get_last_crashkernel(). + */ +static __init char *get_last_kfence_booting_max(char *name) +{ + char *p = boot_command_line, *ck_cmdline = NULL; + + /* find kfence.booting_max and use the last one if there are more */ + p = strstr(p, name); + while (p) { + char *end_p = strchr(p, ' '); + + if (!end_p) + end_p = p + strlen(p); + ck_cmdline = p; + p = strstr(p+1, name); + } + + if (!ck_cmdline) + return NULL; + + ck_cmdline += strlen(name); + return ck_cmdline; +} + +/* + * This function parses command lines in the format + * + * kfence.booting_max=ramsize-range:size[,...] + * + * The function returns 0 on success and -EINVAL on failure. + * Mainly copied from parse_crashkernel_mem(). + */ +static int __init parse_kfence_booting_max(char *cmdline, + unsigned long long system_ram, + unsigned long long *reserve_max) +{ + char *cur = cmdline, *tmp; + + /* for each entry of the comma-separated list */ + do { + unsigned long long start, end = ULLONG_MAX, size; + + /* get the start of the range */ + start = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (*cur != '-') { + pr_warn("kfence.booting_max: '-' expected\n"); + return -EINVAL; + } + cur++; + + /* if no ':' is here, than we read the end */ + if (*cur != ':') { + end = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; + if (end <= start) { + pr_warn("kfence.booting_max: end <= start\n"); + return -EINVAL; + } + } + + if (*cur != ':') { + pr_warn("kfence.booting_max: ':' expected\n"); + return -EINVAL; + } + cur++; - /* Initialize metadata. */ - INIT_LIST_HEAD(&meta->list); - raw_spin_lock_init(&meta->lock); - meta->state = KFENCE_OBJECT_UNUSED; - meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ - list_add_tail(&meta->list, &kfence_freelist); + size = memparse(cur, &tmp); + if (cur == tmp) { + pr_warn("kfence.booting_max: Memory value expected\n"); + return -EINVAL; + } + cur = tmp; - /* Protect the right redzone. */ - if (unlikely(!kfence_protect(addr + PAGE_SIZE))) - goto reset_slab; + /* match ? */ + if (system_ram >= start && system_ram < end) { + *reserve_max = size; + break; + } + } while (*cur++ == ','); - addr += 2 * PAGE_SIZE; - } + if (!*reserve_max) + pr_info("kfence.booting_max size resulted in zero bytes, disabled\n"); - /* - * Make kfence_metadata visible only when initialization is successful. - * Otherwise, if the initialization fails and kfence_metadata is freed, - * it may cause UAF in kfence_shutdown_cache(). 
- */ - smp_store_release(&kfence_metadata, kfence_metadata_init); return 0; +} -reset_slab: - for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { - struct slab *slab = page_slab(nth_page(pages, i)); - - if (!i || (i % 2)) - continue; -#ifdef CONFIG_MEMCG - slab->memcg_data = 0; -#endif - __folio_clear_slab(slab_folio(slab)); - } +/* === DebugFS Interface ==================================================== */ - return addr; +static void print_pool_size(struct seq_file *seq, unsigned long byte) +{ + if (byte < SZ_1K) + seq_printf(seq, "%lu B\n", byte); + else if (byte < SZ_1M) + seq_printf(seq, "%lu KB\n", byte / SZ_1K); + else if (byte < SZ_1G) + seq_printf(seq, "%lu MB\n", byte / SZ_1M); + else + seq_printf(seq, "%lu GB\n", byte / SZ_1G); } -static bool __init kfence_init_pool_early(void) +static int stats_show(struct seq_file *seq, void *v) { - unsigned long addr; + int i, cpu; + struct kfence_pool_area *kpa; + struct rb_node *iter; + unsigned long *size_count; - if (!__kfence_pool) - return false; + seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); - addr = kfence_init_pool(); + if (!counters) + return 0; - if (!addr) { + for (i = 0; i < KFENCE_COUNTER_COUNT; i++) { + s64 sum = 0; /* - * The pool is live and will never be deallocated from this point on. - * Ignore the pool object from the kmemleak phys object tree, as it would - * otherwise overlap with allocations returned by kfence_alloc(), which - * are registered with kmemleak through the slab post-alloc hook. + * This calculation may not accurate, but don't mind since we are + * mostly interested in bugs and zombies. They are rare and likely + * not changed during calculating. */ - kmemleak_ignore_phys(__pa(__kfence_pool)); - return true; + for_each_possible_cpu(cpu) + sum += per_cpu_ptr(counters, cpu)->counter[i]; + seq_printf(seq, "%-35s:%20lld\n", counter_names[i], sum); } - /* - * Only release unprotected pages, and do not try to go back and change - * page attributes due to risk of failing to do so as well. If changing - * page attributes for some pages fails, it is very likely that it also - * fails for the first page, and therefore expect addr==__kfence_pool in - * most failure cases. 
- */ - memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool)); - __kfence_pool = NULL; - - memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE); - kfence_metadata_init = NULL; - - return false; -} + size_count = kmalloc_array(nr_node_ids * 2, sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO); + if (!size_count) + return 0; -/* === DebugFS Interface ==================================================== */ + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (!kpa->nr_objects) + continue; + size_count[kpa->node] += kpa->nr_objects; + size_count[kpa->node + nr_node_ids] += kpa->pool_size; + } + mutex_unlock(&kfence_mutex); -static int stats_show(struct seq_file *seq, void *v) -{ - int i; + seq_puts(seq, "\nnode\tobject_size\tpool_size\n"); + for_each_node(i) { + seq_printf(seq, "%-8d%-16lu", i, size_count[i]); + print_pool_size(seq, size_count[i + nr_node_ids]); + } - seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled)); - for (i = 0; i < KFENCE_COUNTER_COUNT; i++) - seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i])); + kfree(size_count); return 0; } @@ -715,28 +1778,59 @@ DEFINE_SHOW_ATTRIBUTE(stats); */ static void *start_object(struct seq_file *seq, loff_t *pos) { - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); + loff_t index = *pos; + struct kfence_pool_area *kpa; + struct rb_node *iter; + + mutex_lock(&kfence_mutex); + kfence_for_each_area(kpa, iter) { + if (index >= kpa->nr_objects) { + index -= kpa->nr_objects; + continue; + } + return &kpa->meta[index]; + } return NULL; } static void stop_object(struct seq_file *seq, void *v) { + mutex_unlock(&kfence_mutex); } static void *next_object(struct seq_file *seq, void *v, loff_t *pos) { + struct kfence_metadata *meta = (struct kfence_metadata *)v; + struct kfence_pool_area *kpa = meta->kpa; + struct rb_node *cur = &kpa->rb_node; + ++*pos; - if (*pos < CONFIG_KFENCE_NUM_OBJECTS) - return (void *)((long)*pos + 1); - return NULL; + ++meta; + if (meta - kpa->meta < kpa->nr_objects) + return meta; + seq_puts(seq, "---------------------------------\n"); +next_meta: + cur = rb_next(cur); + if (!cur) + return NULL; + kpa = kfence_rbentry(cur); + if (!kpa->nr_objects) + goto next_meta; + + return kpa->meta; } static int show_object(struct seq_file *seq, void *v) { - struct kfence_metadata *meta = &kfence_metadata[(long)v - 1]; + struct kfence_metadata *meta = (struct kfence_metadata *)v; unsigned long flags; + char buf[20]; + + if (!meta) + return 0; + sprintf(buf, "node %d:\n", meta->kpa->node); + seq_puts(seq, buf); raw_spin_lock_irqsave(&meta->lock, flags); kfence_print_object(seq, meta); raw_spin_unlock_irqrestore(&meta->lock, flags); @@ -753,14 +1847,10 @@ static const struct seq_operations objects_sops = { }; DEFINE_SEQ_ATTRIBUTE(objects); -static int kfence_debugfs_init(void) +static int __init kfence_debugfs_init(void) { - struct dentry *kfence_dir; - - if (!READ_ONCE(kfence_enabled)) - return 0; + struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL); - kfence_dir = debugfs_create_dir("kfence", NULL); debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops); debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops); return 0; @@ -772,13 +1862,17 @@ late_initcall(kfence_debugfs_init); static void kfence_check_all_canary(void) { + struct kfence_pool_area *kpa; + struct rb_node *iter; int i; - for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { - struct kfence_metadata *meta = 
&kfence_metadata[i];
+	kfence_for_each_area(kpa, iter) {
+		for (i = 0; i < kpa->nr_objects; i++) {
+			struct kfence_metadata *meta = &kpa->meta[i];
 
-		if (meta->state == KFENCE_OBJECT_ALLOCATED)
-			check_canary(meta);
+			if (meta->state == KFENCE_OBJECT_ALLOCATED)
+				check_canary(meta);
+		}
 	}
 }
 
@@ -840,36 +1934,79 @@ static void toggle_allocation_gate(struct work_struct *work)
 
 /* === Public interface ===================================================== */
 
-void __init kfence_alloc_pool_and_metadata(void)
+int __init update_kfence_booting_max(void)
 {
-	if (!kfence_sample_interval)
-		return;
+	static bool done __initdata;
+
+	unsigned long long parse_mem = PUD_SIZE;
+	unsigned long nr_pages, nr_obj_max;
+	char *cmdline;
+	int ret;
 
 	/*
-	 * If the pool has already been initialized by arch, there is no need to
-	 * re-allocate the memory pool.
+	 * We may reach here twice, because some architectures (e.g. arm64)
+	 * call this function earlier.
 	 */
-	if (!__kfence_pool)
-		__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (done)
+		return 0;
+	done = true;
 
-	if (!__kfence_pool) {
-		pr_err("failed to allocate pool\n");
-		return;
-	}
+	/* No kfence.booting_max= on the boot cmdline; nothing to limit. */
+	cmdline = get_last_kfence_booting_max("kfence.booting_max=");
+	if (!cmdline)
+		return 0;
+
+	ret = parse_kfence_booting_max(cmdline, memblock_phys_mem_size(), &parse_mem);
+	/* Disable boot-time kfence if parsing fails. */
+	if (ret)
+		goto nokfence;
+
+	nr_pages = min_t(unsigned long, parse_mem, PUD_SIZE) / PAGE_SIZE;
+	/* We need at least 4 pages to enable KFENCE. */
+	if (nr_pages < 4)
+		goto nokfence;
 
-	/* The memory allocated by memblock has been zeroed out. */
-	kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
-	if (!kfence_metadata_init) {
-		pr_err("failed to allocate metadata\n");
-		memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
-		__kfence_pool = NULL;
+	nr_obj_max = nr_pages / 2 - 1;
+	if (kfence_num_objects > nr_obj_max) {
+		kfence_num_objects = nr_obj_max;
+		return 1;
 	}
+
+	return 0;
+
+nokfence:
+	kfence_num_objects = 0;
+	return 1;
 }
 
-static void kfence_init_enable(void)
+/* Runs only the first time kfence is set up. */
+static bool kfence_setup_once(void)
 {
-	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
-		static_branch_enable(&kfence_allocation_key);
+	int i;
+
+	/*
+	 * freelist.node, freelist.cpu and counters are initialized together,
+	 * so checking one of them is enough to know whether we are
+	 * re-enabling.
+	 */
+	if (counters)
+		return true;
+
+	freelist.node = kmalloc_array(nr_node_ids, sizeof(struct kfence_freelist_node),
+				      GFP_KERNEL);
+	freelist.cpu = alloc_percpu(struct kfence_freelist_cpu);
+	counters = alloc_percpu(struct kfence_counter);
+
+	if (!freelist.node || !freelist.cpu || !counters)
+		goto fail;
+
+	for_each_node(i) {
+		INIT_LIST_HEAD(&freelist.node[i].freelist);
+		raw_spin_lock_init(&freelist.node[i].lock);
+	}
+
+	for_each_possible_cpu(i)
+		INIT_LIST_HEAD(&per_cpu_ptr(freelist.cpu, i)->freelist);
 
 	if (kfence_deferrable)
 		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
@@ -879,119 +2016,328 @@ static void kfence_init_enable(void)
 	if (kfence_check_on_panic)
 		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);
+	return true;
+
+fail:
+	kfree(freelist.node);
+	freelist.node = NULL;
+	free_percpu(freelist.cpu);
+	freelist.cpu = NULL;
+	free_percpu(counters);
+	counters = NULL;
+	return false;
+}
+
+static void start_kfence(void)
+{
+	unsigned long total_nr_objects = 0;
+	struct kfence_pool_area *kpa;
+	struct rb_node *iter;
+
+	kfence_for_each_area(kpa, iter) {
+		pr_info("initialized - using %lu bytes for %lu objects on node %d at 0x%p-0x%p\n",
+			kpa->pool_size, kpa->nr_objects, kpa->node, (void *)kpa->addr,
+			(void *)(kpa->addr + kpa->pool_size));
+		total_nr_objects += kpa->nr_objects;
+	}
+
+	/* Update kfence_num_objects so the value exported via /sys/module/ matches. */
+	if (total_nr_objects > KFENCE_MAX_OBJECTS_PER_AREA)
+		kfence_num_objects = rounddown(total_nr_objects, KFENCE_MAX_OBJECTS_PER_AREA);
+	else
+		kfence_num_objects = total_nr_objects;
+
+	/* Leave upstream (small-pool) mode: drop the coverage table sized for it. */
+	if (kfence_num_objects_snap && total_nr_objects > kfence_num_objects_snap) {
+		kfence_num_objects_snap = 0;
+		kvfree(alloc_covered);
+		alloc_covered = NULL;
+	}
 
 	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	static_branch_enable(&kfence_once_enabled);
+	static_branch_enable(&kfence_allocation_key);
+	if (kfence_sample_interval < 0) {
+		static_branch_enable(&kfence_short_canary);
+		static_branch_enable(&kfence_skip_interval);
+	} else {
+		static_branch_disable(&kfence_skip_interval);
+		queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
+	}
+}
+
+void __init kfence_alloc_pool_and_metadata(void)
+{
+	int node;
+
+	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
+	if (!READ_ONCE(kfence_sample_interval))
+		return;
+
+	if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA) {
+		/*
+		 * A pool smaller than 1GiB is incompatible with both node
+		 * mode and non-interval (sample_interval < 0) allocation.
+		 */
+		if (kfence_pool_node_mode || kfence_sample_interval < 0)
+			goto fail;
+
+		/*
+		 * Only limit upstream mode in production environments;
+		 * limiting a debug setup makes no sense.
+		 */
+		update_kfence_booting_max();
+		if (!kfence_num_objects)
+			goto fail;
+	}
+
+	kfence_num_objects_stat = memblock_alloc(sizeof(struct kfence_alloc_node_cond) *
+						 nr_node_ids, PAGE_SIZE);
+	if (!kfence_num_objects_stat)
+		goto fail;
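
Each object costs a data page plus a guard page, and one extra guard pair
leads the pool, so pool_size = (nr_objects + 1) * 2 * PAGE_SIZE throughout
this file. A user-space sketch of the arithmetic, assuming 4K pages and a
1GiB PUD as on x86-64 (the derived per-area maximum is an inference, not a
constant quoted from the patch)::

	#include <stdio.h>

	#define PAGE_SIZE	4096UL		/* assumption: 4K pages */
	#define PUD_SIZE	(1UL << 30)	/* assumption: 1GiB PUD */

	/* pool_size = (nr_objects + 1) * 2 * PAGE_SIZE, as in kfence_init_pool_area(). */
	static unsigned long pool_size(unsigned long nr_objects)
	{
		return (nr_objects + 1) * 2 * PAGE_SIZE;
	}

	int main(void)
	{
		/* Largest object count whose pool fits exactly in one PUD. */
		unsigned long max_objects = PUD_SIZE / (2 * PAGE_SIZE) - 1;

		printf("max objects per 1GiB area: %lu\n", max_objects);	/* 131071 */
		printf("pool size at that count: %lu bytes\n", pool_size(max_objects));
		return 0;
	}

With these values one area holds 131071 objects and its pool is exactly 1GiB,
which is what makes the PUD-granular TLB split and recovery above possible.

+
+	/*
+	 * If the pool size is less than 1GiB, use the upstream mode;
+	 * otherwise, align the pool size up to 1GiB so the direct-map
+	 * TLB entries can be split and later recovered.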
+ */ + if (kfence_num_objects >= KFENCE_MAX_OBJECTS_PER_AREA) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + else + kfence_num_objects_snap = kfence_num_objects; + + calculate_need_alloc(); + + for_each_node(node) { + if (kfence_nr_areas_per_node < kfence_num_objects_stat[node].need) + kfence_nr_areas_per_node = kfence_num_objects_stat[node].need; + } + kfence_nr_areas_per_node /= KFENCE_MAX_OBJECTS_PER_AREA; + if (!kfence_nr_areas_per_node) + kfence_nr_areas_per_node = 1; + + __kfence_pool_area = memblock_alloc(sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node, PAGE_SIZE); + if (!__kfence_pool_area) + goto fail; + + if (__kfence_pool_early_init) { + __kfence_pool_area[first_online_node] = __kfence_pool_early_init; + return; + } + + for_each_node(node) + kfence_alloc_pool_node(node); + + return; - pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE, - CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool, - (void *)(__kfence_pool + KFENCE_POOL_SIZE)); +fail: + if (kfence_num_objects_stat) { + memblock_free(kfence_num_objects_stat, + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + } + WRITE_ONCE(kfence_sample_interval, 0); } void __init kfence_init(void) { + unsigned long nr_objects = min(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); + unsigned long kfence_pool_size = (nr_objects + 1) * 2 * PAGE_SIZE; + int node, area, index; + int *new_node_map; + stack_hash_seed = get_random_u32(); /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */ - if (!kfence_sample_interval) + if (!READ_ONCE(kfence_sample_interval)) return; - if (!kfence_init_pool_early()) { - pr_err("%s failed\n", __func__); - return; + if (!kfence_setup_once()) + goto fail_alloc; + + if (kfence_num_objects_snap) { + alloc_covered_order = ilog2(kfence_num_objects_snap) + 2; + alloc_covered = kvmalloc_array(ALLOC_COVERED_SIZE, sizeof(atomic_t), + GFP_KERNEL | __GFP_ZERO); + if (!alloc_covered) + goto fail_alloc; + } + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. 
*/ + new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO); + if (!new_node_map) + goto fail_coverd; + + if (!kfence_init_pool()) { + pr_err("%s failed on all nodes!\n", __func__); + goto fail_node_map; + } + + update_kfence_node_map(new_node_map); + + start_kfence(); + goto out; + +fail_node_map: + kfree(new_node_map); +fail_coverd: + kvfree(alloc_covered); + alloc_covered = NULL; +fail_alloc: + for_each_node(node) { + for (area = 0; area < kfence_nr_areas_per_node; area++) { + index = kfence_nr_areas_per_node * node + area; + if (__kfence_pool_area[index]) { + memblock_free_late(__pa(__kfence_pool_area[index]), + kfence_pool_size); + __kfence_pool_area[index] = NULL; + } + } } - kfence_init_enable(); +out: + memblock_free_late(__pa(__kfence_pool_area), sizeof(char *) * nr_node_ids * + kfence_nr_areas_per_node); + __kfence_pool_area = NULL; + memblock_free_late(__pa(kfence_num_objects_stat), + sizeof(struct kfence_alloc_node_cond) * nr_node_ids); + kfence_num_objects_stat = NULL; + } -static int kfence_init_late(void) +static DECLARE_WAIT_QUEUE_HEAD(kfence_refkill_wait); +static void kfence_kill_confirm(struct percpu_ref *ref) { - const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE; - const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE; - unsigned long addr = (unsigned long)__kfence_pool; - unsigned long free_size = KFENCE_POOL_SIZE; - int err = -ENOMEM; + if (!atomic_dec_return(&kfence_refkill_res)) + wake_up(&kfence_refkill_wait); +} -#ifdef CONFIG_CONTIG_ALLOC - struct page *pages; +static void kfence_enable_late(void) +{ + struct kfence_pool_area *kpa; + LIST_HEAD(ready_list); + struct rb_node *iter; + int *new_node_map; + int node; - pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node, - NULL); - if (!pages) - return -ENOMEM; + if (!READ_ONCE(kfence_sample_interval)) + return; - __kfence_pool = page_to_virt(pages); - pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node, - NULL); - if (pages) - kfence_metadata_init = page_to_virt(pages); -#else - if (nr_pages_pool > MAX_ORDER_NR_PAGES || - nr_pages_meta > MAX_ORDER_NR_PAGES) { - pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n"); - return -EINVAL; - } + /* + * If kfence pool is initialized later, the early init kfence pool has + * been released, reset the pointer here to avoid re-initialization if + * split_linear_mapping disabled. + */ + __kfence_pool_early_init = NULL; - __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL); - if (!__kfence_pool) - return -ENOMEM; + mutex_lock(&kfence_mutex); - kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL); -#endif + if (READ_ONCE(kfence_enabled)) + goto out; - if (!kfence_metadata_init) - goto free_pool; + /* + * Keep upstream mode remaining the same. + * Otherwise we "forget" the upstream version whose pool size < 1GiB. + */ + if (kfence_num_objects > kfence_num_objects_snap || kfence_pool_node_mode) + kfence_num_objects = roundup(kfence_num_objects, KFENCE_MAX_OBJECTS_PER_AREA); - memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE); - addr = kfence_init_pool(); - if (!addr) { - kfence_init_enable(); - kfence_debugfs_init(); - return 0; + if (kfence_num_objects < KFENCE_MAX_OBJECTS_PER_AREA && kfence_sample_interval < 0) + goto fail; + + if (!kfence_setup_once()) + goto fail; + + /* pre-alloc here for update_kfence_node_map() to avoid complex error handling later. 
 */
+	new_node_map = kmalloc_array(nr_node_ids, sizeof(int), GFP_KERNEL | __GFP_ZERO);
+	if (!new_node_map)
+		goto fail;
+
+	kfence_num_objects_stat = kmalloc_array(nr_node_ids, sizeof(struct kfence_alloc_node_cond),
+						GFP_KERNEL | __GFP_ZERO);
+	if (!kfence_num_objects_stat)
+		goto fail_node_map;
+
+	calculate_need_alloc();
+
+	kfence_for_each_area(kpa, iter) {
+		if (kpa->nr_objects >= KFENCE_MAX_OBJECTS_PER_AREA || kfence_num_objects_snap)
+			kfence_num_objects_stat[kpa->node].allocated += kpa->nr_objects;
+	}
 
-	pr_err("%s failed\n", __func__);
-	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
-	err = -EBUSY;
+	for_each_node(node)
+		kfence_alloc_pool_late_node(node, &ready_list, false);
 
-#ifdef CONFIG_CONTIG_ALLOC
-	free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
-			  nr_pages_meta);
-free_pool:
-	free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
-			  free_size / PAGE_SIZE);
-#else
-	free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
-free_pool:
-	free_pages_exact((void *)addr, free_size);
-#endif
+	/*
+	 * Try again for any nodes the first pass failed on: they may not
+	 * have enough contiguous memory, so fall back to allocating on
+	 * other nodes.
+	 */
+	for_each_node(node)
+		kfence_alloc_pool_late_node(node, &ready_list, true);
+
+	update_kfence_node_map(new_node_map);
+	kfree(kfence_num_objects_stat);
+	kfence_num_objects_stat = NULL;
+
+	stop_machine(kfence_update_pool_root, &ready_list, NULL);
 
-	kfence_metadata_init = NULL;
-	__kfence_pool = NULL;
-	return err;
+	if (RB_EMPTY_ROOT(&kfence_pool_root))
+		goto fail;
+
+	start_kfence();
+	goto out;
+
+fail_node_map:
+	kfree(new_node_map);
+fail:
+	WRITE_ONCE(kfence_sample_interval, 0);
+out:
+	mutex_unlock(&kfence_mutex);
 }
 
-static int kfence_enable_late(void)
+void kfence_disable(void)
 {
-	if (!__kfence_pool)
-		return kfence_init_late();
+	struct kfence_pool_area *kpa;
+	struct rb_node *iter;
 
-	WRITE_ONCE(kfence_enabled, true);
-	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
-	pr_info("re-enabled\n");
-	return 0;
+	mutex_lock(&kfence_mutex);
+
+	if (!xchg(&kfence_enabled, false))
+		goto out_unlock;
+
+	synchronize_rcu();
+
+	atomic_set(&kfence_allocation_gate, 1);
+#ifdef CONFIG_KFENCE_STATIC_KEYS
+	wake_up(&allocation_wait);
+#endif
+	static_branch_disable(&kfence_allocation_key);
+
+	atomic_set(&kfence_refkill_res, 0);
+	kfence_for_each_area(kpa, iter) {
+		atomic_inc(&kfence_refkill_res);
+		percpu_ref_kill_and_confirm(&kpa->refcnt, kfence_kill_confirm);
+	}
+
+	/*
+	 * Wait here until every percpu_ref has been confirmed killed;
+	 * only then release the mutex.
+	 */
+	wait_event_idle(kfence_refkill_wait, !atomic_read(&kfence_refkill_res));
+
+out_unlock:
+	mutex_unlock(&kfence_mutex);
 }
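
Disabling is a fan-out/fan-in: one confirm callback fires per pool area as
its percpu_ref dies, and the last callback wakes the waiter, which holds
kfence_mutex throughout. A user-space sketch of that counting pattern with
C11 atomics and pthreads (all names are hypothetical; the kernel side uses
percpu_ref_kill_and_confirm() and a waitqueue instead)::

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	#define NR_AREAS 4

	static atomic_int refkill_res;
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t all_killed = PTHREAD_COND_INITIALIZER;

	/* Models kfence_kill_confirm(): the last confirmation wakes the waiter. */
	static void *kill_confirm(void *arg)
	{
		if (atomic_fetch_sub(&refkill_res, 1) == 1) {
			pthread_mutex_lock(&lock);
			pthread_cond_signal(&all_killed);
			pthread_mutex_unlock(&lock);
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t workers[NR_AREAS];

		atomic_store(&refkill_res, NR_AREAS);	/* one kill per area */
		for (int i = 0; i < NR_AREAS; i++)
			pthread_create(&workers[i], NULL, kill_confirm, NULL);

		/* Models wait_event_idle(): sleep until every confirm has run. */
		pthread_mutex_lock(&lock);
		while (atomic_load(&refkill_res))
			pthread_cond_wait(&all_killed, &lock);
		pthread_mutex_unlock(&lock);

		for (int i = 0; i < NR_AREAS; i++)
			pthread_join(workers[i], NULL);
		puts("all areas confirmed killed");
		return 0;
	}

 
-void kfence_shutdown_cache(struct kmem_cache *s)
+static void kfence_shutdown_cache_area(struct kmem_cache *s, struct kfence_pool_area *kpa)
 {
 	unsigned long flags;
-	struct kfence_metadata *meta;
+	struct kfence_metadata *meta, *kfence_metadata = kpa->meta;
 	int i;
 
-	/* Pairs with release in kfence_init_pool(). */
-	if (!smp_load_acquire(&kfence_metadata))
-		return;
-
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < kpa->nr_objects; i++) {
 		bool in_use;
 
 		meta = &kfence_metadata[i];
@@ -1030,7 +2376,7 @@ void kfence_shutdown_cache(struct kmem_cache *s)
 		}
 	}
 
-	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
+	for (i = 0; i < kpa->nr_objects; i++) {
 		meta = &kfence_metadata[i];
 
 		/* See above.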
*/ @@ -1044,7 +2390,19 @@ void kfence_shutdown_cache(struct kmem_cache *s) } } -void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) +void kfence_shutdown_cache(struct kmem_cache *s) +{ + struct kfence_pool_area *kpa; + struct rb_node *iter; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return; + + kfence_for_each_area(kpa, iter) + kfence_shutdown_cache_area(s, kpa); +} + +void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags, int node) { unsigned long stack_entries[KFENCE_STACK_DEPTH]; size_t num_stack_entries; @@ -1055,7 +2413,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) * we don't disable KFENCE without making an allocation. */ if (size > PAGE_SIZE) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1066,7 +2424,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) */ if ((flags & GFP_ZONEMASK) || (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; return NULL; } @@ -1077,6 +2435,9 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) if (s->flags & SLAB_SKIP_KFENCE) return NULL; + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + if (atomic_inc_return(&kfence_allocation_gate) > 1) return NULL; #ifdef CONFIG_KFENCE_STATIC_KEYS @@ -1093,28 +2454,94 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) } #endif +alloc: if (!READ_ONCE(kfence_enabled)) return NULL; num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). + */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } + } + + if (node == NUMA_NO_NODE) + node = numa_node_id(); + + return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, + alloc_stack_hash, node); +} + +#define GFP_KFENCE_NOT_ALLOC ((GFP_ZONEMASK & ~__GFP_HIGHMEM) | __GFP_NOKFENCE | __GFP_THISNODE) +struct page *__kfence_alloc_page(int node, gfp_t flags) +{ + unsigned long stack_entries[KFENCE_STACK_DEPTH]; + size_t num_stack_entries; + u32 alloc_stack_hash; + + if (!static_branch_likely(&kfence_order0_page)) + return NULL; + + if ((flags & GFP_KFENCE_NOT_ALLOC) || (flags & GFP_USER) == GFP_USER) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_INCOMPAT]++; + return NULL; + } + + if (static_branch_likely(&kfence_skip_interval)) + goto alloc; + + if (atomic_inc_return(&kfence_allocation_gate) > 1) + return NULL; +#ifdef CONFIG_KFENCE_STATIC_KEYS /* - * Do expensive check for coverage of allocation in slow-path after - * allocation_gate has already become non-zero, even though it might - * mean not making any allocation within a given sample interval. 
- * - * This ensures reasonable allocation coverage when the pool is almost - * full, including avoiding long-lived allocations of the same source - * filling up the pool (e.g. pagecache allocations). + * waitqueue_active() is fully ordered after the update of + * kfence_allocation_gate per atomic_inc_return(). */ - alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); - if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { - atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]); + if (waitqueue_active(&allocation_wait)) { + /* + * Calling wake_up() here may deadlock when allocations happen + * from within timer code. Use an irq_work to defer it. + */ + irq_work_queue(&wake_up_kfence_timer_work); + } +#endif + +alloc: + if (!READ_ONCE(kfence_enabled)) return NULL; + + num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0); + + if (!static_branch_likely(&kfence_skip_interval)) { + /* + * Do expensive check for coverage of allocation in slow-path after + * allocation_gate has already become non-zero, even though it might + * mean not making any allocation within a given sample interval. + * + * This ensures reasonable allocation coverage when the pool is almost + * full, including avoiding long-lived allocations of the same source + * filling up the pool (e.g. pagecache allocations). + */ + alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries); + if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) { + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_SKIP_COVERED]++; + return NULL; + } } - return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries, - alloc_stack_hash); + return kfence_guarded_alloc_page(node, stack_entries, num_stack_entries, alloc_stack_hash); } size_t kfence_ksize(const void *addr) @@ -1130,7 +2557,12 @@ size_t kfence_ksize(const void *addr) void *kfence_object_start(const void *addr) { - const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + struct kfence_metadata *meta; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return NULL; + + meta = addr_to_metadata((unsigned long)addr); /* * Read locklessly -- if there is a race with __kfence_alloc(), this is @@ -1158,20 +2590,34 @@ void __kfence_free(void *addr) kfence_guarded_free(addr, meta, false); } +void __kfence_free_page(struct page *page, void *addr) +{ + struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr); + + kfence_guarded_free_page(page, addr, meta); +} + bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs) { - const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE; struct kfence_metadata *to_report = NULL; enum kfence_error_type error_type; + struct kfence_pool_area *kpa; unsigned long flags; + int page_index; + + if (!static_branch_unlikely(&kfence_once_enabled)) + return false; - if (!is_kfence_address((void *)addr)) + kpa = get_kfence_pool_area((void *)addr); + if (!kpa) return false; if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */ return kfence_unprotect(addr); /* ... unprotect and proceed. */ - atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]); + raw_cpu_ptr(counters)->counter[KFENCE_COUNTER_BUGS]++; + + page_index = (addr - (unsigned long)kpa->addr) / PAGE_SIZE; if (page_index % 2) { /* This is a redzone, report a buffer overflow. 
*/ diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h index f46fbb03062b9c050e1a3fb698743b5b4d46b912..071aec5feb96aff8703cea7fa77dc153d8afcdf5 100644 --- a/mm/kfence/kfence.h +++ b/mm/kfence/kfence.h @@ -100,46 +100,83 @@ struct kfence_metadata { #ifdef CONFIG_MEMCG struct obj_cgroup *objcg; #endif + struct kfence_pool_area *kpa; }; -#define KFENCE_METADATA_SIZE PAGE_ALIGN(sizeof(struct kfence_metadata) * \ - CONFIG_KFENCE_NUM_OBJECTS) +extern bool kfence_panic_on_fault; +DECLARE_STATIC_KEY_FALSE(kfence_short_canary); -extern struct kfence_metadata *kfence_metadata; +/* KFENCE error types for report generation. */ +enum kfence_error_type { + KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ + KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ + KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ + KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ + KFENCE_ERROR_INVALID_FREE, /* Invalid free. */ +}; + +void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, + const struct kfence_metadata *meta, enum kfence_error_type type); + +void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); +void kfence_disable(void); +extern void __free_pages_core(struct page *page, unsigned int order); + +extern struct rb_root kfence_pool_root; +#define kfence_rbentry(cur) rb_entry((cur), struct kfence_pool_area, rb_node) +#define kfence_for_each_area(kpa, iter) \ + for ((iter) = rb_first(&kfence_pool_root); \ + (iter) && ((kpa) = kfence_rbentry((iter)));\ + (iter) = rb_next((iter))) + +/** + * get_kfence_pool_area() - find the kfence pool area of the address + * @addr: address to check + * + * Return: the kfence pool area, NULL if not a kfence address + */ +static inline struct kfence_pool_area *get_kfence_pool_area(const void *addr) +{ + struct rb_node *cur; + struct kfence_pool_area *res = NULL; + + for (cur = kfence_pool_root.rb_node; cur;) { + struct kfence_pool_area *kpa = kfence_rbentry(cur); + + if ((unsigned long)addr < (unsigned long)kpa->addr) + cur = cur->rb_left; + else { + res = kpa; + cur = cur->rb_right; + } + } + + return is_kfence_address_area(addr, res) ? res : NULL; +} static inline struct kfence_metadata *addr_to_metadata(unsigned long addr) { long index; + struct kfence_metadata *kfence_metadata; + struct kfence_pool_area *kpa = get_kfence_pool_area((void *)addr); /* The checks do not affect performance; only called from slow-paths. */ - if (!is_kfence_address((void *)addr)) + if (!kpa) return NULL; + kfence_metadata = kpa->meta; + /* * May be an invalid index if called with an address at the edge of * __kfence_pool, in which case we would report an "invalid access" * error. */ - index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1; - if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS) + index = (addr - (unsigned long)kpa->addr) / (PAGE_SIZE * 2) - 1; + if (index < 0 || index >= kpa->nr_objects) return NULL; return &kfence_metadata[index]; } -/* KFENCE error types for report generation. */ -enum kfence_error_type { - KFENCE_ERROR_OOB, /* Detected a out-of-bounds access. */ - KFENCE_ERROR_UAF, /* Detected a use-after-free access. */ - KFENCE_ERROR_CORRUPTION, /* Detected a memory corruption on free. */ - KFENCE_ERROR_INVALID, /* Invalid access of unknown type. */ - KFENCE_ERROR_INVALID_FREE, /* Invalid free. 
*/ -}; - -void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs, - const struct kfence_metadata *meta, enum kfence_error_type type); - -void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta); - #endif /* MM_KFENCE_KFENCE_H */ diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c index 95b2b84c296d08d3b3a989f1da1df1e55fa951fe..6e26b9b7ed4a71a8349675befab767c1098b2bbd 100644 --- a/mm/kfence/kfence_test.c +++ b/mm/kfence/kfence_test.c @@ -226,6 +226,8 @@ static __always_inline void test_free(void *ptr) kfree(ptr); } +#define test_free_page(addr) free_page((unsigned long)addr) + /* * If this should be a KFENCE allocation, and on which side the allocation and * the closest guard page should be. @@ -243,6 +245,7 @@ enum allocation_policy { */ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy) { + long _kfence_sample_interval = kfence_sample_interval; void *alloc; unsigned long timeout, resched_after; const char *policy_name; @@ -269,13 +272,15 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); /* * Especially for non-preemption kernels, ensure the allocation-gate * timer can catch up: after @resched_after, every failed allocation * attempt yields, to ensure the allocation-gate timer is scheduled. */ - resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval); + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); do { if (test_cache) alloc = kmem_cache_alloc(test_cache, gfp); @@ -305,6 +310,9 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat } else if (policy == ALLOCATE_NONE) return alloc; + if (kfence_sample_interval < 0 && policy == ALLOCATE_NONE) + return alloc; + test_free(alloc); if (time_after(jiffies, resched_after)) @@ -315,6 +323,50 @@ static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocat return NULL; /* Unreachable. */ } +static struct page *test_alloc_page(struct kunit *test, bool is_vmalloc) +{ + long _kfence_sample_interval = kfence_sample_interval; + struct page *alloc; + void *addr; + unsigned long timeout, resched_after; + + kunit_info(test, "%s: size=%zu vmalloc=%d\n", __func__, PAGE_SIZE, is_vmalloc); + + /* + * 100x the sample interval should be more than enough to ensure we get + * a KFENCE allocation eventually. + */ + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); + /* + * Especially for non-preemption kernels, ensure the allocation-gate + * timer can catch up: after @resched_after, every failed allocation + * attempt yields, to ensure the allocation-gate timer is scheduled. 
+ */ + resched_after = jiffies + msecs_to_jiffies(_kfence_sample_interval); + do { + if (is_vmalloc) { + addr = vmalloc(PAGE_SIZE); + alloc = vmalloc_to_page(addr); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + vfree(addr); + } else { + alloc = alloc_page(GFP_KERNEL); + if (is_kfence_address(page_to_virt(alloc))) + return alloc; + __free_page(alloc); + } + + if (time_after(jiffies, resched_after)) + cond_resched(); + } while (time_before(jiffies, timeout)); + + KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate page from KFENCE"); + return NULL; /* Unreachable. */ +} + static void test_out_of_bounds_read(struct kunit *test) { size_t size = 32; @@ -349,6 +401,33 @@ static void test_out_of_bounds_read(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_read_page, + .is_write = false, + }; + char *buf; + struct page *page; + + /* Test both sides. */ + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf + PAGE_SIZE; + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_out_of_bounds_write(struct kunit *test) { size_t size = 32; @@ -367,6 +446,24 @@ static void test_out_of_bounds_write(struct kunit *test) test_free(buf); } +static void test_out_of_bounds_write_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_OOB, + .fn = test_out_of_bounds_write_page, + .is_write = true, + }; + char *buf; + struct page *page; + + page = test_alloc_page(test, false); + buf = page_address(page); + expect.addr = buf - 1; + WRITE_ONCE(*expect.addr, 42); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); + test_free_page(buf); +} + static void test_use_after_free_read(struct kunit *test) { const size_t size = 32; @@ -383,6 +480,22 @@ static void test_use_after_free_read(struct kunit *test) KUNIT_EXPECT_TRUE(test, report_matches(&expect)); } +static void test_use_after_free_read_page(struct kunit *test) +{ + struct expect_report expect = { + .type = KFENCE_ERROR_UAF, + .fn = test_use_after_free_read_page, + .is_write = false, + }; + struct page *page; + + page = test_alloc_page(test, false); + expect.addr = page_address(page); + test_free_page(expect.addr); + READ_ONCE(*expect.addr); + KUNIT_EXPECT_TRUE(test, report_matches(&expect)); +} + static void test_double_free(struct kunit *test) { const size_t size = 32; @@ -609,7 +722,7 @@ static void test_gfpzero(struct kunit *test) int i; /* Skip if we think it'd take too long. */ - KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100); + KFENCE_TEST_REQUIRES(test, kfence_sample_interval <= 100 && kfence_num_objects <= 255); setup_test_cache(test, size, 0, NULL); buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY); @@ -624,7 +737,7 @@ static void test_gfpzero(struct kunit *test) break; test_free(buf2); - if (kthread_should_stop() || (i == CONFIG_KFENCE_NUM_OBJECTS)) { + if (kthread_should_stop() || (i == kfence_num_objects)) { kunit_warn(test, "giving up ... 
cannot get same object back\n"); return; } @@ -641,12 +754,19 @@ static void test_gfpzero(struct kunit *test) static void test_invalid_access(struct kunit *test) { - const struct expect_report expect = { + struct expect_report expect = { .type = KFENCE_ERROR_INVALID, .fn = test_invalid_access, - .addr = &__kfence_pool[10], .is_write = false, }; + struct rb_node *cur = kfence_pool_root.rb_node; + char *__kfence_pool; + + if (!cur) + return; + + __kfence_pool = kfence_rbentry(cur)->addr; + expect.addr = &__kfence_pool[10]; READ_ONCE(__kfence_pool[10]); KUNIT_EXPECT_TRUE(test, report_matches(&expect)); @@ -731,6 +851,7 @@ static void test_krealloc(struct kunit *test) /* Test that some objects from a bulk allocation belong to KFENCE pool. */ static void test_memcache_alloc_bulk(struct kunit *test) { + long _kfence_sample_interval = kfence_sample_interval; const size_t size = 32; bool pass = false; unsigned long timeout; @@ -741,7 +862,9 @@ static void test_memcache_alloc_bulk(struct kunit *test) * 100x the sample interval should be more than enough to ensure we get * a KFENCE allocation eventually. */ - timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval); + if (kfence_sample_interval < 0) + _kfence_sample_interval = 100; + timeout = jiffies + msecs_to_jiffies(100 * _kfence_sample_interval); do { void *objects[100]; int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects), @@ -767,6 +890,37 @@ static void test_memcache_alloc_bulk(struct kunit *test) KUNIT_EXPECT_FALSE(test, report_available()); } +static void test_kernel_stack(struct kunit *test) +{ + unsigned long vaddr = (unsigned long)current->stack; + struct page *page; + int i; + + KFENCE_TEST_REQUIRES(test, IS_ENABLED(CONFIG_VMAP_STACK) && kfence_sample_interval < 0); + + for (i = 0 ; i < 1<kpa->meta; const int size = abs(meta->size); const unsigned long start = meta->addr; const struct kmem_cache *const cache = meta->cache; + struct page *page = virt_to_page((void *)start); lockdep_assert_held(&meta->lock); @@ -141,7 +143,8 @@ void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *met seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n", meta - kfence_metadata, (void *)start, (void *)(start + size - 1), - size, (cache && cache->name) ? cache->name : ""); + size, (cache && cache->name) ? cache->name : PageSlab(page) ? + "" : "PAGE"); kfence_print_stack(seq, meta, true); @@ -163,7 +166,11 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show, /* Do not show contents of object nor read into following guard page. */ end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr) - : min(show_until_addr, PAGE_ALIGN(address))); + : static_branch_likely(&kfence_short_canary) ? + min(show_until_addr, + ALIGN(meta->addr + meta->size + 1, + L1_CACHE_BYTES)) : + min(show_until_addr, PAGE_ALIGN(address))); pr_cont("["); for (cur = (const u8 *)address; cur < end; cur++) { @@ -186,7 +193,7 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r const struct kfence_metadata *meta, enum kfence_error_type type) { unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 }; - const ptrdiff_t object_index = meta ? 
meta - kfence_metadata : -1; + ptrdiff_t object_index = -1; int num_stack_entries; int skipnr = 0; @@ -201,8 +208,11 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta)) return; - if (meta) + if (meta) { lockdep_assert_held(&meta->lock); + object_index = meta - meta->kpa->meta; + } + /* * Because we may generate reports in printk-unfriendly parts of the * kernel, such as scheduler code, the use of printk() could deadlock. @@ -272,7 +282,8 @@ void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *r lockdep_on(); - check_panic_on_warn("KFENCE"); + if (kfence_panic_on_fault) + panic("kfence.fault=panic set ...\n"); /* We encountered a memory safety error, taint the kernel! */ add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8a881ab21f6cbd97c6464c30a3496936c7cd367a..bff53fe9041bca2efc3189e04b22cea249b11e50 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -64,6 +64,7 @@ #include #include #include +#include #include "internal.h" #include #include @@ -371,7 +372,12 @@ struct cgroup_subsys_state *mem_cgroup_css_from_folio(struct folio *folio) { struct mem_cgroup *memcg = folio_memcg(folio); +#ifdef CONFIG_CGROUP_WRITEBACK + if (!memcg || + (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgwb_v1)) +#else if (!memcg || !cgroup_subsys_on_dfl(memory_cgrp_subsys)) +#endif memcg = root_mem_cgroup; return &memcg->css; @@ -7367,6 +7373,18 @@ static int __init cgroup_memory(char *s) } __setup("cgroup.memory=", cgroup_memory); +#ifdef CONFIG_CGROUP_WRITEBACK +bool cgwb_v1; + +static int __init enable_cgroup_writeback_v1(char *s) +{ + cgwb_v1 = true; + + return 0; +} +__setup("cgwb_v1", enable_cgroup_writeback_v1); +#endif + /* * subsys_initcall() for memory controller. * @@ -7911,4 +7929,112 @@ static int __init mem_cgroup_swap_init(void) } subsys_initcall(mem_cgroup_swap_init); +#endif /* CONFIG_MEMCG_SWAP */ + +#ifdef CONFIG_RICH_CONTAINER +static inline struct mem_cgroup *css_memcg(struct cgroup_subsys_state *css) +{ + return css ? 
container_of(css, struct mem_cgroup, css) : NULL;
+}
+
+/* Called with the RCU read lock held. */
+struct mem_cgroup *rich_container_get_memcg(void)
+{
+	struct cgroup_subsys_state *css;
+	struct mem_cgroup *memcg_src;
+
+	if (sysctl_rich_container_source == 1)
+		css = NULL;
+	else
+		css = task_css(current, memory_cgrp_id);
+
+	if (css) {
+		memcg_src = css_memcg(css);
+	} else {
+		read_lock(&tasklist_lock);
+		memcg_src = mem_cgroup_from_task(task_active_pid_ns(current)->child_reaper);
+		read_unlock(&tasklist_lock);
+	}
+
+	if (css_tryget(&memcg_src->css))
+		return memcg_src;
+	else
+		return NULL;
+}
+
+void memcg_meminfo(struct mem_cgroup *memcg,
+		   struct sysinfo *info, struct sysinfo_ext *ext)
+{
+	struct mem_cgroup *iter;
+	unsigned long limit, memsw_limit, usage, totalram_pages_tmp;
+	unsigned long pagecache, memcg_wmark, swap_size;
+	int i;
+
+	ext->cached = memcg_page_state(memcg, NR_FILE_PAGES);
+	ext->file_dirty = memcg_page_state(memcg, NR_FILE_DIRTY);
+	ext->writeback = memcg_page_state(memcg, NR_WRITEBACK);
+	ext->anon_mapped = memcg_page_state(memcg, NR_ANON_MAPPED);
+	ext->file_mapped = memcg_page_state(memcg, NR_FILE_MAPPED);
+	ext->slab_reclaimable = memcg_page_state(memcg, NR_SLAB_RECLAIMABLE_B);
+	ext->slab_unreclaimable =
+		memcg_page_state(memcg, NR_SLAB_UNRECLAIMABLE_B);
+	ext->kernel_stack_kb = memcg_page_state(memcg, NR_KERNEL_STACK_KB);
+	ext->writeback_temp = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	ext->anon_thps = memcg_page_state(memcg, NR_ANON_THPS);
+#endif
+	ext->shmem_thps = 0;
+	ext->shmem_pmd_mapped = 0;
+
+	swap_size = memcg_page_state(memcg, MEMCG_SWAP);
+	limit = memsw_limit = PAGE_COUNTER_MAX;
+	for (iter = memcg; iter; iter = parent_mem_cgroup(iter)) {
+		limit = min(limit, iter->memory.max);
+		memsw_limit = min(memsw_limit, iter->memsw.max);
+	}
+	usage = mem_cgroup_usage(memcg, false);
+	totalram_pages_tmp = totalram_pages();
+	info->totalram = limit > totalram_pages_tmp ? totalram_pages_tmp : limit;
+	info->sharedram = memcg_page_state(memcg, NR_SHMEM);
+	info->freeram = info->totalram - usage;
+	/*
+	 * Buffers are not accounted by memcg yet, so report 0. If bufferram
+	 * were given the global value, "free" could show a huge number in
+	 * the -/+ buffers/cache row: that row computes group_used -
+	 * global_buffers - group_cached, and when global_buffers > group_used
+	 * the subtraction wraps around to a very large value.
+	 */
+	info->bufferram = 0;
+	info->totalhigh = totalhigh_pages();
+	info->freehigh = nr_free_highpages();
+	info->mem_unit = PAGE_SIZE;
+
+	/* Fill in swap info. */
+	si_swapinfo(info);
+	if (memsw_limit < info->totalswap)
+		info->totalswap = memsw_limit;
+	info->freeswap = info->totalswap - swap_size;
+
+	for (i = 0; i < NR_LRU_LISTS; i++)
+		ext->lrupages[i] = memcg_page_state(memcg, NR_LRU_BASE + i);
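
The tail of memcg_meminfo() estimates available memory the way the global
si_mem_available() does: free pages, plus page cache and reclaimable slab,
each discounted by min(half, watermark) on the assumption that at least half
of each, or the watermark's worth, must stay resident. A user-space sketch of
the arithmetic (est_available() is a hypothetical name; with the watermark
forced to 0, as in the TODO below, nothing is discounted)::

	#include <stdio.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	/* Mirrors the si_mem_available()-style estimate in memcg_meminfo(). */
	static unsigned long est_available(unsigned long freeram,
					   unsigned long active_file,
					   unsigned long inactive_file,
					   unsigned long slab_reclaimable,
					   unsigned long wmark)
	{
		unsigned long pagecache = active_file + inactive_file;
		unsigned long available;

		/* Keep at least half the page cache, or the watermark's worth. */
		pagecache -= MIN(pagecache / 2, wmark);
		available = freeram + pagecache;

		/* Same assumption for reclaimable slab. */
		available += slab_reclaimable - MIN(slab_reclaimable / 2, wmark);
		return available;
	}

	int main(void)
	{
		/* Hypothetical cgroup, values in pages; wmark 0 per the TODO. */
		printf("available: %lu pages\n",
		       est_available(1000, 1500, 2500, 600, 0));	/* 5600 */
		return 0;
	}

+
+	/* Like what si_mem_available() does. */
+
+	// TODO: memcg_wmark depends on background async page reclaim; waiting
+	// for it.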
+ + //memcg_wmark = memcg->memory.wmark_high; + //if (memcg->wmark_ratio && info->totalram > memcg_wmark) + // memcg_wmark = info->totalram - memcg_wmark; + //else + // memcg_wmark = 0; + memcg_wmark = 0; + + pagecache = ext->lrupages[LRU_ACTIVE_FILE] + + ext->lrupages[LRU_INACTIVE_FILE]; + pagecache -= min(pagecache / 2, memcg_wmark); + ext->available = info->freeram + pagecache; + ext->available += ext->slab_reclaimable - + min(ext->slab_reclaimable / 2, memcg_wmark); +} + #endif /* CONFIG_SWAP */ diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 4d6e43c88489a0ef1755656a7512577902374496..4ba8348e20e49c2b9a75c93676300eaf9037fca0 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -60,6 +60,7 @@ #include #include #include +#include #include "swap.h" #include "internal.h" #include "ras/ras_event.h" @@ -2438,10 +2439,17 @@ static void memory_failure_work_func(struct work_struct *work) spin_unlock_irqrestore(&mf_cpu->lock, proc_flags); if (!gotten) break; - if (entry.flags & MF_SOFT_OFFLINE) + if (entry.flags & MF_SOFT_OFFLINE) { soft_offline_page(entry.pfn, entry.flags); - else - memory_failure(entry.pfn, entry.flags); + } else if (!memory_failure(entry.pfn, entry.flags)) { + /* + * If the pfn reported by ghes can not be recovered, set + * the corresponding page table of linear mapping range + * to be non-present, which avoids the speculative + * access of corrupted memory. + */ + set_memory_np((unsigned long)page_to_virt(pfn_to_page(entry.pfn)), 1); + } } } diff --git a/mm/memory.c b/mm/memory.c index 517221f013035345f611a2a595dd31f0fbbe53b1..ffd60a4e88611af201225482bdaf6e440cdea317 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1770,6 +1770,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address, tlb_finish_mmu(&tlb); hugetlb_zap_end(vma, details); } +EXPORT_SYMBOL_GPL(zap_page_range_single); /** * zap_vma_ptes - remove ptes mapping the vma @@ -2558,6 +2559,176 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long } EXPORT_SYMBOL(vm_iomap_memory); +#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL +static int insert_page_into_pte_locked_mkspecial(struct mm_struct *mm, pte_t *pte, + unsigned long addr, struct page *page, pgprot_t prot) +{ + /* + * The page to be inserted should be either anonymous page or file page. + * + * In general, the anonymous page used in dio should be pinned, while + * the file page used in buffer IO is either locked (read) or writeback + * (sync). On the other hand, file page used in IO metadata read (e.g., + * ext4_get_inode_loc) can be unlocked, and the buffer_head is locked + * instead. + * + * Finally, it is the caller's responsibility to ensure the validity of + * pages to be inserted, i.e., such pages are used for IO requests. 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4d6e43c88489a0ef1755656a7512577902374496..4ba8348e20e49c2b9a75c93676300eaf9037fca0 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -60,6 +60,7 @@
 #include
 #include
 #include
+#include <linux/set_memory.h>
 #include "swap.h"
 #include "internal.h"
 #include "ras/ras_event.h"
@@ -2438,10 +2439,17 @@ static void memory_failure_work_func(struct work_struct *work)
 		spin_unlock_irqrestore(&mf_cpu->lock, proc_flags);
 		if (!gotten)
 			break;
-		if (entry.flags & MF_SOFT_OFFLINE)
+		if (entry.flags & MF_SOFT_OFFLINE) {
 			soft_offline_page(entry.pfn, entry.flags);
-		else
-			memory_failure(entry.pfn, entry.flags);
+		} else if (!memory_failure(entry.pfn, entry.flags)) {
+			/*
+			 * If the pfn reported by GHES cannot be recovered,
+			 * set the corresponding page table entry of the
+			 * linear mapping range to non-present, which avoids
+			 * speculative access to the corrupted memory.
+			 */
+			set_memory_np((unsigned long)page_to_virt(pfn_to_page(entry.pfn)), 1);
+		}
 	}
 }
diff --git a/mm/memory.c b/mm/memory.c
index 517221f013035345f611a2a595dd31f0fbbe53b1..ffd60a4e88611af201225482bdaf6e440cdea317 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1770,6 +1770,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 	tlb_finish_mmu(&tlb);
 	hugetlb_zap_end(vma, details);
 }
+EXPORT_SYMBOL_GPL(zap_page_range_single);
 
 /**
  * zap_vma_ptes - remove ptes mapping the vma
@@ -2558,6 +2559,176 @@ int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long
 }
 EXPORT_SYMBOL(vm_iomap_memory);
 
+#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
+static int insert_page_into_pte_locked_mkspecial(struct mm_struct *mm, pte_t *pte,
+			unsigned long addr, struct page *page, pgprot_t prot)
+{
+	/*
+	 * The page to be inserted should be either an anonymous page or a
+	 * file page.
+	 *
+	 * In general, an anonymous page used in dio should be pinned, while
+	 * a file page used in buffered IO is either locked (read) or under
+	 * writeback (sync). On the other hand, a file page used for IO
+	 * metadata reads (e.g., ext4_get_inode_loc) can be unlocked, with
+	 * the buffer_head locked instead.
+	 *
+	 * Finally, it is the caller's responsibility to ensure the validity
+	 * of the pages to be inserted, i.e., that such pages are used for
+	 * IO requests.
+	 */
+	if (!PageAnon(page) && !page_is_file_lru(page))
+		return -EINVAL;
+
+	flush_dcache_page(page);
+
+	if (!pte_none(*pte))
+		return -EBUSY;
+	set_pte_at(mm, addr, pte, pte_mkspecial(mk_pte(page, prot)));
+	return 0;
+}
+
+static int insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+			struct page *page, pgprot_t prot)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	int retval;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	retval = -ENOMEM;
+	pte = get_locked_pte(mm, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = insert_page_into_pte_locked_mkspecial(mm, pte, addr, page, prot);
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
+int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+	if (addr < vma->vm_start || addr >= vma->vm_end)
+		return -EFAULT;
+	if (!(vma->vm_flags & VM_MIXEDMAP)) {
+		BUG_ON(mmap_read_trylock(vma->vm_mm));
+		BUG_ON(vma->vm_flags & VM_PFNMAP);
+		vm_flags_set(vma, VM_MIXEDMAP);
+	}
+	return insert_page_mkspecial(vma, addr, page, vma->vm_page_prot);
+}
+EXPORT_SYMBOL(vm_insert_page_mkspecial);
+
+#ifdef pte_index
+/*
+ * insert_pages_mkspecial() amortizes the cost of spinlock operations
+ * when inserting pages in a loop. The arch *must* define pte_index.
+ */
+static int insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num, pgprot_t prot)
+{
+	pmd_t *pmd = NULL;
+	pte_t *start_pte, *pte;
+	spinlock_t *pte_lock;
+	struct mm_struct *const mm = vma->vm_mm;
+	unsigned long curr_page_idx = 0;
+	unsigned long remaining_pages_total = *num;
+	unsigned long pages_to_write_in_pmd;
+	int ret;
+more:
+	ret = -EFAULT;
+	pmd = walk_to_pmd(mm, addr);
+	if (!pmd)
+		goto out;
+
+	pages_to_write_in_pmd = min_t(unsigned long,
+		remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
+
+	/* Allocate the PTE if necessary; takes PMD lock once only. */
+	ret = -ENOMEM;
+	if (pte_alloc(mm, pmd))
+		goto out;
+
+	while (pages_to_write_in_pmd) {
+		int pte_idx = 0;
+		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
+
+		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
+		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
+			int err = insert_page_into_pte_locked_mkspecial(mm, pte,
+				addr, pages[curr_page_idx], prot);
+			if (unlikely(err)) {
+				pte_unmap_unlock(start_pte, pte_lock);
+				ret = err;
+				remaining_pages_total -= pte_idx;
+				goto out;
+			}
+			addr += PAGE_SIZE;
+			++curr_page_idx;
+		}
+		pte_unmap_unlock(start_pte, pte_lock);
+		pages_to_write_in_pmd -= batch_size;
+		remaining_pages_total -= batch_size;
+	}
+	if (remaining_pages_total)
+		goto more;
+	ret = 0;
+out:
+	*num = remaining_pages_total;
+	return ret;
+}
+#endif /* pte_index */
+
+/*
+ * vm_insert_pages_mkspecial - variant of vm_insert_pages using insert_pfn.
+ *
+ * The main purpose of vm_insert_pages_mkspecial is to combine the advantages
+ * of vm_insert_pages (batching the pmd lock) and remap_pfn_range_notrack
+ * (skipping track_pfn_insert).
+ *
+ * The caller should ensure the isolation (refcounted, PG_locked,
+ * PG_writeback, etc.) of @pages, and account for the error case where only
+ * a subset of @pages was mapped.
+ */
+int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num)
+{
+#ifdef pte_index
+	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
+
+	if (addr < vma->vm_start || end_addr >= vma->vm_end)
+		return -EFAULT;
+	if (!(vma->vm_flags & VM_MIXEDMAP)) {
+		BUG_ON(mmap_read_trylock(vma->vm_mm));
+		BUG_ON(vma->vm_flags & VM_PFNMAP);
+		vm_flags_set(vma, VM_MIXEDMAP);
+	}
+	return insert_pages_mkspecial(vma, addr, pages, num, vma->vm_page_prot);
+#else
+	unsigned long idx = 0, pgcount = *num;
+	int err = -EINVAL;
+
+	for (; idx < pgcount; ++idx) {
+		err = vm_insert_page_mkspecial(vma, addr + (PAGE_SIZE * idx), pages[idx]);
+		if (err)
+			break;
+	}
+	*num = pgcount - idx;
+	return err;
+#endif /* pte_index */
+}
+EXPORT_SYMBOL(vm_insert_pages_mkspecial);
+#else
+int vm_insert_page_mkspecial(struct vm_area_struct *vma, unsigned long addr, struct page *page)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_page_mkspecial);
+int vm_insert_pages_mkspecial(struct vm_area_struct *vma, unsigned long addr,
+			struct page **pages, unsigned long *num)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL(vm_insert_pages_mkspecial);
+#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
+
 static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd,
 			      unsigned long addr, unsigned long end,
 			      pte_fn_t fn, void *data, bool create,
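To make the contract above concrete, a hypothetical caller (names are
illustrative, not from the patch) passes the page count by reference and
treats a non-zero return together with the residual count as a partial
mapping::

	/* Sketch: map a pinned page array into a VM_MIXEDMAP vma. */
	static int map_pinned_pages(struct vm_area_struct *vma,
				    unsigned long uaddr,
				    struct page **pages, unsigned long nr)
	{
		unsigned long remaining = nr;	/* in: count, out: unmapped tail */
		int err;

		err = vm_insert_pages_mkspecial(vma, uaddr, pages, &remaining);
		if (err)
			pr_warn("mapped %lu of %lu pages (err %d)\n",
				nr - remaining, nr, err);
		return err;
	}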
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 85741403948f55261f953deeb7e30de716533caf..cfc8105c41cb7a4f1b3c202c405c632ac95fe805 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -52,6 +52,7 @@
 #include
 #include
 #include
+#include <linux/kfence.h>
 #include
 #include "internal.h"
 #include "shuffle.h"
@@ -955,6 +956,12 @@ static inline bool free_page_is_bad(struct page *page)
 	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
 		return false;
 
+#ifdef CONFIG_KFENCE
+	/* It's not performance sensitive when reaching here */
+	if (PageKfence(page))
+		return false;
+#endif
+
 	/* Something has gone sideways, find it */
 	free_page_is_bad_report(page);
 	return true;
@@ -1132,7 +1139,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
 	}
 
 	page_cpupid_reset_last(page);
-	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
+	page->flags &= (~PAGE_FLAGS_CHECK_AT_PREP) | __PG_KFENCE; /* keep the KFENCE flag */
 	reset_page_owner(page, order);
 	page_table_check_free(page, order);
 
@@ -1266,6 +1273,9 @@ static void __free_pages_ok(struct page *page, unsigned int order,
 	if (!free_pages_prepare(page, order, fpi_flags))
 		return;
 
+	if (unlikely(!order && kfence_free_page(page)))
+		return;
+
 	/*
 	 * Calling get_pfnblock_migratetype() without spin_lock_irqsave() here
 	 * is used to avoid calling get_pfnblock_migratetype() under the lock.
@@ -2373,6 +2383,10 @@ static void free_unref_page_commit(struct zone *zone, struct per_cpu_pages *pcp,
 	bool free_high;
 
 	__count_vm_events(PGFREE, 1 << order);
+
+	if (unlikely(!order && kfence_free_page(page)))
+		return;
+
 	pindex = order_to_pindex(migratetype, order);
 	list_add(&page->pcp_list, &pcp->lists[pindex]);
 	pcp->count += 1 << order;
@@ -2774,10 +2788,20 @@ struct page *rmqueue(struct zone *preferred_zone,
 	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
 
 	if (likely(pcp_allowed_order(order))) {
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+		    order != pageblock_order) {
+			page = rmqueue_pcplist(preferred_zone, zone, order,
+					       migratetype, alloc_flags);
+			if (likely(page))
+				goto out;
+		}
+#else
 		page = rmqueue_pcplist(preferred_zone, zone, order,
 				       migratetype, alloc_flags);
 		if (likely(page))
 			goto out;
+#endif
 	}
 
 	page = rmqueue_buddy(preferred_zone, zone, order, alloc_flags,
@@ -4338,7 +4362,9 @@ unsigned long __alloc_pages_bulk(gfp_t gfp, int preferred_nid,
 			continue;
 		}
 
-		page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
+		page = kfence_alloc_page(0, preferred_nid, gfp);
+		if (likely(!page))
+			page = __rmqueue_pcplist(zone, 0, ac.migratetype, alloc_flags,
 						pcp, pcp_list);
 		if (unlikely(!page)) {
 			/* Try and allocate at least one page */
@@ -4422,6 +4448,12 @@ struct page *__alloc_pages(gfp_t gfp, unsigned int order, int preferred_nid,
 	 */
 	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp);
 
+	page = kfence_alloc_page(order, preferred_nid, alloc_gfp);
+	if (unlikely(page)) {
+		prep_new_page(page, 0, alloc_gfp, alloc_flags);
+		goto out;
+	}
+
 	/* First allocation attempt */
 	page = get_page_from_freelist(alloc_gfp, order, alloc_flags, &ac);
 	if (likely(page))
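All of the hooks above share one shape: KFENCE gets first refusal on an
order-0 page, and the regular pcp/buddy path runs only if it declines. Note
that kfence_alloc_page() and kfence_free_page() are helpers introduced by
this series rather than mainline KFENCE API; a minimal model of the pattern::

	/* Illustrative only: gate-then-fallback allocation. */
	static struct page *alloc_order0_page(gfp_t gfp, int nid)
	{
		struct page *page;

		page = kfence_alloc_page(0, nid, gfp);	/* usually NULL */
		if (!page)
			page = alloc_pages_node(nid, gfp, 0);
		return page;
	}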
diff --git a/mm/readahead.c b/mm/readahead.c
index 6925e6959fd3fff76cdca6b01c348f3daade9c02..dd24dd555d09a59d385e4ac03038c8f495348d68 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -131,6 +131,9 @@
 
 #include "internal.h"
 
+/* context readahead is enabled by default */
+int sysctl_enable_context_readahead = 1;
+
 /*
  * Initialise a struct file's readahead state.  Assumes that the caller has
  * memset *ra to zero.
@@ -630,9 +633,11 @@ static void ondemand_readahead(struct readahead_control *ractl,
 	 * Query the page cache and look for the traces(cached history pages)
 	 * that a sequential stream would leave behind.
 	 */
-	if (try_context_readahead(ractl->mapping, ra, index, req_size,
-				  max_pages))
+	if (sysctl_enable_context_readahead &&
+	    try_context_readahead(ractl->mapping, ra, index, req_size,
+				  max_pages)) {
 		goto readit;
+	}
 
 	/*
 	 * standalone, small random read
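The knob defaults to on. The sysctl table wiring is not part of this hunk, so
the exact proc path is an assumption; if it is exposed as
vm.enable_context_readahead, runtime control would look like::

	# cat /proc/sys/vm/enable_context_readahead
	1
	# echo 0 > /proc/sys/vm/enable_context_readahead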
diff --git a/mm/slab.c b/mm/slab.c
index 9ad3d0f2d1a5e0b32dcad0c544975cdb6431f648..dba95fd61ffb7bf011964247132fe046f5bf39e6 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3222,7 +3222,7 @@ slab_alloc_node(struct kmem_cache *cachep, struct list_lru *lru, gfp_t flags,
 	if (unlikely(!cachep))
 		return NULL;
 
-	objp = kfence_alloc(cachep, orig_size, flags);
+	objp = kfence_alloc_node(cachep, orig_size, flags, nodeid);
 	if (unlikely(objp))
 		goto out;
 
diff --git a/mm/slub.c b/mm/slub.c
index f7940048138c5a1c665673fa8a75df9c7115fa06..e0a43a4ca53295649331b1568406d5614c8235ee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1866,6 +1866,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
 	struct slab *slab;
 	unsigned int order = oo_order(oo);
 
+	flags |= __GFP_NOKFENCE;
 	if (node == NUMA_NO_NODE)
 		folio = (struct folio *)alloc_pages(flags, order);
 	else
@@ -3461,7 +3462,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
 	if (!s)
 		return NULL;
 
-	object = kfence_alloc(s, orig_size, gfpflags);
+	object = kfence_alloc_node(s, orig_size, gfpflags, node);
 	if (unlikely(object))
 		goto out;
 
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index ccd8cefeea7ba07d8188da16e995c13cc4d5d8a8..df3b0d64bea6a33fe7365a492afd610cfe910c7a 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -334,7 +334,10 @@ EXPORT_SYMBOL_GPL(vsock_find_connected_socket);
 
 void vsock_remove_sock(struct vsock_sock *vsk)
 {
-	vsock_remove_bound(vsk);
+	/* Transport reassignment must not remove the binding. */
+	if (sock_flag(sk_vsock(vsk), SOCK_DEAD))
+		vsock_remove_bound(vsk);
+
 	vsock_remove_connected(vsk);
 }
 EXPORT_SYMBOL_GPL(vsock_remove_sock);
@@ -810,12 +813,13 @@ static void __vsock_release(struct sock *sk, int level)
 	 */
 	lock_sock_nested(sk, level);
 
+	sock_orphan(sk);
+
 	if (vsk->transport)
 		vsk->transport->release(vsk);
 	else if (sock_type_connectible(sk->sk_type))
 		vsock_remove_sock(vsk);
 
-	sock_orphan(sk);
 	sk->sk_shutdown = SHUTDOWN_MASK;
 
 	skb_queue_purge(&sk->sk_receive_queue);
diff --git a/scripts/package/buildtar b/scripts/package/buildtar
index 65b4ea50296219e2cfed406dddd3cb4eac0737ea..93158943a4f73d868660e80eb488a3475512e7ac 100755
--- a/scripts/package/buildtar
+++ b/scripts/package/buildtar
@@ -64,6 +64,9 @@ case "${ARCH}" in
 	alpha)
 		[ -f "${objtree}/arch/alpha/boot/vmlinux.gz" ] && cp -v -- "${objtree}/arch/alpha/boot/vmlinux.gz" "${tmpdir}/boot/vmlinuz-${KERNELRELEASE}"
 		;;
+	sw_64)
+		[ -f "${objtree}/arch/sw_64/boot/vmlinux.bin" ] && cp -v -- "${objtree}/arch/sw_64/boot/vmlinux.bin" "${tmpdir}/boot/vmlinux-bin-${KERNELRELEASE}"
+		;;
 	parisc*)
 		[ -f "${KBUILD_IMAGE}" ] && cp -v -- "${KBUILD_IMAGE}" "${tmpdir}/boot/vmlinux-${KERNELRELEASE}"
 		[ -f "${objtree}/lifimage" ] && cp -v -- "${objtree}/lifimage" "${tmpdir}/boot/lifimage-${KERNELRELEASE}"
diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian
index 5044224cf6714b3e5738f1e6d30dda05c589e3ff..2586bcd5f43a3ab28b3ac512e6136e7c1bede7fd 100755
--- a/scripts/package/mkdebian
+++ b/scripts/package/mkdebian
@@ -26,7 +26,7 @@ set_debarch() {
 
 	# Attempt to find the correct Debian architecture
 	case "$UTS_MACHINE" in
-	i386|ia64|alpha|m68k|riscv*)
+	i386|ia64|alpha|m68k|riscv*|sw_64)
 		debarch="$UTS_MACHINE" ;;
 	x86_64)
 		debarch=amd64 ;;
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 40ae6b2c7a6da590f36d33caa543fd1376ba4945..73558f7eb690543f6da7eff1b6c93c4c5d4b0bad 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -52,6 +52,12 @@
 
 #define R_AARCH64_CALL26 283
 
+#ifndef EM_SW64
+#define EM_SW64		0x9916
+#define R_SW64_NONE	0
+#define R_SW64_REFQUAD	2	/* Direct 64 bit */
+#endif
+
 static int fd_map;	/* File descriptor for file being modified. */
 static int mmap_failed;	/* Boolean flag. */
 static char gpfx;	/* prefix for global symbol name (sometimes '_') */
@@ -326,6 +332,16 @@ static int make_nop_arm64(void *map, size_t const offset)
 	return 0;
 }
 
+static unsigned char ideal_nop4_sw64[4] = {0x5f, 0x07, 0xff, 0x43};
+
+static int make_nop_sw64(void *map, size_t const offset)
+{
+	/* Convert to nop */
+	ulseek(offset, SEEK_SET);
+	uwrite(ideal_nop, 4);
+	return 0;
+}
+
 static int write_file(const char *fname)
 {
 	char tmp_file[strlen(fname) + 4];
@@ -475,6 +491,21 @@ static int LARCH64_is_fake_mcount(Elf64_Rel const *rp)
 	return 1;
 }
 
+#define SW64_FAKEMCOUNT_OFFSET	4
+
+static int sw64_is_fake_mcount(Elf64_Rel const *rp)
+{
+	static Elf64_Addr old_r_offset = ~(Elf64_Addr)0;
+	Elf64_Addr current_r_offset = _w(rp->r_offset);
+	int is_fake;
+
+	is_fake = (old_r_offset != ~(Elf64_Addr)0) &&
+		  (current_r_offset - old_r_offset == SW64_FAKEMCOUNT_OFFSET);
+	old_r_offset = current_r_offset;
+
+	return is_fake;
+}
+
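The heuristic keys off the distance between consecutive relocation offsets: a
relocation exactly 4 bytes after the previous one belongs to the same mcount
call site and is reported as fake. A standalone re-implementation showing the
sequence of verdicts (illustrative, not patch code)::

	#include <stdio.h>
	#include <stdint.h>

	/* Same state machine as sw64_is_fake_mcount(), userspace demo. */
	static int is_fake(uint64_t off)
	{
		static uint64_t old = ~(uint64_t)0;
		int fake = (old != ~(uint64_t)0) && (off - old == 4);

		old = off;
		return fake;
	}

	int main(void)
	{
		const uint64_t offs[] = { 0x1000, 0x1004, 0x1200 };

		for (int i = 0; i < 3; i++)
			printf("%#llx -> %d\n",
			       (unsigned long long)offs[i], is_fake(offs[i]));
		return 0;	/* prints 0, 1, 0 */
	}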
/* 64-bit EM_MIPS has weird ELF64_Rela.r_info.
 * http://techpubs.sgi.com/library/manuals/4000/007-4658-001/pdf/007-4658-001.pdf
 * We interpret Table 29 Relocation Operation (Elf64_Rel, Elf64_Rela) [p.40]
@@ -598,6 +629,14 @@ static int do_file(char const *const fname)
 	case EM_S390:	/* reltype: e_class */	break;
 	case EM_SH:	reltype = R_SH_DIR32; gpfx = 0;	break;
 	case EM_SPARCV9: reltype = R_SPARC_64;	break;
+	case EM_SW64:
+		reltype = R_SW64_REFQUAD;
+		make_nop = make_nop_sw64;
+		rel_type_nop = R_SW64_NONE;
+		ideal_nop = ideal_nop4_sw64;
+		mcount_adjust_64 = -12;
+		is_fake_mcount64 = sw64_is_fake_mcount;
+		break;
 	case EM_X86_64:
 		make_nop = make_nop_x86;
 		ideal_nop = ideal_nop5_x86_64;
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 7f3a000fab0ce0f9827f215156fb35cffc6c49b6..df37a85cf27cc79d938406c2b54a778d3945b66f 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -410,7 +410,10 @@ void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus)
 {
 	unsigned long timeout;
 
-	snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET);
+	if (bus->hygon_dword_access)
+		snd_hdac_chip_updatel(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET);
+	else
+		snd_hdac_chip_updateb(bus, GCTL, AZX_GCTL_RESET, AZX_GCTL_RESET);
 
 	timeout = jiffies + msecs_to_jiffies(100);
 	while (!snd_hdac_chip_readb(bus, GCTL) && time_before(jiffies, timeout))
@@ -475,7 +478,10 @@ static void azx_int_disable(struct hdac_bus *bus)
 
 	/* disable interrupts in stream descriptor */
 	list_for_each_entry(azx_dev, &bus->stream_list, list)
-		snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
+		if (bus->hygon_dword_access)
+			snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_INT_MASK, 0);
+		else
+			snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
 
 	/* disable SIE for all streams & disable controller CIE and GIE */
 	snd_hdac_chip_writel(bus, INTCTL, 0);
diff --git a/sound/hda/hdac_stream.c b/sound/hda/hdac_stream.c
index 214a0680524b0b487b6803cdd0667966ab3a0bbd..1bf30b8f4bff258c61d1c1751d213ed4bacb3a71 100644
--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -146,11 +146,15 @@ void snd_hdac_stream_start(struct hdac_stream *azx_dev)
 			stripe_ctl = snd_hdac_get_stream_stripe_ctl(bus, azx_dev->substream);
 		else
 			stripe_ctl = 0;
-		snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
+		if (bus->hygon_dword_access)
+			snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
+						stripe_ctl);
+		else
+			snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK,
 					stripe_ctl);
 	}
 
 	/* set DMA start and interrupt mask */
-	if (bus->access_sdnctl_in_dword)
+	if (bus->access_sdnctl_in_dword || bus->hygon_dword_access)
 		snd_hdac_stream_updatel(azx_dev, SD_CTL, 0,
 					SD_CTL_DMA_START | SD_INT_MASK);
 	else
@@ -166,11 +170,21 @@ EXPORT_SYMBOL_GPL(snd_hdac_stream_start);
  */
 static void snd_hdac_stream_clear(struct hdac_stream *azx_dev)
 {
-	snd_hdac_stream_updateb(azx_dev, SD_CTL,
-				SD_CTL_DMA_START | SD_INT_MASK, 0);
-	snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
-	if (azx_dev->stripe)
-		snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
+	struct hdac_bus *bus = azx_dev->bus;
+
+	if (bus->hygon_dword_access) {
+		snd_hdac_stream_updatel(azx_dev, SD_CTL,
+					SD_CTL_DMA_START | SD_INT_MASK, 0);
+		snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK); /* to be sure */
+		if (azx_dev->stripe)
+			snd_hdac_stream_updatel(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
+	} else {
+		snd_hdac_stream_updateb(azx_dev, SD_CTL,
+					SD_CTL_DMA_START | SD_INT_MASK, 0);
+		snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
/* to be sure */
+		if (azx_dev->stripe)
+			snd_hdac_stream_updateb(azx_dev, SD_CTL_3B, SD_CTL_STRIPE_MASK, 0);
+	}
 	azx_dev->running = false;
 }
 
@@ -225,12 +239,16 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev)
 {
 	unsigned char val;
 	int dma_run_state;
+	struct hdac_bus *bus = azx_dev->bus;
 
 	snd_hdac_stream_clear(azx_dev);
 
 	dma_run_state = snd_hdac_stream_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START;
 
-	snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET);
+	if (bus->hygon_dword_access)
+		snd_hdac_stream_updatel(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET);
+	else
+		snd_hdac_stream_updateb(azx_dev, SD_CTL, 0, SD_CTL_STREAM_RESET);
 
 	/* wait for hardware to report that the stream entered reset */
 	snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, (val & SD_CTL_STREAM_RESET), 3, 300);
@@ -238,7 +256,10 @@ void snd_hdac_stream_reset(struct hdac_stream *azx_dev)
 	if (azx_dev->bus->dma_stop_delay && dma_run_state)
 		udelay(azx_dev->bus->dma_stop_delay);
 
-	snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0);
+	if (bus->hygon_dword_access)
+		snd_hdac_stream_updatel(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0);
+	else
+		snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_CTL_STREAM_RESET, 0);
 
 	/* wait for hardware to report that the stream is out of reset */
 	snd_hdac_stream_readb_poll(azx_dev, SD_CTL, val, !(val & SD_CTL_STREAM_RESET), 3, 300);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 406779625fb5935cf54b34036bea3f48c03407e5..b69e7b94673c6a8130a007af3e8e9524bf894081 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1061,6 +1061,16 @@ static void stream_update(struct hdac_bus *bus, struct hdac_stream *s)
 	}
 }
 
+static void azx_rirb_zxdelay(struct azx *chip, int enable)
+{
+	if (chip->remap_diu_addr) {
+		if (!enable)
+			writel(0x0, chip->remap_diu_addr + 0x490a8);
+		else
+			writel(0x1000000, chip->remap_diu_addr + 0x490a8);
+	}
+}
+
 irqreturn_t azx_interrupt(int irq, void *dev_id)
 {
 	struct azx *chip = dev_id;
@@ -1103,9 +1113,14 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
 			azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
 			active = true;
 			if (status & RIRB_INT_RESPONSE) {
-				if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+				if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) ||
+				    (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)) {
+					azx_rirb_zxdelay(chip, 1);
 					udelay(80);
+				}
 				snd_hdac_bus_update_rirb(bus);
+				if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY)
+					azx_rirb_zxdelay(chip, 0);
 			}
 		}
 	} while (active && ++repeat < 10);
diff --git a/sound/pci/hda/hda_controller.h b/sound/pci/hda/hda_controller.h
index 8556031bcd68e48c4ad1c6d4d4a094fce0b2a3fb..9db89f4c7b3f84300c6e654c70ea02bdd7c8422b 100644
--- a/sound/pci/hda/hda_controller.h
+++ b/sound/pci/hda/hda_controller.h
@@ -45,6 +45,7 @@
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)	/* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)	/* Stick to 32-bit MSIs */
 #define AZX_DCAPS_SEPARATE_STREAM_TAG	(1 << 30) /* capture and playback use separate stream tag */
+#define AZX_DCAPS_RIRB_PRE_DELAY	(1 << 31) /* Put a delay before read */
 
 enum {
 	AZX_SNOOP_TYPE_NONE,
@@ -143,6 +144,8 @@ struct azx {
 	unsigned int disabled:1; /* disabled by vga_switcheroo */
 	unsigned int pm_prepared:1;
 
+	void __iomem *remap_diu_addr;
+
 	/* GTS present */
 	unsigned int gts_present:1;
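The hygon_dword_access flag switches every sub-dword stream/control update to
a 32-bit read-modify-write, for controllers that only decode dword MMIO
cycles. A sketch of the underlying pattern (generic illustration; the driver
uses the snd_hdac_*_updatel() helpers instead)::

	/* Update one byte of a register on hardware that only does 32-bit MMIO. */
	static inline void update_byte_via_dword(void __iomem *base,
						 unsigned int dword_off,
						 unsigned int byte_shift,
						 u8 mask, u8 val)
	{
		u32 v = readl(base + dword_off);

		v &= ~((u32)mask << byte_shift);
		v |= (u32)(val & mask) << byte_shift;
		writel(v, base + dword_off);
	}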
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 75148485b75538008b24385ac1fa8d7b9d2b93a2..038e5724d016705e6fcd60c7742d55d291f1b379 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -237,7 +237,9 @@ enum {
 	AZX_DRIVER_CTHDA,
 	AZX_DRIVER_CMEDIA,
 	AZX_DRIVER_ZHAOXIN,
+	AZX_DRIVER_ZXHDMI,
 	AZX_DRIVER_LOONGSON,
+	AZX_DRIVER_HYGON,
 	AZX_DRIVER_GENERIC,
 	AZX_NUM_DRIVERS, /* keep this as last entry */
 };
@@ -349,7 +351,9 @@ static const char * const driver_short_names[] = {
 	[AZX_DRIVER_CTHDA] = "HDA Creative",
 	[AZX_DRIVER_CMEDIA] = "HDA C-Media",
 	[AZX_DRIVER_ZHAOXIN] = "HDA Zhaoxin",
+	[AZX_DRIVER_ZXHDMI] = "HDA Zhaoxin HDMI",
 	[AZX_DRIVER_LOONGSON] = "HDA Loongson",
+	[AZX_DRIVER_HYGON] = "HDA Hygon",
 	[AZX_DRIVER_GENERIC] = "HD-Audio Generic",
 };
 
@@ -371,6 +375,31 @@ static void update_pci_byte(struct pci_dev *pci, unsigned int reg,
 	pci_write_config_byte(pci, reg, data);
 }
 
+static int azx_init_pci_zx(struct azx *chip)
+{
+	struct snd_card *card = chip->card;
+	unsigned int diu_reg;
+	struct pci_dev *diu_pci = NULL;
+
+	azx_bus(chip)->polling_mode = 1;
+	diu_pci = pci_get_device(PCI_VENDOR_ID_ZHAOXIN, 0x3a03, NULL);
+	if (!diu_pci) {
+		dev_info(card->dev, "zx_hda: no KX-5000 DIU device found\n");
+		return -ENXIO;
+	}
+	pci_read_config_dword(diu_pci, PCI_BASE_ADDRESS_0, &diu_reg);
+	chip->remap_diu_addr = ioremap(diu_reg, 0x50000);
+	pci_dev_put(diu_pci);
+	dev_info(card->dev, "zx_hda: DIU BAR %#x mapped at %p\n",
+		 diu_reg, chip->remap_diu_addr);
+	return 0;
+}
+
+static void azx_free_pci_zx(struct azx *chip)
+{
+	if (chip->remap_diu_addr)
+		iounmap(chip->remap_diu_addr);
+}
+
 static void azx_init_pci(struct azx *chip)
 {
 	int snoop_type = azx_get_snoop_type(chip);
@@ -1360,6 +1389,9 @@ static void azx_free(struct azx *chip)
 	hda->init_failed = 1; /* to be sure */
 	complete_all(&hda->probe_wait);
 
+	if (chip->driver_type == AZX_DRIVER_ZXHDMI)
+		azx_free_pci_zx(chip);
+
 	if (use_vga_switcheroo(hda)) {
 		if (chip->disabled && hda->probe_continued)
 			snd_hda_unlock_devices(&chip->bus);
@@ -1547,7 +1579,8 @@ static int check_position_fix(struct azx *chip, int fix)
 	}
 
 	/* Check VIA/ATI HD Audio Controller exist */
-	if (chip->driver_type == AZX_DRIVER_VIA) {
+	if (chip->driver_type == AZX_DRIVER_VIA ||
+	    chip->driver_type == AZX_DRIVER_ZHAOXIN) {
 		dev_dbg(chip->card->dev, "Using VIACOMBO position fix\n");
 		return POS_FIX_VIACOMBO;
 	}
@@ -1701,7 +1734,7 @@ static void azx_check_snoop_available(struct azx *chip)
 		snoop = true;
 
 	if (azx_get_snoop_type(chip) == AZX_SNOOP_TYPE_NONE &&
-	    chip->driver_type == AZX_DRIVER_VIA) {
+	    (chip->driver_type == AZX_DRIVER_VIA || chip->driver_type == AZX_DRIVER_ZHAOXIN)) {
 		/* force to non-snoop mode for a new VIA controller
		 * when BIOS is set
		 */
@@ -1751,6 +1784,8 @@ static int default_bdl_pos_adj(struct azx *chip)
 	case AZX_DRIVER_ICH:
 	case AZX_DRIVER_PCH:
 		return 1;
+	case AZX_DRIVER_ZXHDMI:
+		return 128;
 	default:
 		return 32;
 	}
@@ -1876,6 +1911,15 @@ static int azx_first_init(struct azx *chip)
 		bus->access_sdnctl_in_dword = 1;
 	}
 
+	if (chip->driver_type == AZX_DRIVER_HYGON &&
+	    chip->pci->device == PCI_DEVICE_ID_HYGON_18H_M05H_HDA)
+		bus->hygon_dword_access = 1;
+
+	chip->remap_diu_addr = NULL;
+
+	if (chip->driver_type == AZX_DRIVER_ZXHDMI)
+		azx_init_pci_zx(chip);
+
 	err = pcim_iomap_regions(pci, 1 << 0, "ICH HD audio");
 	if (err < 0)
 		return err;
@@ -1977,6 +2021,7 @@ static int azx_first_init(struct azx *chip)
 		chip->capture_streams = ATIHDMI_NUM_CAPTURE;
 		break;
 	case AZX_DRIVER_GFHDMI:
+	case AZX_DRIVER_ZXHDMI:
 	case AZX_DRIVER_GENERIC:
 	default:
 		chip->playback_streams = ICH6_NUM_PLAYBACK;
@@ -2689,6 +2734,15 @@ static const struct pci_device_id azx_ids[] = {
 	{ PCI_VDEVICE(VIA, 0x9170), .driver_data = AZX_DRIVER_GENERIC },
 	/* VIA GFX VT6122/VX11 */
 	{ PCI_VDEVICE(VIA,
0x9140), .driver_data = AZX_DRIVER_GENERIC },
+	{ PCI_VDEVICE(VIA, 0x9141), .driver_data = AZX_DRIVER_GENERIC },
+	{ PCI_VDEVICE(VIA, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(VIA, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(VIA, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(VIA, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
 	/* SIS966 */
 	{ PCI_VDEVICE(SI, 0x7502), .driver_data = AZX_DRIVER_SIS },
 	/* ULI M5461 */
@@ -2744,11 +2798,23 @@ static const struct pci_device_id azx_ids[] = {
 	  .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
 	/* Zhaoxin */
 	{ PCI_VDEVICE(ZHAOXIN, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
+	{ PCI_VDEVICE(ZHAOXIN, 0x9141), .driver_data = AZX_DRIVER_GENERIC },
+	{ PCI_VDEVICE(ZHAOXIN, 0x9142), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(ZHAOXIN, 0x9144), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(ZHAOXIN, 0x9145), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
+	{ PCI_VDEVICE(ZHAOXIN, 0x9146), .driver_data = AZX_DRIVER_ZXHDMI | AZX_DCAPS_POSFIX_LPIB |
+	  AZX_DCAPS_NO_MSI | AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_NO_64BIT },
 	/* Loongson HDAudio */
 	{ PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDA),
 	  .driver_data = AZX_DRIVER_LOONGSON },
 	{ PCI_VDEVICE(LOONGSON, PCI_DEVICE_ID_LOONGSON_HDMI),
 	  .driver_data = AZX_DRIVER_LOONGSON },
+	/* Hygon HDAudio */
+	{ PCI_VDEVICE(HYGON, PCI_DEVICE_ID_HYGON_18H_M05H_HDA),
+	  .driver_data = AZX_DRIVER_HYGON | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_NO_MSI },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, azx_ids);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1cde2a69bdb4baa0d95d34dde3e1a86933931268..f6053fe32b2bb450c0183a9378af5e6092700fc7 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -4485,6 +4485,20 @@ static int patch_via_hdmi(struct hda_codec *codec)
 	return patch_simple_hdmi(codec, VIAHDMI_CVT_NID, VIAHDMI_PIN_NID);
 }
 
+/* Zhaoxin HDMI Implementation */
+static int patch_zhaoxin_hdmi(struct hda_codec *codec)
+{
+	int err;
+
+	err = patch_generic_hdmi(codec);
+	if (err)
+		return err;
+
+	codec->no_sticky_stream = 1;
+
+	return 0;
+}
+
 static int patch_gf_hdmi(struct hda_codec *codec)
 {
 	int err;
@@ -4607,6 +4621,15 @@ HDA_CODEC_ENTRY(0x11069f80, "VX900 HDMI/DP",	patch_via_hdmi),
 HDA_CODEC_ENTRY(0x11069f81, "VX900 HDMI/DP",	patch_via_hdmi),
 HDA_CODEC_ENTRY(0x11069f84, "VX11 HDMI/DP",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x11069f85, "VX11 HDMI/DP",	patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x11069f88, "KX-5000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f89, "KX-5000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8a, "KX-6000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8b, "KX-6000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8c, "KX-6000G HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8d, "KX-6000G HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8e, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f8f, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x11069f90, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
 HDA_CODEC_ENTRY(0x80860054, "IbexPeak HDMI",	patch_i915_cpt_hdmi),
 HDA_CODEC_ENTRY(0x80862800, "Geminilake HDMI",	patch_i915_glk_hdmi),
 HDA_CODEC_ENTRY(0x80862801, "Bearlake HDMI",	patch_generic_hdmi),
@@ -4639,6 +4662,15 @@ HDA_CODEC_ENTRY(0x80862880, "CedarTrail HDMI",	patch_generic_hdmi),
 HDA_CODEC_ENTRY(0x80862882, "Valleyview2 HDMI",	patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x80862883, "Braswell HDMI",	patch_i915_byt_hdmi),
 HDA_CODEC_ENTRY(0x808629fb, "Crestline HDMI",	patch_generic_hdmi),
+HDA_CODEC_ENTRY(0x1d179f88, "KX-5000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f89, "KX-5000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8a, "KX-6000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8b, "KX-6000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8c, "KX-6000G HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8d, "KX-6000G HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8e, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f8f, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
+HDA_CODEC_ENTRY(0x1d179f90, "KX-7000 HDMI/DP",	patch_zhaoxin_hdmi),
 /* special ID for generic HDMI */
 HDA_CODEC_ENTRY(HDA_CODEC_ID_GENERIC_HDMI, "Generic HDMI",	patch_generic_hdmi),
 {} /* terminator */
diff --git a/tools/arch/sw_64/include/asm/barrier.h b/tools/arch/sw_64/include/asm/barrier.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc4aeffeb6819026d3760f38d237477dd6d62709
--- /dev/null
+++ b/tools/arch/sw_64/include/asm/barrier.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _TOOLS_LINUX_ASM_SW64_BARRIER_H
+#define _TOOLS_LINUX_ASM_SW64_BARRIER_H
+
+#define mb()	__asm__ __volatile__("mb" : : : "memory")
+#define rmb()	__asm__ __volatile__("mb" : : : "memory")
+#define wmb()	__asm__ __volatile__("mb" : : : "memory")
+
+#endif /* _TOOLS_LINUX_ASM_SW64_BARRIER_H */
diff --git a/tools/arch/sw_64/include/uapi/asm/bitsperlong.h b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6a510c2823340fb47701a6066989456bde5bca4
--- /dev/null
+++ b/tools/arch/sw_64/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_SW64_BITSPERLONG_H
+#define __ASM_SW64_BITSPERLONG_H
+
+#define __BITS_PER_LONG 64
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_SW64_BITSPERLONG_H */
diff --git a/tools/arch/sw_64/include/uapi/asm/errno.h b/tools/arch/sw_64/include/uapi/asm/errno.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a43a943581a9b80dd4c7549aff13c14f2479213
--- /dev/null
+++ b/tools/arch/sw_64/include/uapi/asm/errno.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _SW64_ERRNO_H
+#define _SW64_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#undef	EAGAIN			/* 11 in errno-base.h */
+
+#define	EDEADLK		11	/* Resource deadlock would occur */
+
+#define	EAGAIN		35	/* Try again */
+#define	EWOULDBLOCK	EAGAIN	/* Operation would block */
+#define	EINPROGRESS	36	/* Operation now in progress */
+#define	EALREADY	37	/* Operation already in progress */
+#define	ENOTSOCK	38	/* Socket operation on non-socket */
+#define	EDESTADDRREQ	39	/* Destination address required */
+#define	EMSGSIZE	40	/* Message too long */
+#define	EPROTOTYPE	41	/* Protocol wrong type for socket */
+#define
ENOPROTOOPT 42 /* Protocol not available */ +#define EPROTONOSUPPORT 43 /* Protocol not supported */ +#define ESOCKTNOSUPPORT 44 /* Socket type not supported */ +#define EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define EPFNOSUPPORT 46 /* Protocol family not supported */ +#define EAFNOSUPPORT 47 /* Address family not supported by protocol */ +#define EADDRINUSE 48 /* Address already in use */ +#define EADDRNOTAVAIL 49 /* Cannot assign requested address */ +#define ENETDOWN 50 /* Network is down */ +#define ENETUNREACH 51 /* Network is unreachable */ +#define ENETRESET 52 /* Network dropped connection because of reset */ +#define ECONNABORTED 53 /* Software caused connection abort */ +#define ECONNRESET 54 /* Connection reset by peer */ +#define ENOBUFS 55 /* No buffer space available */ +#define EISCONN 56 /* Transport endpoint is already connected */ +#define ENOTCONN 57 /* Transport endpoint is not connected */ +#define ESHUTDOWN 58 /* Cannot send after transport endpoint shutdown */ +#define ETOOMANYREFS 59 /* Too many references: cannot splice */ +#define ETIMEDOUT 60 /* Connection timed out */ +#define ECONNREFUSED 61 /* Connection refused */ +#define ELOOP 62 /* Too many symbolic links encountered */ +#define ENAMETOOLONG 63 /* File name too long */ +#define EHOSTDOWN 64 /* Host is down */ +#define EHOSTUNREACH 65 /* No route to host */ +#define ENOTEMPTY 66 /* Directory not empty */ + +#define EUSERS 68 /* Too many users */ +#define EDQUOT 69 /* Quota exceeded */ +#define ESTALE 70 /* Stale file handle */ +#define EREMOTE 71 /* Object is remote */ + +#define ENOLCK 77 /* No record locks available */ +#define ENOSYS 78 /* Function not implemented */ + +#define ENOMSG 80 /* No message of desired type */ +#define EIDRM 81 /* Identifier removed */ +#define ENOSR 82 /* Out of streams resources */ +#define ETIME 83 /* Timer expired */ +#define EBADMSG 84 /* Not a data message */ +#define EPROTO 85 /* Protocol error */ +#define ENODATA 86 /* No data available */ +#define ENOSTR 87 /* Device not a stream */ + +#define ENOPKG 92 /* Package not installed */ + +#define EILSEQ 116 /* Illegal byte sequence */ + +/* The following are just random noise.. 
*/ +#define ECHRNG 88 /* Channel number out of range */ +#define EL2NSYNC 89 /* Level 2 not synchronized */ +#define EL3HLT 90 /* Level 3 halted */ +#define EL3RST 91 /* Level 3 reset */ + +#define ELNRNG 93 /* Link number out of range */ +#define EUNATCH 94 /* Protocol driver not attached */ +#define ENOCSI 95 /* No CSI structure available */ +#define EL2HLT 96 /* Level 2 halted */ +#define EBADE 97 /* Invalid exchange */ +#define EBADR 98 /* Invalid request descriptor */ +#define EXFULL 99 /* Exchange full */ +#define ENOANO 100 /* No anode */ +#define EBADRQC 101 /* Invalid request code */ +#define EBADSLT 102 /* Invalid slot */ + +#define EDEADLOCK EDEADLK + +#define EBFONT 104 /* Bad font file format */ +#define ENONET 105 /* Machine is not on the network */ +#define ENOLINK 106 /* Link has been severed */ +#define EADV 107 /* Advertise error */ +#define ESRMNT 108 /* Srmount error */ +#define ECOMM 109 /* Communication error on send */ +#define EMULTIHOP 110 /* Multihop attempted */ +#define EDOTDOT 111 /* RFS specific error */ +#define EOVERFLOW 112 /* Value too large for defined data type */ +#define ENOTUNIQ 113 /* Name not unique on network */ +#define EBADFD 114 /* File descriptor in bad state */ +#define EREMCHG 115 /* Remote address changed */ + +#define EUCLEAN 117 /* Structure needs cleaning */ +#define ENOTNAM 118 /* Not a XENIX named type file */ +#define ENAVAIL 119 /* No XENIX semaphores available */ +#define EISNAM 120 /* Is a named type file */ +#define EREMOTEIO 121 /* Remote I/O error */ + +#define ELIBACC 122 /* Can not access a needed shared library */ +#define ELIBBAD 123 /* Accessing a corrupted shared library */ +#define ELIBSCN 124 /* .lib section in a.out corrupted */ +#define ELIBMAX 125 /* Attempting to link in too many shared libraries */ +#define ELIBEXEC 126 /* Cannot exec a shared library directly */ +#define ERESTART 127 /* Interrupted system call should be restarted */ +#define ESTRPIPE 128 /* Streams pipe error */ + +#define ENOMEDIUM 129 /* No medium found */ +#define EMEDIUMTYPE 130 /* Wrong medium type */ +#define ECANCELED 131 /* Operation Cancelled */ +#define ENOKEY 132 /* Required key not available */ +#define EKEYEXPIRED 133 /* Key has expired */ +#define EKEYREVOKED 134 /* Key has been revoked */ +#define EKEYREJECTED 135 /* Key was rejected by service */ + +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + +#define ERFKILL 138 /* Operation not possible due to RF-kill */ + +#define EHWPOISON 139 /* Memory page has hardware error */ + +#endif /* _SW64_ERRNO_H */ diff --git a/tools/arch/sw_64/include/uapi/asm/mman.h b/tools/arch/sw_64/include/uapi/asm/mman.h new file mode 100644 index 0000000000000000000000000000000000000000..a9603c93a34bf4416c6db36701263ce678325e1c --- /dev/null +++ b/tools/arch/sw_64/include/uapi/asm/mman.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H +#define MADV_DODUMP 17 +#define MADV_DOFORK 11 +#define MADV_DONTDUMP 16 +#define MADV_DONTFORK 10 +#define MADV_DONTNEED 6 +#define MADV_FREE 8 +#define MADV_HUGEPAGE 14 +#define MADV_MERGEABLE 12 +#define MADV_NOHUGEPAGE 15 +#define MADV_NORMAL 0 +#define MADV_RANDOM 1 +#define MADV_REMOVE 9 +#define MADV_SEQUENTIAL 2 +#define MADV_UNMERGEABLE 13 +#define MADV_WILLNEED 3 +#define MAP_ANONYMOUS 0x10 +#define MAP_DENYWRITE 0x02000 +#define MAP_EXECUTABLE 0x04000 +#define MAP_FILE 
0
+#define MAP_FIXED	0x100
+#define MAP_GROWSDOWN	0x01000
+#define MAP_HUGETLB	0x100000
+#define MAP_LOCKED	0x08000
+#define MAP_NONBLOCK	0x40000
+#define MAP_NORESERVE	0x10000
+#define MAP_POPULATE	0x20000
+#define MAP_STACK	0x80000
+#define PROT_EXEC	0x4
+#define PROT_GROWSDOWN	0x01000000
+#define PROT_GROWSUP	0x02000000
+#define PROT_NONE	0x0
+#define PROT_READ	0x1
+#define PROT_SEM	0x8
+#define PROT_WRITE	0x2
+/* MADV_HWPOISON is undefined on sw_64, fix it for perf */
+#define MADV_HWPOISON	100
+/* MADV_SOFT_OFFLINE is undefined on sw_64, fix it for perf */
+#define MADV_SOFT_OFFLINE 101
+/* MAP_32BIT is undefined on sw_64, fix it for perf */
+#define MAP_32BIT	0
+/* MAP_UNINITIALIZED is undefined on sw_64, fix it for perf */
+#define MAP_UNINITIALIZED	0
+#endif /* TOOLS_ARCH_SW64_UAPI_ASM_MMAN_FIX_H */
diff --git a/tools/arch/sw_64/include/uapi/asm/perf_regs.h b/tools/arch/sw_64/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..892be52610265f85cab2fff62d256c093e5593ff
--- /dev/null
+++ b/tools/arch/sw_64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef _ASM_SW64_PERF_REGS_H
+#define _ASM_SW64_PERF_REGS_H
+
+enum perf_event_sw64_regs {
+	PERF_REG_SW64_R0,
+	PERF_REG_SW64_R1,
+	PERF_REG_SW64_R2,
+	PERF_REG_SW64_R3,
+	PERF_REG_SW64_R4,
+	PERF_REG_SW64_R5,
+	PERF_REG_SW64_R6,
+	PERF_REG_SW64_R7,
+	PERF_REG_SW64_R8,
+	PERF_REG_SW64_R9,
+	PERF_REG_SW64_R10,
+	PERF_REG_SW64_R11,
+	PERF_REG_SW64_R12,
+	PERF_REG_SW64_R13,
+	PERF_REG_SW64_R14,
+	PERF_REG_SW64_R15,
+	PERF_REG_SW64_R16,
+	PERF_REG_SW64_R17,
+	PERF_REG_SW64_R18,
+	PERF_REG_SW64_R19,
+	PERF_REG_SW64_R20,
+	PERF_REG_SW64_R21,
+	PERF_REG_SW64_R22,
+	PERF_REG_SW64_R23,
+	PERF_REG_SW64_R24,
+	PERF_REG_SW64_R25,
+	PERF_REG_SW64_R26,
+	PERF_REG_SW64_R27,
+	PERF_REG_SW64_R28,
+	PERF_REG_SW64_GP,
+	PERF_REG_SW64_SP,
+	PERF_REG_SW64_PC,
+	PERF_REG_SW64_MAX,
+};
+#endif /* _ASM_SW64_PERF_REGS_H */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index 798e60b5454b7e108a07550cb354a8ee27c0de8f..dd31390524b79a9e67148e8731f596af1e998bd8 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -13,7 +13,7 @@
 /*
  * Defines x86 CPU feature bits
  */
-#define NCAPINTS	21	/* N 32-bit words worth of info */
+#define NCAPINTS	23	/* N 32-bit words worth of info */
 #define NBUGINTS	2	/* N 32-bit bug flags */
 
 /*
@@ -443,6 +443,9 @@
 #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* "" Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* "" SMM_CTL MSR is not present */
 
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000006, word 22 */
+#define X86_FEATURE_ZXPAUSE		(22*32 + 0) /* ZHAOXIN ZXPAUSE */
+
 /*
  * BUG word(s)
  */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index fafe9be7a6f4ff6b7adc0ae3ea34d30b3d9ba79d..cbf8c3ea37fe30afe40a382e88de85eee1998534 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -131,6 +131,8 @@
 #define DISABLED_MASK18	0
 #define DISABLED_MASK19	0
 #define DISABLED_MASK20	0
-#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21)
+#define DISABLED_MASK21	0
+#define DISABLED_MASK22	0
+#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23)
 
 #endif /* _ASM_X86_DISABLED_FEATURES_H */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 1d111350197f3169a8eec402d77980dd617c6b95..3456f6deca51684feb87ebcd6ed21116565b6e2e 100644 --- a/tools/arch/x86/include/asm/msr-index.h +++ b/tools/arch/x86/include/asm/msr-index.h @@ -72,12 +72,23 @@ #define MSR_IA32_UMWAIT_CONTROL 0xe1 #define MSR_IA32_UMWAIT_CONTROL_C02_DISABLE BIT(0) #define MSR_IA32_UMWAIT_CONTROL_RESERVED BIT(1) + +#define MSR_ZX_PAUSE_CONTROL 0x187f +#define MSR_ZX_PAUSE_CONTROL_C02_DISABLE BIT(0) +#define MSR_ZX_PAUSE_CONTROL_RESERVED BIT(1) + /* * The time field is bit[31:2], but representing a 32bit value with * bit[1:0] zero. */ #define MSR_IA32_UMWAIT_CONTROL_TIME_MASK (~0x03U) +/* + * The time field is bit[31:2], but representing a 32bit value with + * bit[1:0] zero. + */ +#define MSR_ZX_PAUSE_CONTROL_TIME_MASK (~0x03U) + /* Abbreviated from Intel SDM name IA32_CORE_CAPABILITIES */ #define MSR_IA32_CORE_CAPS 0x000000cf #define MSR_IA32_CORE_CAPS_INTEGRITY_CAPS_BIT 2 @@ -753,6 +764,13 @@ #define MSR_TMTA_LRTI_READOUT 0x80868018 #define MSR_TMTA_LRTI_VOLT_MHZ 0x8086801a +/* + * Zhaoxin extend VMCS capabilities: + * bit 0: exec-cntl3 VMCS field. + */ +#define MSR_ZX_EXT_VMCS_CAPS 0x1675 +#define MSR_ZX_VMCS_EXEC_CTL3 BIT(0) + /* Intel defined MSRs. */ #define MSR_IA32_P5_MC_ADDR 0x00000000 #define MSR_IA32_P5_MC_TYPE 0x00000001 diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index 7ba1726b71c7b8bfc95888dc78508998bba263fe..76953f757f3c32d593f1c079eb69ce13961e7caf 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -99,6 +99,8 @@ #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 #define REQUIRED_MASK20 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) +#define REQUIRED_MASK21 0 +#define REQUIRED_MASK22 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 23) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature index 934e2777a2dbcd9062f873c039f1853867937ed3..fb290a90b263d10b94d23d359e0a49498c4b5602 100644 --- a/tools/build/Makefile.feature +++ b/tools/build/Makefile.feature @@ -54,6 +54,7 @@ FEATURE_TESTS_BASIC := \ libtracefs \ libcrypto \ libunwind \ + libunwind-sw_64 \ pthread-attr-setaffinity-np \ pthread-barrier \ reallocarray \ diff --git a/tools/build/feature/Makefile b/tools/build/feature/Makefile index dad79ede4e0ae0030ee401a1daf5d59ff871dcb6..cb57e46cec4bd4a9c07dab7d8977e1763b39a361 100644 --- a/tools/build/feature/Makefile +++ b/tools/build/feature/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 include ../../scripts/Makefile.include +ARCH ?= $(shell uname -m) FILES= \ test-all.bin \ test-backtrace.bin \ @@ -45,6 +46,7 @@ FILES= \ test-libunwind-x86_64.bin \ test-libunwind-arm.bin \ test-libunwind-aarch64.bin \ + test-libunwind-sw_64.bin \ test-libunwind-debug-frame-arm.bin \ test-libunwind-debug-frame-aarch64.bin \ test-pthread-attr-setaffinity-np.bin \ @@ -86,7 +88,11 @@ all: $(FILES) __BUILD = $(CC) $(CFLAGS) -MD -Wall -Werror -o $@ $(patsubst %.bin,%.c,$(@F)) $(LDFLAGS) BUILD = $(__BUILD) > $(@:.bin=.make.output) 2>&1 +ifeq ($(ARCH),sw_64) + BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl -liberty -lz +else BUILD_BFD = $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl +endif BUILD_ALL = $(BUILD) -fstack-protector-all -O2 -D_FORTIFY_SOURCE=2 -ldw -lelf -lnuma -lelf -lslang $(FLAGS_PERL_EMBED) $(FLAGS_PYTHON_EMBED) -DPACKAGE='"perf"' -lbfd -ldl -lz -llzma -lzstd -lcap __BUILDXX = $(CXX) $(CXXFLAGS) -MD -Wall -Werror -o $@ $(patsubst 
%.bin,%.cpp,$(@F)) $(LDFLAGS)
@@ -189,6 +195,9 @@ $(OUTPUT)test-libunwind-arm.bin:
 $(OUTPUT)test-libunwind-aarch64.bin:
 	$(BUILD) -lelf -lunwind-aarch64
 
+$(OUTPUT)test-libunwind-sw_64.bin:
+	$(BUILD) -lelf -lunwind-sw_64
+
 $(OUTPUT)test-libunwind-debug-frame-arm.bin:
 	$(BUILD) -lelf -lunwind-arm
 
diff --git a/tools/build/feature/test-libunwind-sw_64.c b/tools/build/feature/test-libunwind-sw_64.c
new file mode 100644
index 0000000000000000000000000000000000000000..274948b961f424dee01016f16b7f72475d9f5880
--- /dev/null
+++ b/tools/build/feature/test-libunwind-sw_64.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <libunwind-sw_64.h>
+#include <stdlib.h>
+
+extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as,
+					       unw_word_t ip,
+					       unw_dyn_info_t *di,
+					       unw_proc_info_t *pi,
+					       int need_unwind_info, void *arg);
+
+#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table)
+
+static unw_accessors_t accessors;
+
+int main(void)
+{
+	unw_addr_space_t addr_space;
+
+	addr_space = unw_create_addr_space(&accessors, 0);
+	if (addr_space)
+		return 0;
+
+	unw_init_remote(NULL, addr_space, NULL);
+	dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL);
+
+	return 0;
+}
diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
index c65267afc3415b194b9f60f8c56024daf6e8a535..036e2fc92d1aeecf6589e4e5c125a0353a2857b7 100644
--- a/tools/include/uapi/asm/bitsperlong.h
+++ b/tools/include/uapi/asm/bitsperlong.h
@@ -13,6 +13,8 @@
 #include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
 #elif defined(__alpha__)
 #include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
+#elif defined(__sw_64__)
+#include "../../../arch/sw_64/include/uapi/asm/bitsperlong.h"
 #else
 #include <asm-generic/bitsperlong.h>
 #endif
diff --git a/tools/include/uapi/asm/errno.h b/tools/include/uapi/asm/errno.h
index 869379f91fe487ad3611966b09bda47462da0b70..bcfa3d742933d923aa241d5c7aff35b60515e595 100644
--- a/tools/include/uapi/asm/errno.h
+++ b/tools/include/uapi/asm/errno.h
@@ -11,6 +11,8 @@
 #include "../../../arch/mips/include/uapi/asm/errno.h"
 #elif defined(__hppa__)
 #include "../../../arch/parisc/include/uapi/asm/errno.h"
+#elif defined(__sw_64__)
+#include "../../../arch/sw_64/include/uapi/asm/errno.h"
 #else
 #include <asm-generic/errno.h>
 #endif
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index 856f0dfb8e5a30f228460911bd0f297aaa291ef6..192ab0415ee9e855ff61c74f88d15632ee3364b2 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -21,9 +21,9 @@ you to track down the cacheline contentions.
 
 On Intel, the tool is based on load latency and precise store facility events
 provided by Intel CPUs. On PowerPC, the tool uses random instruction sampling
-with thresholding feature. On AMD, the tool uses IBS op pmu (due to hardware
-limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses SPE to
-sample load and store operations, therefore hardware and kernel support is
+with thresholding feature. On AMD and Hygon, the tool uses IBS op pmu (due to
+hardware limitations, perf c2c is not supported on Zen3 cpus). On Arm64 it uses
+SPE to sample load and store operations, therefore hardware and kernel support is
 required. See linkperf:perf-arm-spe[1] for a setup guide. Due to the
 statistical nature of Arm SPE sampling, not every memory operation will be
 sampled.
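On AMD and Hygon parts the events resolve to the IBS op PMU, so no ldlat
parameter applies. A minimal session (assuming an IBS-capable, non-Zen3
system) could be::

	# perf c2c record -- ./workload
	# perf c2c report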
@@ -152,7 +152,7 @@ default on Intel:
 	cpu/mem-loads,ldlat=30/P
 	cpu/mem-stores/P
 
-following on AMD:
+following on AMD and Hygon:
 
 	ibs_op//
 
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index d66b52407e19c6f4dde9fded0da2d8dcf0a95d45..a48258330cc0e5f6c39401540e13280b05162d6e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -94,6 +94,12 @@ ifeq ($(SRCARCH),csky)
   NO_PERF_REGS := 0
 endif
 
+ifeq ($(SRCARCH),sw_64)
+  NO_PERF_REGS := 0
+  CFLAGS += -mieee
+  LIBUNWIND_LIBS = -lunwind -lunwind-sw_64
+endif
+
 ifeq ($(ARCH),s390)
   NO_PERF_REGS := 0
   CFLAGS += -fPIC -I$(OUTPUT)arch/s390/include/generated
@@ -640,6 +646,13 @@ ifndef NO_LIBUNWIND
       CFLAGS += -DNO_LIBUNWIND_DEBUG_FRAME_AARCH64
     endif
   endif
+  ifeq ($(feature-libunwind-sw_64), 1)
+    $(call detected,CONFIG_LIBUNWIND_SW64)
+    CFLAGS += -DHAVE_LIBUNWIND_SW_64_SUPPORT
+    LDFLAGS += -lunwind-sw_64
+    EXTLIBS_LIBUNWIND += -lunwind-sw_64
+    have_libunwind = 1
+  endif
 
   ifneq ($(feature-libunwind), 1)
     msg := $(warning No libunwind found. Please install libunwind-dev[el] >= 1.1 and/or set LIBUNWIND_DIR);
diff --git a/tools/perf/arch/sw_64/Build b/tools/perf/arch/sw_64/Build
new file mode 100644
index 0000000000000000000000000000000000000000..36222e64bbf7d00c5701c4f6fed009da02782cca
--- /dev/null
+++ b/tools/perf/arch/sw_64/Build
@@ -0,0 +1,2 @@
+perf-y += util/
+perf-$(CONFIG_DWARF_UNWIND) += tests/
diff --git a/tools/perf/arch/sw_64/Makefile b/tools/perf/arch/sw_64/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..1aa9dd772489a6d1c5ccc03f9bbe9539251ff744
--- /dev/null
+++ b/tools/perf/arch/sw_64/Makefile
@@ -0,0 +1,4 @@
+ifndef NO_DWARF
+PERF_HAVE_DWARF_REGS := 1
+endif
+PERF_HAVE_ARCH_REGS_QUERY_REGISTER_OFFSET := 1
diff --git a/tools/perf/arch/sw_64/include/arch-tests.h b/tools/perf/arch/sw_64/include/arch-tests.h
new file mode 100644
index 0000000000000000000000000000000000000000..90ec4c8cb8802d22e7780a826adcbc94b7cfd17a
--- /dev/null
+++ b/tools/perf/arch/sw_64/include/arch-tests.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_TESTS_H
+#define ARCH_TESTS_H
+
+#ifdef HAVE_DWARF_UNWIND_SUPPORT
+struct thread;
+struct perf_sample;
+#endif
+
+extern struct test arch_tests[];
+
+#endif
diff --git a/tools/perf/arch/sw_64/include/perf_regs.h b/tools/perf/arch/sw_64/include/perf_regs.h
new file mode 100644
index 0000000000000000000000000000000000000000..e0c1b15375b5c2c7e975df45adeb644d1eed2ccf
--- /dev/null
+++ b/tools/perf/arch/sw_64/include/perf_regs.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_PERF_REGS_H
+#define ARCH_PERF_REGS_H
+
+#include <stdlib.h>
+#include <linux/types.h>
+#include <asm/perf_regs.h>
+
+void perf_regs_load(u64 *regs);
+
+#define PERF_REGS_MASK	((1ULL << PERF_REG_SW64_MAX) - 1)
+#define PERF_REGS_MAX	PERF_REG_SW64_MAX
+#define PERF_SAMPLE_REGS_ABI	PERF_SAMPLE_REGS_ABI_64
+
+#define PERF_REG_IP	PERF_REG_SW64_PC
+#define PERF_REG_SP	PERF_REG_SW64_SP
+
+static inline const char *perf_reg_name(int id)
+{
+	switch (id) {
+	case PERF_REG_SW64_R0:
+		return "r0";
+	case PERF_REG_SW64_R1:
+		return "r1";
+	case PERF_REG_SW64_R2:
+		return "r2";
+	case PERF_REG_SW64_R3:
+		return "r3";
+	case PERF_REG_SW64_R4:
+		return "r4";
+	case PERF_REG_SW64_R5:
+		return "r5";
+	case PERF_REG_SW64_R6:
+		return "r6";
+	case PERF_REG_SW64_R7:
+		return "r7";
+	case PERF_REG_SW64_R8:
+		return "r8";
+	case PERF_REG_SW64_R9:
+		return "r9";
+	case PERF_REG_SW64_R10:
+		return "r10";
+	case PERF_REG_SW64_R11:
+		return "r11";
+	case PERF_REG_SW64_R12:
+		return "r12";
+	case PERF_REG_SW64_R13:
+
return "r13"; + case PERF_REG_SW64_R14: + return "r14"; + case PERF_REG_SW64_R15: + return "r15"; + case PERF_REG_SW64_R16: + return "r16"; + case PERF_REG_SW64_R17: + return "r17"; + case PERF_REG_SW64_R18: + return "r18"; + case PERF_REG_SW64_R19: + return "r19"; + case PERF_REG_SW64_R20: + return "r20"; + case PERF_REG_SW64_R21: + return "r21"; + case PERF_REG_SW64_R22: + return "r22"; + case PERF_REG_SW64_R23: + return "r23"; + case PERF_REG_SW64_R24: + return "r24"; + case PERF_REG_SW64_R25: + return "r25"; + case PERF_REG_SW64_R26: + return "r26"; + case PERF_REG_SW64_R27: + return "r27"; + case PERF_REG_SW64_R28: + return "r28"; + case PERF_REG_SW64_GP: + return "gp"; + case PERF_REG_SW64_SP: + return "sp"; + case PERF_REG_SW64_PC: + return "pc"; + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/tools/perf/arch/sw_64/tests/Build b/tools/perf/arch/sw_64/tests/Build new file mode 100644 index 0000000000000000000000000000000000000000..b8a38eadfb3562723278e2e6855e56109e77213e --- /dev/null +++ b/tools/perf/arch/sw_64/tests/Build @@ -0,0 +1,3 @@ +perf-y += regs_load.o +perf-y += dwarf-unwind.o +perf-y += arch-tests.o diff --git a/tools/perf/arch/sw_64/tests/arch-tests.c b/tools/perf/arch/sw_64/tests/arch-tests.c new file mode 100644 index 0000000000000000000000000000000000000000..5b1543c980223d2a969976e0259becd6cd583bd9 --- /dev/null +++ b/tools/perf/arch/sw_64/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; diff --git a/tools/perf/arch/sw_64/tests/dwarf-unwind.c b/tools/perf/arch/sw_64/tests/dwarf-unwind.c new file mode 100644 index 0000000000000000000000000000000000000000..cd7047b7a54683c8577b2086ccff6b44865fbd7e --- /dev/null +++ b/tools/perf/arch/sw_64/tests/dwarf-unwind.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "perf_regs.h" +#include "thread.h" +#include "map.h" +#include "maps.h" +#include "event.h" +#include "debug.h" +#include "tests/tests.h" + +#define STACK_SIZE 8192 + +static int sample_ustack(struct perf_sample *sample, + struct thread *thread, u64 *regs) +{ + struct stack_dump *stack = &sample->user_stack; + struct map *map; + unsigned long sp; + u64 stack_size, *buf; + + buf = malloc(STACK_SIZE); + if (!buf) { + printf("failed to allocate sample uregs data\n"); + return -1; + } + + sp = (unsigned long) regs[PERF_REG_SW64_SP]; + + map = maps__find(thread->maps, (u64)sp); + if (!map) { + printf("failed to get stack map\n"); + free(buf); + return -1; + } + + stack_size = map->end - sp; + stack_size = stack_size > STACK_SIZE ? 
STACK_SIZE : stack_size;
+
+	memcpy(buf, (void *) sp, stack_size);
+	stack->data = (char *) buf;
+	stack->size = stack_size;
+	return 0;
+}
+
+int test__arch_unwind_sample(struct perf_sample *sample,
+			     struct thread *thread)
+{
+	struct regs_dump *regs = &sample->user_regs;
+	u64 *buf;
+
+	buf = calloc(1, sizeof(u64) * PERF_REGS_MAX);
+	if (!buf) {
+		printf("failed to allocate sample uregs data\n");
+		return -1;
+	}
+
+	perf_regs_load(buf);
+	regs->abi  = PERF_SAMPLE_REGS_ABI;
+	regs->regs = buf;
+	regs->mask = PERF_REGS_MASK;
+
+	return sample_ustack(sample, thread, buf);
+}
diff --git a/tools/perf/arch/sw_64/tests/regs_load.S b/tools/perf/arch/sw_64/tests/regs_load.S
new file mode 100644
index 0000000000000000000000000000000000000000..8c5aabc2c6fbde125064ecbcf85787bdd09cbcaf
--- /dev/null
+++ b/tools/perf/arch/sw_64/tests/regs_load.S
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+
+.text
+.set noat
+.type perf_regs_load,%function
+#define STL_REG(r)	stl $r, (8 * r)($16)
+#define LDL_REG(r)	ldl $r, (8 * r)($16)
+#define SP	(8 * 30)
+#define PC	(8 * 31)
+SYM_FUNC_START(perf_regs_load)
+	STL_REG(0)
+	STL_REG(1)
+	STL_REG(2)
+	STL_REG(3)
+	STL_REG(4)
+	STL_REG(5)
+	STL_REG(6)
+	STL_REG(7)
+	STL_REG(8)
+	STL_REG(9)
+	STL_REG(10)
+	STL_REG(11)
+	STL_REG(12)
+	STL_REG(13)
+	STL_REG(14)
+	STL_REG(15)
+	STL_REG(16)
+	STL_REG(17)
+	STL_REG(18)
+	STL_REG(19)
+	STL_REG(20)
+	STL_REG(21)
+	STL_REG(22)
+	STL_REG(23)
+	STL_REG(24)
+	STL_REG(25)
+	STL_REG(26)
+	STL_REG(27)
+	STL_REG(28)
+	STL_REG(29)
+	mov	$30, $17
+	stl	$17, (SP)($16)
+	stl	$26, (PC)($16)
+	LDL_REG(17)
+	ret
SYM_FUNC_END(perf_regs_load)
diff --git a/tools/perf/arch/sw_64/util/Build b/tools/perf/arch/sw_64/util/Build
new file mode 100644
index 0000000000000000000000000000000000000000..39f459b636a0cf4ef76b07ac9d09a02fcb095a5a
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/Build
@@ -0,0 +1,4 @@
+perf-y += perf_regs.o
+perf-$(CONFIG_DWARF) += dwarf-regs.o
+perf-$(CONFIG_LIBDW_DWARF_UNWIND) += unwind-libdw.o
+perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind.o
diff --git a/tools/perf/arch/sw_64/util/dwarf-regs.c b/tools/perf/arch/sw_64/util/dwarf-regs.c
new file mode 100644
index 0000000000000000000000000000000000000000..11c1ee5444dab96427749688549a6529a89d83d8
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/dwarf-regs.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Mapping of DWARF debug register numbers into register names.
+ *
+ * Copyright (C) 2010 Will Deacon, ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <errno.h>
+#include <stddef.h>
+#include <string.h>
+#include <dwarf-regs.h>
+#include <linux/ptrace.h> /* for struct user_pt_regs */
+#include <linux/stringify.h>
+#include "util.h"
+
+struct pt_regs_dwarfnum {
+	const char *name;
+	unsigned int dwarfnum;
+};
+
+#define REG_DWARFNUM_NAME(r, num) {.name = r, .dwarfnum = num}
+#define GPR_DWARFNUM_NAME(num) \
+	{.name = __stringify(%x##num), .dwarfnum = num}
+#define REG_DWARFNUM_END {.name = NULL, .dwarfnum = 0}
+#define DWARFNUM2OFFSET(index) \
+	((index) * sizeof(((struct user_pt_regs *)0)->regs[0]))
+
+static const struct pt_regs_dwarfnum regdwarfnum_table[] = {
+	GPR_DWARFNUM_NAME(0),
+	GPR_DWARFNUM_NAME(1),
+	GPR_DWARFNUM_NAME(2),
+	GPR_DWARFNUM_NAME(3),
+	GPR_DWARFNUM_NAME(4),
+	GPR_DWARFNUM_NAME(5),
+	GPR_DWARFNUM_NAME(6),
+	GPR_DWARFNUM_NAME(7),
+	GPR_DWARFNUM_NAME(8),
+	GPR_DWARFNUM_NAME(9),
+	GPR_DWARFNUM_NAME(10),
+	GPR_DWARFNUM_NAME(11),
+	GPR_DWARFNUM_NAME(12),
+	GPR_DWARFNUM_NAME(13),
+	GPR_DWARFNUM_NAME(14),
+	GPR_DWARFNUM_NAME(15),
+	REG_DWARFNUM_NAME("%fp", 15),
+	GPR_DWARFNUM_NAME(16),
+	GPR_DWARFNUM_NAME(17),
+	GPR_DWARFNUM_NAME(18),
+	GPR_DWARFNUM_NAME(19),
+	GPR_DWARFNUM_NAME(20),
+	GPR_DWARFNUM_NAME(21),
+	GPR_DWARFNUM_NAME(22),
+	GPR_DWARFNUM_NAME(23),
+	GPR_DWARFNUM_NAME(24),
+	GPR_DWARFNUM_NAME(25),
+	GPR_DWARFNUM_NAME(26),
+	GPR_DWARFNUM_NAME(27),
+	GPR_DWARFNUM_NAME(28),
+	REG_DWARFNUM_NAME("%gp", 29),
+	REG_DWARFNUM_NAME("%sp", 30),
+	REG_DWARFNUM_END,
+};
+
+/**
+ * get_arch_regstr() - look up a register name from its DWARF register number
+ * @n:	the DWARF register number
+ *
+ * get_arch_regstr() returns the name of the register in struct
+ * regdwarfnum_table from its DWARF register number. If the register is not
+ * found in the table, this returns NULL.
+ */
+const char *get_arch_regstr(unsigned int n)
+{
+	const struct pt_regs_dwarfnum *roff;
+
+	for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+		if (roff->dwarfnum == n)
+			return roff->name;
+	return NULL;
+}
+
+int regs_query_register_offset(const char *name)
+{
+	const struct pt_regs_dwarfnum *roff;
+
+	for (roff = regdwarfnum_table; roff->name != NULL; roff++)
+		if (!strcmp(roff->name, name))
+			return DWARFNUM2OFFSET(roff->dwarfnum);
+	return -EINVAL;
+}
diff --git a/tools/perf/arch/sw_64/util/perf_regs.c b/tools/perf/arch/sw_64/util/perf_regs.c
new file mode 100644
index 0000000000000000000000000000000000000000..2833e101a7c6407263130e9948a06a2caa32bc4b
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+	SMPL_REG_END
+};
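The two lookups translate between DWARF numbers, names, and pt_regs offsets;
the values below follow directly from regdwarfnum_table and DWARFNUM2OFFSET
(8-byte registers)::

	const char *name = get_arch_regstr(30);		/* "%sp" */
	int off  = regs_query_register_offset("%fp");	/* 15 * 8 == 120 */
	int bad  = regs_query_register_offset("%r99");	/* -EINVAL */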
diff --git a/tools/perf/arch/sw_64/util/perf_regs.c b/tools/perf/arch/sw_64/util/perf_regs.c
new file mode 100644
index 0000000000000000000000000000000000000000..2833e101a7c6407263130e9948a06a2caa32bc4b
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/perf_regs.c
@@ -0,0 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "../../../util/perf_regs.h"
+
+const struct sample_reg sample_reg_masks[] = {
+	SMPL_REG_END
+};
diff --git a/tools/perf/arch/sw_64/util/unwind-libdw.c b/tools/perf/arch/sw_64/util/unwind-libdw.c
new file mode 100644
index 0000000000000000000000000000000000000000..3e2b6acc40ac3336273f662c1f790adbe8df9a27
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/unwind-libdw.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <elfutils/libdwfl.h>
+#include "../../util/unwind-libdw.h"
+#include "../../util/perf_regs.h"
+#include "../../util/event.h"
+
+bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg)
+{
+	struct unwind_info *ui = arg;
+	struct regs_dump *user_regs = &ui->sample->user_regs;
+	Dwarf_Word dwarf_regs[PERF_REG_SW64_MAX], dwarf_pc;
+
+#define REG(r) ({						\
+	Dwarf_Word val = 0;					\
+	perf_reg_value(&val, user_regs, PERF_REG_SW64_##r);	\
+	val;							\
+})
+
+	dwarf_regs[0] = REG(R0);
+	dwarf_regs[1] = REG(R1);
+	dwarf_regs[2] = REG(R2);
+	dwarf_regs[3] = REG(R3);
+	dwarf_regs[4] = REG(R4);
+	dwarf_regs[5] = REG(R5);
+	dwarf_regs[6] = REG(R6);
+	dwarf_regs[7] = REG(R7);
+	dwarf_regs[8] = REG(R8);
+	dwarf_regs[9] = REG(R9);
+	dwarf_regs[10] = REG(R10);
+	dwarf_regs[11] = REG(R11);
+	dwarf_regs[12] = REG(R12);
+	dwarf_regs[13] = REG(R13);
+	dwarf_regs[14] = REG(R14);
+	dwarf_regs[15] = REG(R15);
+	dwarf_regs[16] = REG(R16);
+	dwarf_regs[17] = REG(R17);
+	dwarf_regs[18] = REG(R18);
+	dwarf_regs[19] = REG(R19);
+	dwarf_regs[20] = REG(R20);
+	dwarf_regs[21] = REG(R21);
+	dwarf_regs[22] = REG(R22);
+	dwarf_regs[23] = REG(R23);
+	dwarf_regs[24] = REG(R24);
+	dwarf_regs[25] = REG(R25);
+	dwarf_regs[26] = REG(R26);
+	dwarf_regs[27] = REG(R27);
+	dwarf_regs[28] = REG(R28);
+	dwarf_regs[29] = REG(R29);
+	dwarf_regs[30] = REG(R30);
+	dwarf_regs[31] = REG(R31);
+
+	if (!dwfl_thread_state_registers(thread, 0, PERF_REG_SW64_MAX,
+					 dwarf_regs))
+		return false;
+
+	dwarf_pc = REG(PC);
+	dwfl_thread_state_register_pc(thread, dwarf_pc);
+
+	return true;
+}
diff --git a/tools/perf/arch/sw_64/util/unwind-libunwind.c b/tools/perf/arch/sw_64/util/unwind-libunwind.c
new file mode 100644
index 0000000000000000000000000000000000000000..134e3c2280d297b3374ee4d7a6c471bc4d923b14
--- /dev/null
+++ b/tools/perf/arch/sw_64/util/unwind-libunwind.c
@@ -0,0 +1,84 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <errno.h>
+
+#ifndef REMOTE_UNWIND_LIBUNWIND
+#include <libunwind.h>
+#include "perf_regs.h"
+#include "../../util/unwind.h"
+#include "../../util/debug.h"
+#endif
+
+int LIBUNWIND__ARCH_REG_ID(int regnum)
+{
+	switch (regnum) {
+	case UNW_SW_64_R0:
+		return PERF_REG_SW64_R0;
+	case UNW_SW_64_R1:
+		return PERF_REG_SW64_R1;
+	case UNW_SW_64_R2:
+		return PERF_REG_SW64_R2;
+	case UNW_SW_64_R3:
+		return PERF_REG_SW64_R3;
+	case UNW_SW_64_R4:
+		return PERF_REG_SW64_R4;
+	case UNW_SW_64_R5:
+		return PERF_REG_SW64_R5;
+	case UNW_SW_64_R6:
+		return PERF_REG_SW64_R6;
+	case UNW_SW_64_R7:
+		return PERF_REG_SW64_R7;
+	case UNW_SW_64_R8:
+		return PERF_REG_SW64_R8;
+	case UNW_SW_64_R9:
+		return PERF_REG_SW64_R9;
+	case UNW_SW_64_R10:
+		return PERF_REG_SW64_R10;
+	case UNW_SW_64_R11:
+		return PERF_REG_SW64_R11;
+	case UNW_SW_64_R12:
+		return PERF_REG_SW64_R12;
+	case UNW_SW_64_R13:
+		return PERF_REG_SW64_R13;
+	case UNW_SW_64_R14:
+		return PERF_REG_SW64_R14;
+	case UNW_SW_64_R15:
+		return PERF_REG_SW64_R15;
+	case UNW_SW_64_R16:
+		return PERF_REG_SW64_R16;
+	case UNW_SW_64_R17:
+		return PERF_REG_SW64_R17;
+	case UNW_SW_64_R18:
+		return PERF_REG_SW64_R18;
+	case UNW_SW_64_R19:
+		return PERF_REG_SW64_R19;
+	case UNW_SW_64_R20:
+		return PERF_REG_SW64_R20;
+	case UNW_SW_64_R21:
+		return PERF_REG_SW64_R21;
+	case UNW_SW_64_R22:
+		return PERF_REG_SW64_R22;
+	case UNW_SW_64_R23:
+		return PERF_REG_SW64_R23;
+	case UNW_SW_64_R24:
+		return PERF_REG_SW64_R24;
+	case UNW_SW_64_R25:
+		return PERF_REG_SW64_R25;
+	case UNW_SW_64_R26:
+		return PERF_REG_SW64_R26;
+	case UNW_SW_64_R27:
+		return PERF_REG_SW64_R27;
+	case UNW_SW_64_R28:
+		return PERF_REG_SW64_R28;
+	case UNW_SW_64_R29:
+		return PERF_REG_SW64_GP;
+	case UNW_SW_64_R30:
+		return PERF_REG_SW64_SP;
+	case UNW_SW_64_PC:
+		return PERF_REG_SW64_PC;
+	default:
+		pr_err("unwind: invalid reg id %d\n", regnum);
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
diff --git a/tools/perf/arch/x86/util/env.c b/tools/perf/arch/x86/util/env.c
index 3e537ffb1353aab2595496ef0848501c1fea42b3..f1de12d20b2ae6bf79216406935dd4735a0c855e 100644
--- a/tools/perf/arch/x86/util/env.c
+++ b/tools/perf/arch/x86/util/env.c
@@ -17,3 +17,18 @@ bool x86__is_amd_cpu(void)
 ret:
 	return is_amd >= 1 ? true : false;
 }
+
+bool x86__is_hygon_cpu(void)
+{
+	struct perf_env env = { .total_mem = 0, };
+	static int is_hygon; /* 0: Uninitialized, 1: Yes, -1: No */
+
+	if (is_hygon)
+		goto ret;
+
+	perf_env__cpuid(&env);
+	is_hygon = env.cpuid && strstarts(env.cpuid, "HygonGenuine") ? 1 : -1;
+	perf_env__exit(&env);
+ret:
+	return is_hygon >= 1 ? true : false;
+}
diff --git a/tools/perf/arch/x86/util/env.h b/tools/perf/arch/x86/util/env.h
index d78f080b6b3f889a3b5921d4ce5e3295cf340dcf..904d5e2283606a6ddc0d8be9a0538c74c7f63889 100644
--- a/tools/perf/arch/x86/util/env.h
+++ b/tools/perf/arch/x86/util/env.h
@@ -3,5 +3,6 @@
 #define _X86_ENV_H

 bool x86__is_amd_cpu(void);
+bool x86__is_hygon_cpu(void);

 #endif /* _X86_ENV_H */
diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c
index 191b372f9a2d3630a849a32a52e49c10cd9fd353..f8d9aecbf2f28763b5361d48b03dba54022affa3 100644
--- a/tools/perf/arch/x86/util/mem-events.c
+++ b/tools/perf/arch/x86/util/mem-events.c
@@ -33,7 +33,7 @@ struct perf_mem_event *perf_mem_events__ptr(int i)
 	if (i >= PERF_MEM_EVENTS__MAX)
 		return NULL;

-	if (x86__is_amd_cpu())
+	if (x86__is_amd_cpu() || x86__is_hygon_cpu())
 		return &perf_mem_events_amd[i];

 	return &perf_mem_events_intel[i];
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index f428cffb037818fb908f61c325aeef88a97471cc..0af2562364665d6405b9fcb4913190513b999bba 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -174,7 +174,7 @@ const char *pmu_find_alias_name(const char *name)
 int perf_pmus__num_mem_pmus(void)
 {
 	/* AMD uses IBS OP pmu and not a core PMU for perf mem/c2c */
-	if (x86__is_amd_cpu())
+	if (x86__is_amd_cpu() || x86__is_hygon_cpu())
 		return 1;

 	/* Intel uses core pmus for perf mem/c2c */
diff --git a/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
new file mode 100644
index 0000000000000000000000000000000000000000..428605c37d10bcb5aef284aaa5b8279085c0002d
--- /dev/null
+++ b/tools/perf/pmu-events/arch/arm64/arm/cmn/sys/cmn.json
@@ -0,0 +1,266 @@
+[
+    {
+        "EventName": "hnf_cache_miss",
+        "EventidCode": "0x1",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts total cache misses in first lookup result (high priority).",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_slc_sf_cache_access",
+        "EventidCode": "0x2",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts number of cache accesses in first access (high priority).",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_cache_fill",
+        "EventidCode": "0x3",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts total allocations in HN SLC (all cache line allocations to SLC).",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_pocq_retry",
+        "EventidCode": "0x4",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts number of retried requests.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_pocq_reqs_recvd",
+        "EventidCode": "0x5",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts number of requests that HN receives.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_sf_hit",
+        "EventidCode": "0x6",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts number of SF hits.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hnf_sf_evictions",
+        "EventidCode": "0x7",
+        "NodeType": "0x5",
+        "BriefDescription": "Counts number of SF eviction cache invalidations initiated.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
"Counts number of SF eviction cache invalidations initiated.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_dir_snoops_sent", + "EventidCode": "0x8", + "NodeType": "0x5", + "BriefDescription": "Counts number of directed snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_brd_snoops_sent", + "EventidCode": "0x9", + "NodeType": "0x5", + "BriefDescription": "Counts number of multicast snoops sent (not including SF back invalidation).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_eviction", + "EventidCode": "0xa", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC evictions (dirty only).", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_slc_fill_invalid_way", + "EventidCode": "0xb", + "NodeType": "0x5", + "BriefDescription": "Counts number of SLC fills to an invalid way.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_retries", + "EventidCode": "0xc", + "NodeType": "0x5", + "BriefDescription": "Counts number of retried transactions by the MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_mc_reqs", + "EventidCode": "0xd", + "NodeType": "0x5", + "BriefDescription": "Counts number of requests that are sent to MC.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hnf_qos_hh_retry", + "EventidCode": "0xe", + "NodeType": "0x5", + "BriefDescription": "Counts number of times a HighHigh priority request is protocolretried at the HN‑F.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s0_rdata_beats", + "EventidCode": "0x1", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 0. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s1_rdata_beats", + "EventidCode": "0x2", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 1. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_s2_rdata_beats", + "EventidCode": "0x3", + "NodeType": "0xa", + "BriefDescription": "Number of RData beats (RVALID and RREADY) dispatched on port 2. This event measures the read bandwidth, including CMO responses.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_rxdat_flits", + "EventidCode": "0x4", + "NodeType": "0xa", + "BriefDescription": "Number of RXDAT flits received. This event measures the true read data bandwidth, excluding CMOs.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txdat_flits", + "EventidCode": "0x5", + "NodeType": "0xa", + "BriefDescription": "Number of TXDAT flits dispatched. This event measures the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_total", + "EventidCode": "0x6", + "NodeType": "0xa", + "BriefDescription": "Number of TXREQ flits dispatched. 
This event measures the total request bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "rnid_txreq_flits_retried", + "EventidCode": "0x7", + "NodeType": "0xa", + "BriefDescription": "Number of retried TXREQ flits dispatched. This event measures the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txrsp_retryack", + "EventidCode": "0x4", + "NodeType": "0x7", + "BriefDescription": "Number of RXREQ flits dispatched. This event is a measure of the retry rate.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_txdat_flitv", + "EventidCode": "0x5", + "NodeType": "0x7", + "BriefDescription": "Number of TXDAT flits dispatched from XP to SBSX. This event is a measure of the write bandwidth.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_arvalid_no_arready", + "EventidCode": "0x21", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AR channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_awvalid_no_awready", + "EventidCode": "0x22", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on AW channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "sbsx_wvalid_no_wready", + "EventidCode": "0x23", + "NodeType": "0x7", + "BriefDescription": "Number of cycles the SBSX bridge is stalled because of backpressure on W channel.", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "EventName": "hni_txrsp_retryack", + "EventidCode": "0x2a", + "NodeType": "0x4", + "BriefDescription": "Number of RXREQ flits dispatched. 
+    {
+        "EventName": "hni_arvalid_no_arready",
+        "EventidCode": "0x2b",
+        "NodeType": "0x4",
+        "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AR channel.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hni_arready_no_arvalid",
+        "EventidCode": "0x2c",
+        "NodeType": "0x4",
+        "BriefDescription": "Number of cycles the AR channel is waiting for new requests from HN-I bridge.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hni_awvalid_no_awready",
+        "EventidCode": "0x2d",
+        "NodeType": "0x4",
+        "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on AW channel.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hni_awready_no_awvalid",
+        "EventidCode": "0x2e",
+        "NodeType": "0x4",
+        "BriefDescription": "Number of cycles the AW channel is waiting for new requests from HN-I bridge.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hni_wvalid_no_wready",
+        "EventidCode": "0x2f",
+        "NodeType": "0x4",
+        "BriefDescription": "Number of cycles the HN-I bridge is stalled because of backpressure on W channel.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    },
+    {
+        "EventName": "hni_txdat_stall",
+        "EventidCode": "0x30",
+        "NodeType": "0x4",
+        "BriefDescription": "TXDAT valid but no link credit available.",
+        "Unit": "arm_cmn",
+        "Compat": "(434|436|43c|43a).*"
+    }
+]
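An orientation note on the NodeType values used throughout this file: each selects the CMN node class that implements the event, and they are believed to follow the node-type encoding used by the arm-cmn driver. An illustrative C subset (for orientation only, not part of the patch)::

	/* Assumed arm-cmn node-type encodings behind the NodeType field. */
	enum cmn_node_type {
		CMN_TYPE_HNI  = 0x4,	/* hni_*  events */
		CMN_TYPE_HNF  = 0x5,	/* hnf_*  events */
		CMN_TYPE_SBSX = 0x7,	/* sbsx_* events */
		CMN_TYPE_RNI  = 0xa,	/* rnid_* events */
	};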
"This event measures the actual write bandwidth at RN-I bridges.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txdat_flits * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "rni_retry_rate", + "BriefDescription": "RN-I bridge retry rate indicates whether the memory controller is the bottleneck.", + "MetricGroup": "cmn", + "MetricExpr": "rnid_txreq_flits_retried / rnid_txreq_flits_total", + "ScaleUnit": "100%", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + }, + { + "MetricName": "sbsx_actual_write_bandwidth.all", + "BriefDescription": "sbsx actual write bandwidth.", + "MetricGroup": "cmn", + "MetricExpr": "sbsx_txdat_flitv * 32 / 1e6 / duration_time", + "ScaleUnit": "1MB/s", + "Unit": "arm_cmn", + "Compat": "(434|436|43c|43a).*" + } +] diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json index c7e7528db3158f2caecbea67059efbf347a97dc3..4d423b149ad128f64e82fc94483d6b764a66ef52 100644 --- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json +++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json @@ -12,5 +12,13 @@ "EventName": "sys_ccn_pmu.read_cycles", "Unit": "sys_ccn_pmu", "Compat": "0x01" + }, + { + "BriefDescription": "Counts total cache misses in first lookup result (high priority)", + "EventidCode": "0x1", + "NodeType": "0x5", + "EventName": "sys_cmn_pmu.hnf_cache_miss", + "Unit": "sys_cmn_pmu", + "Compat": "(434|436|43c|43a).*" } ] diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c index 12bd043a05e3579d090ffbc252a8852afad3036f..13727421d424b1e37972d38f90fce4c3d60181c9 100644 --- a/tools/perf/pmu-events/empty-pmu-events.c +++ b/tools/perf/pmu-events/empty-pmu-events.c @@ -244,6 +244,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = { .topic = "uncore", .pmu = "uncore_sys_ccn_pmu", }, + { + .name = "sys_cmn_pmu.hnf_cache_miss", + .event = "eventid=0x1,type=0x5", + .desc = "Counts total cache misses in first lookup result (high priority). 
diff --git a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
index c7e7528db3158f2caecbea67059efbf347a97dc3..4d423b149ad128f64e82fc94483d6b764a66ef52 100644
--- a/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
+++ b/tools/perf/pmu-events/arch/test/test_soc/sys/uncore.json
@@ -12,5 +12,13 @@
     "EventName": "sys_ccn_pmu.read_cycles",
     "Unit": "sys_ccn_pmu",
     "Compat": "0x01"
+  },
+  {
+    "BriefDescription": "Counts total cache misses in first lookup result (high priority)",
+    "EventidCode": "0x1",
+    "NodeType": "0x5",
+    "EventName": "sys_cmn_pmu.hnf_cache_miss",
+    "Unit": "sys_cmn_pmu",
+    "Compat": "(434|436|43c|43a).*"
   }
 ]
diff --git a/tools/perf/pmu-events/empty-pmu-events.c b/tools/perf/pmu-events/empty-pmu-events.c
index 12bd043a05e3579d090ffbc252a8852afad3036f..13727421d424b1e37972d38f90fce4c3d60181c9 100644
--- a/tools/perf/pmu-events/empty-pmu-events.c
+++ b/tools/perf/pmu-events/empty-pmu-events.c
@@ -244,6 +244,14 @@ static const struct pmu_event pmu_events__test_soc_sys[] = {
 	.topic = "uncore",
 	.pmu = "uncore_sys_ccn_pmu",
 },
+{
+	.name = "sys_cmn_pmu.hnf_cache_miss",
+	.event = "eventid=0x1,type=0x5",
+	.desc = "Counts total cache misses in first lookup result (high priority). Unit: uncore_sys_cmn_pmu ",
+	.compat = "(434|436|43c|43a).*",
+	.topic = "uncore",
+	.pmu = "uncore_sys_cmn_pmu",
+},
 {
 	.name = 0,
 	.event = 0,
diff --git a/tools/perf/pmu-events/jevents.py b/tools/perf/pmu-events/jevents.py
index 72ba4a9239c6bff6da2685c311e60c19e3b85562..ae2bd49e880562553c2c4bad56d53d8dc2f681bc 100755
--- a/tools/perf/pmu-events/jevents.py
+++ b/tools/perf/pmu-events/jevents.py
@@ -289,6 +289,7 @@ class JsonEvent:
       'cpu_core': 'cpu_core',
       'cpu_atom': 'cpu_atom',
       'ali_drw': 'ali_drw',
+      'arm_cmn': 'arm_cmn',
     }
     return table[unit] if unit in table else f'uncore_{unit.lower()}'

@@ -298,6 +299,7 @@ class JsonEvent:
     if 'ExtSel' in jd:
       eventcode |= int(jd['ExtSel']) << 8
     configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
+    eventidcode = int(jd['EventidCode'], 0) if 'EventidCode' in jd else None
     self.name = jd['EventName'].lower() if 'EventName' in jd else None
     self.topic = ''
     self.compat = jd.get('Compat')
@@ -335,7 +337,13 @@
       if precise and self.desc and '(Precise Event)' not in self.desc:
         extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
                                                                    'event)')
-    event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
+    event = None
+    if configcode is not None:
+      event = f'config={llx(configcode)}'
+    elif eventidcode is not None:
+      event = f'eventid={llx(eventidcode)}'
+    else:
+      event = f'event={llx(eventcode)}'
     event_fields = [
       ('AnyThread', 'any='),
       ('PortMask', 'ch_mask='),
@@ -345,6 +353,7 @@
       ('Invert', 'inv='),
       ('SampleAfterValue', 'period='),
       ('UMask', 'umask='),
+      ('NodeType', 'type='),
     ]
     for key, value in event_fields:
       if key in jd and jd[key] != '0':
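The net effect of the jevents.py change above: when a JSON event carries EventidCode (and optionally NodeType), the generated event string becomes ``eventid=<code>,type=<node>`` instead of ``event=...`` -- exactly the ``eventid=0x1,type=0x5`` seen in the generated test entry earlier. A hypothetical C rendering of that rule (function name invented for illustration)::

	#include <stdio.h>

	/* Illustrative only: mirrors the string jevents.py now emits for
	 * Arm CMN events from their EventidCode and NodeType fields.
	 */
	static void encode_cmn_event(char *buf, size_t len,
				     unsigned long eventid, unsigned long type)
	{
		snprintf(buf, len, "eventid=%#lx,type=%#lx", eventid, type);
	}

For example, encode_cmn_event(buf, sizeof(buf), 0x1, 0x5) produces "eventid=0x1,type=0x5".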
&sys_ddr_pmu_write_cycles, &sys_ccn_pmu_read_cycles, + &sys_cmn_pmu_hnf_cache_miss, NULL }; @@ -615,6 +630,12 @@ static int __test_uncore_pmu_event_aliases(struct perf_pmu_test_pmu *test_pmu) .count = &matched_count, }; + if (strcmp(pmu_name, test_event.matching_pmu)) { + pr_debug("testing aliases uncore PMU %s: mismatched matching_pmu, %s vs %s\n", + pmu_name, test_event.matching_pmu, pmu_name); + return -1; + } + err = perf_pmu__find_event(pmu, event->name, &args, test_core_pmu_event_aliases_cb); if (err) { @@ -701,6 +722,46 @@ static struct perf_pmu_test_pmu test_pmus[] = { &sys_ccn_pmu_read_cycles, }, }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43401", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43602", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43c03", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + }, + { + .pmu = { + .name = (char *)"uncore_sys_cmn_pmu0", + .is_uncore = 1, + .id = (char *)"43a01", + }, + .aliases = { + &sys_cmn_pmu_hnf_cache_miss, + }, + } }; /* Test that aliases generated are as expected */ diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 6d657c9927f7db2e72dc966ff067732115911414..89a051732e87d52feb2eee7a8b9b03e9025ca005 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -199,6 +199,7 @@ perf-$(CONFIG_LOCAL_LIBUNWIND) += unwind-libunwind-local.o perf-$(CONFIG_LIBUNWIND) += unwind-libunwind.o perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o +perf-$(CONFIG_LIBUNWIND_SW64) += libunwind/sw64.o ifeq ($(CONFIG_LIBTRACEEVENT),y) perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 82956adf99632d742f777f01d4177ce575ddfbd6..88440bc7cc713c9195711fbb96b4e084f08bbec9 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -180,6 +180,9 @@ static struct arch architectures[] = { .comment_char = '#', }, }, + { + .name = "sw_64", + }, { .name = "x86", .init = x86__annotate_init, diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index a164164001fb512047503186df5143bf698aef2a..3bd6489cd081c7a5d092bb7fa9007194e03a0e0b 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -422,6 +422,8 @@ static const char *normalize_arch(char *arch) return "arm64"; if (!strncmp(arch, "arm", 3) || !strcmp(arch, "sa110")) return "arm"; + if (!strncmp(arch, "sw_64", 5)) + return "sw_64"; if (!strncmp(arch, "s390", 4)) return "s390"; if (!strncmp(arch, "parisc", 6)) diff --git a/tools/perf/util/libunwind/sw64.c b/tools/perf/util/libunwind/sw64.c new file mode 100644 index 0000000000000000000000000000000000000000..12452bf2ab8b8539a317329969fffa8801e38082 --- /dev/null +++ b/tools/perf/util/libunwind/sw64.c @@ -0,0 +1,33 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This file setups defines to compile arch specific binary from the + * generic one. + * + * The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch + * name and the defination of this function is included directly from + * 'arch/arm64/util/unwind-libunwind.c', to make sure that this function + * is defined no matter what arch the host is. + * + * Finally, the arch specific unwind methods are exported which will + * be assigned to each arm64 thread. 
+
+#define REMOTE_UNWIND_LIBUNWIND
+
+/* Define arch-specific functions and regs for libunwind; these must be
+ * defined before including "unwind.h".
+ */
+#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__sw_64_reg_id(regnum)
+#define LIBUNWIND__ARCH_REG_IP PERF_REG_SW64_PC
+#define LIBUNWIND__ARCH_REG_SP PERF_REG_SW64_SP
+
+#include "unwind.h"
+#include "debug.h"
+#include "libunwind-sw_64.h"
+#include <../../../arch/sw_64/include/uapi/asm/perf_regs.h>
+#include "../../arch/sw_64/util/unwind-libunwind.c"
+
+#include "util/unwind-libunwind-local.c"
+
+struct unwind_libunwind_ops *
+sw64_unwind_libunwind_ops = &_unwind_libunwind_ops;
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index bb5faaa25d510f7ceba173e07ca77d4805310e09..ca3e0404f18720d7a3cc2376896195f55cf1192d 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -498,7 +498,7 @@ static int metricgroup__sys_event_iter(const struct pmu_metric *pm,

 	while ((pmu = perf_pmus__scan(pmu))) {

-		if (!pmu->id || strcmp(pmu->id, pm->compat))
+		if (!pmu->id || !pmu_uncore_identifier_match(pm->compat, pmu->id))
 			continue;

 		return d->fn(pm, table, d->data);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d515ba8a0e160c97d69c7c1e83b964fa7120021c..869e6e566afe1ad3d7dbe79e31569106241ac974 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -28,6 +28,7 @@
 #include "strbuf.h"
 #include "fncache.h"
 #include "util/evsel_config.h"
+#include <regex.h>

 struct perf_pmu perf_pmu__fake = {
 	.name = "fake",
@@ -874,6 +875,28 @@ static bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
 	return res;
 }

+bool pmu_uncore_identifier_match(const char *compat, const char *id)
+{
+	regex_t re;
+	regmatch_t pmatch[1];
+	int match;
+
+	if (regcomp(&re, compat, REG_EXTENDED) != 0) {
+		/* Warn when the compat string cannot be compiled as a regex. */
+		pr_info("Invalid regular expression %s\n", compat);
+		return false;
+	}
+
+	match = !regexec(&re, id, 1, pmatch, 0);
+	if (match) {
+		/* Ensure a full match. */
+		match = pmatch[0].rm_so == 0 && (size_t)pmatch[0].rm_eo == strlen(id);
+	}
+	regfree(&re);
+
+	return match;
+}
+
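A note on the helper above: regexec() reports success for a match anywhere in the string, so the rm_so/rm_eo check is what enforces that the compat pattern covers the whole identifier; without it, a pattern such as ``434`` would also accept an id that merely contains those digits. A standalone sketch of the same check, outside perf (illustrative only)::

	#include <regex.h>
	#include <stdbool.h>
	#include <string.h>

	/* Same full-match rule as pmu_uncore_identifier_match():
	 * "(434|436|43c|43a).*" must span all of an id like "43401".
	 */
	static bool regex_full_match(const char *pattern, const char *id)
	{
		regex_t re;
		regmatch_t m[1];
		bool match = false;

		if (regcomp(&re, pattern, REG_EXTENDED) != 0)
			return false;

		if (!regexec(&re, id, 1, m, 0))
			match = m[0].rm_so == 0 && (size_t)m[0].rm_eo == strlen(id);

		regfree(&re);
		return match;
	}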
 static int pmu_add_cpu_aliases_map_callback(const struct pmu_event *pe,
 					const struct pmu_events_table *table __maybe_unused,
 					void *vdata)
@@ -914,8 +937,8 @@ static int pmu_add_sys_aliases_iter_fn(const struct pmu_event *pe,
 	if (!pe->compat || !pe->pmu)
 		return 0;

-	if (!strcmp(pmu->id, pe->compat) &&
-	    pmu_uncore_alias_match(pe->pmu, pmu->name)) {
+	if (pmu_uncore_alias_match(pe->pmu, pmu->name) &&
+	    pmu_uncore_identifier_match(pe->compat, pmu->id)) {
 		perf_pmu__new_alias(pmu,
 				    pe->name,
 				    pe->desc,
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 6a4e170c61d6be7e7a1960b0498a34261e6fb844..fa54010eae3f2db3be5a39042fcb66e3e2cce85f 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -240,6 +240,7 @@ void pmu_add_cpu_aliases_table(struct perf_pmu *pmu,
 char *perf_pmu__getcpuid(struct perf_pmu *pmu);
 const struct pmu_events_table *pmu_events_table__find(void);
 const struct pmu_metrics_table *pmu_metrics_table__find(void);
+bool pmu_uncore_identifier_match(const char *compat, const char *id);

 int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 76cd63de80a8efef85cf91645b4d77a746f6c32b..c2e84a827e33d698f3f19fde7a411a0f36916e88 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -11,6 +11,7 @@
 struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
 struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;
+struct unwind_libunwind_ops __weak *sw64_unwind_libunwind_ops;

 static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
 {
@@ -53,6 +54,9 @@ int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized
 	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
 		if (dso_type == DSO__TYPE_64BIT)
 			ops = arm64_unwind_libunwind_ops;
+	} else if (!strcmp(arch, "sw_64")) {
+		if (dso_type == DSO__TYPE_64BIT)
+			ops = sw64_unwind_libunwind_ops;
 	}

 	if (!ops) {
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
index a202b2ea4baf98e8556263b27b125d66b056038a..f1f2f3722e93f9dcfae4a4615888e07747ea9514 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_string.tc
@@ -31,6 +31,9 @@ mips*)
 loongarch*)
   ARG1=%r4
 ;;
+sw_64)
+  ARG1=%r16
+;;
 *)
   echo "Please implement other architecture here"
   exit_untested
diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
index 1df61e13a812b789d11a06cc9a3ddc517a72a359..8de38fb00baef9a5659cbb0e60ae3059a3feb62f 100644
--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
+++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_syntax.tc
@@ -44,6 +44,10 @@ loongarch*)
   GOODREG=%r4
   BADREG=%r12
 ;;
+sw_64)
+  GOODREG=%r16
+  BADREG=%ps
+;;
 *)
   echo "Please implement other architecture here"
   exit_untested
diff --git a/tools/testing/selftests/mm/virtual_address_range.c b/tools/testing/selftests/mm/virtual_address_range.c
index bae0ceaf95b13baeb997c720d6b4244948b98d51..76efbd5637cb78262884fce4c0c9eab077b277c9 100644
--- a/tools/testing/selftests/mm/virtual_address_range.c
+++ b/tools/testing/selftests/mm/virtual_address_range.c
@@ -54,6 +54,11 @@
 #define HIGH_ADDR_SHIFT 49
 #define NR_CHUNKS_LOW NR_CHUNKS_256TB
 #define NR_CHUNKS_HIGH NR_CHUNKS_3840TB
+#elif defined __sw_64__
+#define HIGH_ADDR_MARK (ADDR_MARK_128TB * 32UL)
+#define HIGH_ADDR_SHIFT 53
+#define NR_CHUNKS_LOW (NR_CHUNKS_128TB * 32UL)
+#define NR_CHUNKS_HIGH 0
 #else
 #define HIGH_ADDR_MARK ADDR_MARK_128TB
 #define HIGH_ADDR_SHIFT 48
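For orientation, ADDR_MARK_128TB is defined as 1UL << 47 earlier in this test, so the sw_64 values above work out as follows (a reading aid, not patch content)::

	/* HIGH_ADDR_MARK  = (1UL << 47) * 32 = 1UL << 52   -> 4 PiB boundary
	 * HIGH_ADDR_SHIFT = 53                             -> one bit above it
	 * NR_CHUNKS_LOW   = 32 * NR_CHUNKS_128TB           -> whole span walked as "low"
	 * NR_CHUNKS_HIGH  = 0                              -> no separate high region
	 */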
diff --git a/tools/testing/selftests/seccomp/seccomp_benchmark.c b/tools/testing/selftests/seccomp/seccomp_benchmark.c
index 5b5c9d558dee07bc1f7afd7df280e1189858451e..7004099ce11bdf20247044f08facd87669cd65b8 100644
--- a/tools/testing/selftests/seccomp/seccomp_benchmark.c
+++ b/tools/testing/selftests/seccomp/seccomp_benchmark.c
@@ -20,6 +20,11 @@

 #include "../kselftest.h"

+#ifdef __sw_64__
+#define __NR_getpid 174
+#define __NR_getppid 175
+#endif
+
 unsigned long long timing(clockid_t clk_id, unsigned long long samples)
 {
 	struct timespec start, finish;
diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
index 38f6514699682b8f318e7a1d80c0faf299fe1eba..66954558220ee1003802d62dfef09005ab5495a4 100644
--- a/tools/testing/selftests/seccomp/seccomp_bpf.c
+++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
@@ -66,6 +66,11 @@
 # define PR_SET_PTRACER 0x59616d61
 #endif

+#ifdef __sw_64__
+#define __NR_getpid 174
+#define __NR_getppid 175
+#endif
+
 #ifndef PR_SET_NO_NEW_PRIVS
 #define PR_SET_NO_NEW_PRIVS 38
 #define PR_GET_NO_NEW_PRIVS 39
@@ -142,6 +147,8 @@ struct seccomp_data {
 #  define __NR_seccomp 372
 # elif defined(__mc68000__)
 #  define __NR_seccomp 380
+# elif defined(__sw_64__)
+#  define __NR_seccomp 514
 # else
 #  warning "seccomp syscall number unknown for this architecture"
 #  define __NR_seccomp 0xffff
@@ -1850,6 +1857,12 @@ TEST_F(TRACE_poke, getpid_runs_normally)
 # define ARCH_REGS	struct user_regs_struct
 # define SYSCALL_NUM(_regs)	(_regs).orig_d0
 # define SYSCALL_RET(_regs)	(_regs).d0
+#elif defined(__sw_64__)
+# define ARCH_REGS	struct user_pt_regs
+# define SYSCALL_NUM(_regs)	(_regs).regs[0]
+# define SYSCALL_RET(_regs)	(_regs).regs[0]
+# define SYSCALL_RET_SET(_regs, _val)	\
+	TH_LOG("Can't modify syscall return on this architecture")
 #else
 # error "Do not know how to find your architecture's registers and syscalls"
 #endif
@@ -1914,7 +1927,7 @@ const bool ptrace_entry_set_syscall_ret =
  * Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for
  * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux).
  */
-#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__mips__) || defined(__mc68000__) || defined(__sw_64__)
 # define ARCH_GETREGS(_regs)	ptrace(PTRACE_GETREGS, tracee, 0, &(_regs))
 # define ARCH_SETREGS(_regs)	ptrace(PTRACE_SETREGS, tracee, 0, &(_regs))
 #else
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 486800a7024b373f167f12b48ce1f13b095e6098..8bf0c5b84d3a248ba72a41cf12c4f35c7ff9d440 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,6 +154,11 @@ static unsigned long long kvm_active_vms;

 static DEFINE_PER_CPU(cpumask_var_t, cpu_kick_mask);

+#ifdef CONFIG_SW64
+#define DFX_SW64_MAX_VCPU 1024
+#define DFX_SW64_MAX_VCPU_STAT_SIZE 1024
+#endif
+
 __weak void kvm_arch_guest_memory_reclaimed(struct kvm *kvm)
 {
 }
@@ -4157,6 +4162,9 @@ static long kvm_vcpu_ioctl(struct file *filp,
 		if (oldpid)
 			synchronize_rcu();
 		put_pid(oldpid);
+#ifdef CONFIG_SW64
+		vcpu->stat.pid = current->pid;
+#endif
 	}
 	r = kvm_arch_vcpu_ioctl_run(vcpu);
 	trace_kvm_userspace_exit(vcpu->run->exit_reason, r);
@@ -5748,6 +5756,10 @@ static int kvm_stat_data_get(void *data, u64 *val)
 		r = kvm_get_stat_per_vcpu(stat_data->kvm,
 					  stat_data->desc->desc.offset, val);
 		break;
+#ifdef CONFIG_SW64
+	case KVM_STAT_DFX_SW64:
+		break;
+#endif
 	}

 	return r;
@@ -5770,6 +5782,10 @@ static int kvm_stat_data_clear(void *data, u64 val)
 		r = kvm_clear_stat_per_vcpu(stat_data->kvm,
 					    stat_data->desc->desc.offset);
 		break;
+#ifdef CONFIG_SW64
+	case KVM_STAT_DFX_SW64:
+		break;
+#endif
 	}

 	return r;
@@ -5864,6 +5880,116 @@ DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, vcpu_stat_clear,
 		  "%llu\n");
 DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_readonly_fops, vcpu_stat_get, NULL, "%llu\n");

+#ifdef CONFIG_SW64
+void __weak kvm_arch_vcpu_stat_reset(struct kvm_vcpu_stat *vcpu_stat)
+{
+}
+
+/*
+ * Copy of the kernel's seq_buf_alloc(), which is not exported.
+ */
+static void *dfx_sw64_seq_buf_alloc(unsigned long size)
+{
+	return kvmalloc(size, GFP_KERNEL_ACCOUNT);
+}
+
+static void dfx_sw64_seq_buf_free(const void *buf)
+{
+	kvfree(buf);
+}
+
+static int dfx_sw64_seq_buf_alloc_vcpu(struct seq_file *p, int vcpu_nr)
+{
+	char *buf;
+	size_t size;
+
+	size = (vcpu_nr + 1) * DFX_SW64_MAX_VCPU_STAT_SIZE;
+	buf = dfx_sw64_seq_buf_alloc(size);
+	if (!buf)
+		return -ENOMEM;
+	if (p->buf)
+		dfx_sw64_seq_buf_free(p->buf);
+	p->buf = buf;
+	p->size = size;
+	return 0;
+}
+
+static int __dfx_sw64_vcpu_stats_get(struct seq_file *p, void *v)
+{
+	struct kvm *kvm;
+	struct kvm_vcpu *vcpu;
+	struct kvm_vcpu_stat *vcpu_stats;
+	struct dfx_sw64_kvm_stats_debugfs_item *dp;
+	int vcpu_nr = 0;
+	int index = 0;
+	unsigned long i;
+
+	mutex_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list)
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			vcpu_nr++;
+		}
+	mutex_unlock(&kvm_lock);
+	vcpu_nr = min(vcpu_nr, DFX_SW64_MAX_VCPU);
+	if (!vcpu_nr) {
+		seq_putc(p, '\n');
+		return 0;
+	}
+
+	if (dfx_sw64_seq_buf_alloc_vcpu(p, vcpu_nr))
+		return -ENOMEM;
+
+	vcpu_stats = vmalloc(vcpu_nr * sizeof(struct kvm_vcpu_stat));
+	if (!vcpu_stats)
+		return -ENOMEM;
+
+	mutex_lock(&kvm_lock);
+	list_for_each_entry(kvm, &vm_list, vm_list) {
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			if (index >= vcpu_nr)
+				break;
+			memcpy(vcpu_stats + index, &(vcpu->stat),
+			       sizeof(struct kvm_vcpu_stat));
+			kvm_arch_vcpu_stat_reset(&vcpu->stat);
+			++index;
+		}
+	}
+	mutex_unlock(&kvm_lock);
+	for (i = 0; i < vcpu_nr; i++) {
+		for (dp = dfx_sw64_debugfs_entries; dp->name; ++dp) {
+			switch (dp->dfx_kind) {
+			case DFX_SW64_STAT_U64:
+				seq_put_decimal_ull(p, " ",
+						    *(u64 *)((void *)&vcpu_stats[i] + dp->offset));
+				break;
+			case DFX_SW64_STAT_CPUTIME:
+				pr_warn("DFX_SW64_STAT_CPUTIME not supported currently!\n");
+				break;
+			default:
+				pr_warn("Bad dfx_sw64_kind in dfx_sw64_debugfs_entries!\n");
+				break;
+			}
+		}
+		seq_putc(p, '\n');
+	}
+
+	vfree(vcpu_stats);
+	return 0;
+}
+
+static int dfx_sw64_vcpu_stats_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, __dfx_sw64_vcpu_stats_get, NULL);
+}
+
+static const struct file_operations dfx_sw64_stat_fops = {
+	.open = dfx_sw64_vcpu_stats_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+#endif
+
 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
 {
 	struct kobj_uevent_env *env;